Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)
Remove postgresql content for 2.0.0 (#1355)

* Remove postgresql content
* add suggested
* Update meta/runtime.yml

Co-authored-by: Felix Fontein <felix@fontein.de>

* Remove symlinks
* Remove a consequence of rebasing
* Remove ssl task from setup_postgresql_db

Co-authored-by: Felix Fontein <felix@fontein.de>

commit 72b59c764e (parent 31443e57b1)
186 changed files with 61 additions and 27584 deletions
.github/BOTMETA.yml (23 changes, vendored)

@@ -55,10 +55,6 @@ files:
     labels: hwc
   $doc_fragments/nomad.py:
     maintainers: chris93111
-  $doc_fragments/postgres.py:
-    maintainers: $team_postgresql
-    labels: postgres postgresql
-    keywords: database postgres postgresql
   $doc_fragments/xenserver.py:
     maintainers: bvitnik
     labels: xenserver
@@ -147,10 +143,6 @@ files:
   $module_utils/oracle/oci_utils.py:
     maintainers: $team_oracle
     labels: cloud
-  $module_utils/postgres.py:
-    maintainers: $team_postgresql
-    labels: postgres postgresql
-    keywords: database postgres postgresql
   $module_utils/pure.py:
     maintainers: $team_purestorage
     labels: pure pure_storage
@@ -346,20 +338,6 @@ files:
   $modules/database/mssql/mssql_db.py:
     maintainers: vedit Jmainguy kenichi-ogawa-1988
     labels: mssql_db
-  $modules/database/postgresql/:
-    keywords: database postgres postgresql
-    labels: postgres postgresql
-    maintainers: $team_postgresql
-  $modules/database/postgresql/postgresql_ext.py:
-    maintainers: dschep strk
-  $modules/database/postgresql/postgresql_lang.py:
-    maintainers: jensdepuydt
-  $modules/database/postgresql/postgresql_privs.py:
-    maintainers: b6d
-  $modules/database/postgresql/postgresql_query.py:
-    maintainers: archf wrouesnel
-  $modules/database/postgresql/postgresql_tablespace.py:
-    maintainers: antoinell
   $modules/database/vertica/:
     maintainers: dareko
   $modules/files/archive.py:
@@ -1045,7 +1023,6 @@ macros:
   team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
   team_opennebula: ilicmilan meerkampdvv rsmontero xorel
   team_oracle: manojmeda mross22 nalsaber
-  team_postgresql: Andersson007 Dorn- andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs ilicmilan
   team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
   team_redfish: billdodd mraineri tomasg2012
   team_rhn: FlossWare alikins barnabycourt vritant
changelogs/fragments/postgresql-migration-removal.yml (new file, 12 additions)

@@ -0,0 +1,12 @@
+removed_features:
+  - >
+    All ``postgresql`` modules have been removed from this collection.
+    They have been migrated to the `community.postgresql <https://galaxy.ansible.com/community/postgresql>`_ collection.
+    If you use ansible-base 2.10 or newer, redirections have been provided.
+    If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.postgresql_info`` → ``community.postgresql.postgresql_info``) and make sure to install the community.postgresql collection.
+breaking_changes:
+  - >
+    If you use Ansible 2.9 and the ``postgresql`` modules from this collection, community.general 2.0.0 results in errors when trying to use the postgresql content by FQCN, like ``community.general.postgresql_info``.
+    Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``community.postgresql.postgresql_info`` for the previous example) and to make sure that you have ``community.postgresql`` installed.
+    If you use ansible-base 2.10 or newer and did not install Ansible 3.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install ``community.postgresql`` if you are using any of the ``postgresql`` modules.
+    While ansible-base 2.10 or newer can use the redirects that community.general 2.0.0 adds, the collection they point to (community.postgresql) must be installed for them to work.
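The FQCN adjustment described in the fragment is mechanical; a minimal before/after sketch for a single task (the task name and the filter value are illustrative and not part of this commit):

# Before: old FQCN from community.general (only keeps working on ansible-base 2.10+ via the redirects added below)
- name: Gather information about the PostgreSQL server
  community.general.postgresql_info:
    filter: ver*,ext*

# After: new FQCN, required on Ansible 2.9 and recommended everywhere
- name: Gather information about the PostgreSQL server
  community.postgresql.postgresql_info:
    filter: ver*,ext*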
meta/runtime.yml

@@ -380,6 +380,50 @@ plugin_routing:
       deprecation:
         removal_version: 3.0.0
         warning_text: see plugin documentation for details
+    postgresql_copy:
+      redirect: community.postgresql.postgresql_copy
+    postgresql_db:
+      redirect: community.postgresql.postgresql_db
+    postgresql_ext:
+      redirect: community.postgresql.postgresql_ext
+    postgresql_idx:
+      redirect: community.postgresql.postgresql_idx
+    postgresql_info:
+      redirect: community.postgresql.postgresql_info
+    postgresql_lang:
+      redirect: community.postgresql.postgresql_lang
+    postgresql_membership:
+      redirect: community.postgresql.postgresql_membership
+    postgresql_owner:
+      redirect: community.postgresql.postgresql_owner
+    postgresql_pg_hba:
+      redirect: community.postgresql.postgresql_pg_hba
+    postgresql_ping:
+      redirect: community.postgresql.postgresql_ping
+    postgresql_privs:
+      redirect: community.postgresql.postgresql_privs
+    postgresql_publication:
+      redirect: community.postgresql.postgresql_publication
+    postgresql_query:
+      redirect: community.postgresql.postgresql_query
+    postgresql_schema:
+      redirect: community.postgresql.postgresql_schema
+    postgresql_sequence:
+      redirect: community.postgresql.postgresql_sequence
+    postgresql_set:
+      redirect: community.postgresql.postgresql_set
+    postgresql_slot:
+      redirect: community.postgresql.postgresql_slot
+    postgresql_subscription:
+      redirect: community.postgresql.postgresql_subscription
+    postgresql_table:
+      redirect: community.postgresql.postgresql_table
+    postgresql_tablespace:
+      redirect: community.postgresql.postgresql_tablespace
+    postgresql_user_obj_stat_info:
+      redirect: community.postgresql.postgresql_user_obj_stat_info
+    postgresql_user:
+      redirect: community.postgresql.postgresql_user
     purefa_facts:
       deprecation:
         removal_version: 3.0.0
@@ -461,6 +505,8 @@ plugin_routing:
       redirect: community.docker.docker
     hetzner:
       redirect: community.hrobot.robot
+    postgresql:
+      redirect: community.postgresql.postgresql
   module_utils:
     docker.common:
       redirect: community.docker.common
@@ -468,6 +514,8 @@ plugin_routing:
       redirect: community.docker.swarm
     hetzner:
       redirect: community.hrobot.robot
+    postgresql:
+      redirect: community.postgresql.postgresql
   callback:
     actionable:
       tombstone:
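With these plugin_routing entries in place, ansible-base 2.10 and newer resolve the old community.general names transparently, as long as community.postgresql is installed next to community.general. A hedged sketch of the user-facing side (the collection versions and the task are illustrative, not taken from this commit):

# requirements.yml - both collections must be present for the redirects to resolve
collections:
  - name: community.general
    version: ">=2.0.0"
  - name: community.postgresql

# A task may keep the old FQCN on ansible-base 2.10+; the routing table above
# redirects it to community.postgresql.postgresql_user at runtime. On Ansible 2.9
# the new FQCN has to be used directly instead.
- name: Ensure the application role exists
  community.general.postgresql_user:
    name: appuser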
plugins/doc_fragments/postgres.py (deleted)

@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):
    # Postgres documentation fragment
    DOCUMENTATION = r'''
options:
  login_user:
    description:
      - The username used to authenticate with.
    type: str
    default: postgres
  login_password:
    description:
      - The password used to authenticate with.
    type: str
  login_host:
    description:
      - Host running the database.
    type: str
  login_unix_socket:
    description:
      - Path to a Unix domain socket for local connections.
    type: str
  port:
    description:
      - Database port to connect to.
    type: int
    default: 5432
    aliases: [ login_port ]
  ssl_mode:
    description:
      - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
      - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
      - Default of C(prefer) matches libpq default.
    type: str
    default: prefer
    choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
  ca_cert:
    description:
      - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
      - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
    type: str
    aliases: [ ssl_rootcert ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- To avoid "Peer authentication failed for user postgres" error,
  use postgres user as a I(become_user).
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
  ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
  PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
  on the remote host before using this module.
- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
requirements: [ psycopg2 ]
'''
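Every postgresql_* module pulled these shared connection options in through extends_documentation_fragment. A minimal sketch of a task supplying them, assuming the (now redirected) postgresql_ping module and illustrative connection values:

- name: Check that the server accepts connections
  community.postgresql.postgresql_ping:
    login_user: postgres              # default when omitted
    login_host: db1.example.com       # illustrative host
    port: 5432                        # alias: login_port
    ssl_mode: verify-full             # allow, disable, prefer, require, verify-ca, verify-full
    ca_cert: /etc/ssl/certs/root.crt  # alias: ssl_rootcert

For local peer authentication the notes above apply instead: run the task with become_user set to postgres and leave login_host and login_password unset.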
plugins/module_utils/postgres.py (deleted)

@@ -1,314 +0,0 @@
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
|
||||
# Most of this was originally added by other creators in the postgresql_user module.
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
psycopg2 = None # This line needs for unit tests
|
||||
try:
|
||||
import psycopg2
|
||||
HAS_PSYCOPG2 = True
|
||||
except ImportError:
|
||||
HAS_PSYCOPG2 = False
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six import iteritems
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
|
||||
def postgres_common_argument_spec():
|
||||
"""
|
||||
Return a dictionary with connection options.
|
||||
|
||||
The options are commonly used by most of PostgreSQL modules.
|
||||
"""
|
||||
return dict(
|
||||
login_user=dict(default='postgres'),
|
||||
login_password=dict(default='', no_log=True),
|
||||
login_host=dict(default=''),
|
||||
login_unix_socket=dict(default=''),
|
||||
port=dict(type='int', default=5432, aliases=['login_port']),
|
||||
ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
|
||||
ca_cert=dict(aliases=['ssl_rootcert']),
|
||||
)
|
||||
|
||||
|
||||
def ensure_required_libs(module):
|
||||
"""Check required libraries."""
|
||||
if not HAS_PSYCOPG2:
|
||||
module.fail_json(msg=missing_required_lib('psycopg2'))
|
||||
|
||||
if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
|
||||
module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
|
||||
|
||||
|
||||
def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
|
||||
"""Connect to a PostgreSQL database.
|
||||
|
||||
Return psycopg2 connection object.
|
||||
|
||||
Args:
|
||||
module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
|
||||
conn_params (dict) -- dictionary with connection parameters
|
||||
|
||||
Kwargs:
|
||||
autocommit (bool) -- commit automatically (default False)
|
||||
fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
|
||||
"""
|
||||
ensure_required_libs(module)
|
||||
|
||||
db_connection = None
|
||||
try:
|
||||
db_connection = psycopg2.connect(**conn_params)
|
||||
if autocommit:
|
||||
if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
|
||||
db_connection.set_session(autocommit=True)
|
||||
else:
|
||||
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
|
||||
|
||||
# Switch role, if specified:
|
||||
if module.params.get('session_role'):
|
||||
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
|
||||
|
||||
try:
|
||||
cursor.execute('SET ROLE "%s"' % module.params['session_role'])
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Could not switch role: %s" % to_native(e))
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
except TypeError as e:
|
||||
if 'sslrootcert' in e.args[0]:
|
||||
module.fail_json(msg='Postgresql server must be at least '
|
||||
'version 8.4 to support sslrootcert')
|
||||
|
||||
if fail_on_conn:
|
||||
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
|
||||
else:
|
||||
module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
|
||||
db_connection = None
|
||||
|
||||
except Exception as e:
|
||||
if fail_on_conn:
|
||||
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
|
||||
else:
|
||||
module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
|
||||
db_connection = None
|
||||
|
||||
return db_connection
|
||||
|
||||
|
||||
def exec_sql(obj, query, query_params=None, return_bool=False, add_to_executed=True, dont_exec=False):
|
||||
"""Execute SQL.
|
||||
|
||||
Auxiliary function for PostgreSQL user classes.
|
||||
|
||||
Returns a query result if possible or a boolean value.
|
||||
|
||||
Args:
|
||||
obj (obj) -- must be an object of a user class.
|
||||
The object must have module (AnsibleModule class object) and
|
||||
cursor (psycopg cursor object) attributes
|
||||
query (str) -- SQL query to execute
|
||||
|
||||
Kwargs:
|
||||
query_params (dict or tuple) -- Query parameters to prevent SQL injections,
|
||||
could be a dict or tuple
|
||||
return_bool (bool) -- return True instead of rows if a query was successfully executed.
|
||||
It's necessary for statements that don't return any result like DDL queries (default False).
|
||||
add_to_executed (bool) -- append the query to obj.executed_queries attribute
|
||||
dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
|
||||
to obj.executed_queries list and return True (default False)
|
||||
"""
|
||||
|
||||
if dont_exec:
|
||||
# This is usually needed to return queries in check_mode
|
||||
# without execution
|
||||
query = obj.cursor.mogrify(query, query_params)
|
||||
if add_to_executed:
|
||||
obj.executed_queries.append(query)
|
||||
|
||||
return True
|
||||
|
||||
try:
|
||||
if query_params is not None:
|
||||
obj.cursor.execute(query, query_params)
|
||||
else:
|
||||
obj.cursor.execute(query)
|
||||
|
||||
if add_to_executed:
|
||||
if query_params is not None:
|
||||
obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
|
||||
else:
|
||||
obj.executed_queries.append(query)
|
||||
|
||||
if not return_bool:
|
||||
res = obj.cursor.fetchall()
|
||||
return res
|
||||
return True
|
||||
except Exception as e:
|
||||
obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
|
||||
return False
|
||||
|
||||
|
||||
def get_conn_params(module, params_dict, warn_db_default=True):
|
||||
"""Get connection parameters from the passed dictionary.
|
||||
|
||||
Return a dictionary with parameters to connect to PostgreSQL server.
|
||||
|
||||
Args:
|
||||
module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
|
||||
params_dict (dict) -- dictionary with variables
|
||||
|
||||
Kwargs:
|
||||
warn_db_default (bool) -- warn that the default DB is used (default True)
|
||||
"""
|
||||
# To use defaults values, keyword arguments must be absent, so
|
||||
# check which values are empty and don't include in the return dictionary
|
||||
params_map = {
|
||||
"login_host": "host",
|
||||
"login_user": "user",
|
||||
"login_password": "password",
|
||||
"port": "port",
|
||||
"ssl_mode": "sslmode",
|
||||
"ca_cert": "sslrootcert"
|
||||
}
|
||||
|
||||
# Might be different in the modules:
|
||||
if params_dict.get('db'):
|
||||
params_map['db'] = 'database'
|
||||
elif params_dict.get('database'):
|
||||
params_map['database'] = 'database'
|
||||
elif params_dict.get('login_db'):
|
||||
params_map['login_db'] = 'database'
|
||||
else:
|
||||
if warn_db_default:
|
||||
module.warn('Database name has not been passed, '
|
||||
'used default database to connect to.')
|
||||
|
||||
kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
|
||||
if k in params_map and v != '' and v is not None)
|
||||
|
||||
# If a login_unix_socket is specified, incorporate it here.
|
||||
is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
|
||||
if is_localhost and params_dict["login_unix_socket"] != "":
|
||||
kw["host"] = params_dict["login_unix_socket"]
|
||||
|
||||
return kw
|
||||
|
||||
|
||||
class PgMembership(object):
|
||||
def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.target_roles = [r.strip() for r in target_roles]
|
||||
self.groups = [r.strip() for r in groups]
|
||||
self.executed_queries = []
|
||||
self.granted = {}
|
||||
self.revoked = {}
|
||||
self.fail_on_role = fail_on_role
|
||||
self.non_existent_roles = []
|
||||
self.changed = False
|
||||
self.__check_roles_exist()
|
||||
|
||||
def grant(self):
|
||||
for group in self.groups:
|
||||
self.granted[group] = []
|
||||
|
||||
for role in self.target_roles:
|
||||
# If role is in a group now, pass:
|
||||
if self.__check_membership(group, role):
|
||||
continue
|
||||
|
||||
query = 'GRANT "%s" TO "%s"' % (group, role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
if self.changed:
|
||||
self.granted[group].append(role)
|
||||
|
||||
return self.changed
|
||||
|
||||
def revoke(self):
|
||||
for group in self.groups:
|
||||
self.revoked[group] = []
|
||||
|
||||
for role in self.target_roles:
|
||||
# If role is not in a group now, pass:
|
||||
if not self.__check_membership(group, role):
|
||||
continue
|
||||
|
||||
query = 'REVOKE "%s" FROM "%s"' % (group, role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
if self.changed:
|
||||
self.revoked[group].append(role)
|
||||
|
||||
return self.changed
|
||||
|
||||
def __check_membership(self, src_role, dst_role):
|
||||
query = ("SELECT ARRAY(SELECT b.rolname FROM "
|
||||
"pg_catalog.pg_auth_members m "
|
||||
"JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
|
||||
"WHERE m.member = r.oid) "
|
||||
"FROM pg_catalog.pg_roles r "
|
||||
"WHERE r.rolname = %(dst_role)s")
|
||||
|
||||
res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
|
||||
membership = []
|
||||
if res:
|
||||
membership = res[0][0]
|
||||
|
||||
if not membership:
|
||||
return False
|
||||
|
||||
if src_role in membership:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def __check_roles_exist(self):
|
||||
existent_groups = self.__roles_exist(self.groups)
|
||||
existent_roles = self.__roles_exist(self.target_roles)
|
||||
|
||||
for group in self.groups:
|
||||
if group not in existent_groups:
|
||||
if self.fail_on_role:
|
||||
self.module.fail_json(msg="Role %s does not exist" % group)
|
||||
else:
|
||||
self.module.warn("Role %s does not exist, pass" % group)
|
||||
self.non_existent_roles.append(group)
|
||||
|
||||
for role in self.target_roles:
|
||||
if role not in existent_roles:
|
||||
if self.fail_on_role:
|
||||
self.module.fail_json(msg="Role %s does not exist" % role)
|
||||
else:
|
||||
self.module.warn("Role %s does not exist, pass" % role)
|
||||
|
||||
if role not in self.groups:
|
||||
self.non_existent_roles.append(role)
|
||||
|
||||
else:
|
||||
if self.fail_on_role:
|
||||
self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role))
|
||||
else:
|
||||
self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role))
|
||||
|
||||
# Update role lists, excluding non existent roles:
|
||||
self.groups = [g for g in self.groups if g not in self.non_existent_roles]
|
||||
|
||||
self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]
|
||||
|
||||
def __roles_exist(self, roles):
|
||||
tmp = ["'" + x + "'" for x in roles]
|
||||
query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
|
||||
return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
|
|
plugins/modules/database/postgresql/postgresql_copy.py (deleted)

@@ -1,420 +0,0 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_copy
|
||||
short_description: Copy data between a file/program and a PostgreSQL table
|
||||
description:
|
||||
- Copy data between a file/program and a PostgreSQL table.
|
||||
|
||||
options:
|
||||
copy_to:
|
||||
description:
|
||||
- Copy the contents of a table to a file.
|
||||
- Can also copy the results of a SELECT query.
|
||||
- Mutually exclusive with I(copy_from) and I(dst).
|
||||
type: path
|
||||
aliases: [ to ]
|
||||
copy_from:
|
||||
description:
|
||||
- Copy data from a file to a table (appending the data to whatever is in the table already).
|
||||
- Mutually exclusive with I(copy_to) and I(src).
|
||||
type: path
|
||||
aliases: [ from ]
|
||||
src:
|
||||
description:
|
||||
- Copy data from I(copy_from) to I(src=tablename).
|
||||
- Used with I(copy_to) only.
|
||||
type: str
|
||||
aliases: [ source ]
|
||||
dst:
|
||||
description:
|
||||
- Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
|
||||
- Used with I(copy_from) only.
|
||||
type: str
|
||||
aliases: [ destination ]
|
||||
columns:
|
||||
description:
|
||||
- List of column names for the src/dst table to COPY FROM/TO.
|
||||
type: list
|
||||
elements: str
|
||||
aliases: [ column ]
|
||||
program:
|
||||
description:
|
||||
- Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
|
||||
- See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
|
||||
type: bool
|
||||
default: no
|
||||
options:
|
||||
description:
|
||||
- Options of COPY command.
|
||||
- See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
|
||||
type: dict
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to.
|
||||
type: str
|
||||
aliases: [ login_db ]
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
notes:
|
||||
- Supports PostgreSQL version 9.4+.
|
||||
- COPY command is only allowed to database superusers.
|
||||
- if I(check_mode=yes), we just check the src/dst table availability
|
||||
and return the COPY query that actually has not been executed.
|
||||
- If i(check_mode=yes) and the source has been passed as SQL, the module
|
||||
will execute it and rolled the transaction back but pay attention
|
||||
it can affect database performance (e.g., if SQL collects a lot of data).
|
||||
|
||||
seealso:
|
||||
- name: COPY command reference
|
||||
description: Complete reference of the COPY command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-copy.html
|
||||
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
|
||||
community.general.postgresql_copy:
|
||||
copy_from: /tmp/data.txt
|
||||
dst: acme
|
||||
|
||||
- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
|
||||
community.general.postgresql_copy:
|
||||
copy_from: /tmp/data.csv
|
||||
dst: acme
|
||||
columns: id,name
|
||||
options:
|
||||
format: csv
|
||||
|
||||
- name: >
|
||||
Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
|
||||
The NULL values are specified as N
|
||||
community.general.postgresql_copy:
|
||||
copy_from: /tmp/data.csv
|
||||
dst: bar
|
||||
options:
|
||||
delimiter: '|'
|
||||
null: 'N'
|
||||
|
||||
- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
|
||||
community.general.postgresql_copy:
|
||||
src: acme
|
||||
copy_to: /tmp/data.txt
|
||||
|
||||
- name: Copy data from SELECT query to/tmp/data.csv in CSV format
|
||||
community.general.postgresql_copy:
|
||||
src: 'SELECT * FROM acme'
|
||||
copy_to: /tmp/data.csv
|
||||
options:
|
||||
format: csv
|
||||
|
||||
- name: Copy CSV data from my_table to gzip
|
||||
community.general.postgresql_copy:
|
||||
src: my_table
|
||||
copy_to: 'gzip > /tmp/data.csv.gz'
|
||||
program: yes
|
||||
options:
|
||||
format: csv
|
||||
|
||||
- name: >
|
||||
Copy data from columns id, name of table bar to /tmp/data.txt.
|
||||
Output format is text, vertical-bar-separated, NULL as N
|
||||
community.general.postgresql_copy:
|
||||
src: bar
|
||||
columns:
|
||||
- id
|
||||
- name
|
||||
copy_to: /tmp/data.csv
|
||||
options:
|
||||
delimiter: '|'
|
||||
null: 'N'
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: str
|
||||
sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
|
||||
src:
|
||||
description: Data source.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "mytable"
|
||||
dst:
|
||||
description: Data destination.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/tmp/data.csv"
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
pg_quote_identifier,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils.six import iteritems
|
||||
|
||||
|
||||
class PgCopyData(object):
|
||||
|
||||
"""Implements behavior of COPY FROM, COPY TO PostgreSQL command.
|
||||
|
||||
Arguments:
|
||||
module (AnsibleModule) -- object of AnsibleModule class
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
|
||||
Attributes:
|
||||
module (AnsibleModule) -- object of AnsibleModule class
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
changed (bool) -- something was changed after execution or not
|
||||
executed_queries (list) -- executed queries
|
||||
dst (str) -- data destination table (when copy_from)
|
||||
src (str) -- data source table (when copy_to)
|
||||
opt_need_quotes (tuple) -- values of these options must be passed
|
||||
to SQL in quotes
|
||||
"""
|
||||
|
||||
def __init__(self, module, cursor):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.executed_queries = []
|
||||
self.changed = False
|
||||
self.dst = ''
|
||||
self.src = ''
|
||||
self.opt_need_quotes = (
|
||||
'DELIMITER',
|
||||
'NULL',
|
||||
'QUOTE',
|
||||
'ESCAPE',
|
||||
'ENCODING',
|
||||
)
|
||||
|
||||
def copy_from(self):
|
||||
"""Implements COPY FROM command behavior."""
|
||||
self.src = self.module.params['copy_from']
|
||||
self.dst = self.module.params['dst']
|
||||
|
||||
query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]
|
||||
|
||||
if self.module.params.get('columns'):
|
||||
query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
|
||||
|
||||
query_fragments.append('FROM')
|
||||
|
||||
if self.module.params.get('program'):
|
||||
query_fragments.append('PROGRAM')
|
||||
|
||||
query_fragments.append("'%s'" % self.src)
|
||||
|
||||
if self.module.params.get('options'):
|
||||
query_fragments.append(self.__transform_options())
|
||||
|
||||
# Note: check mode is implemented here:
|
||||
if self.module.check_mode:
|
||||
self.changed = self.__check_table(self.dst)
|
||||
|
||||
if self.changed:
|
||||
self.executed_queries.append(' '.join(query_fragments))
|
||||
else:
|
||||
if exec_sql(self, ' '.join(query_fragments), return_bool=True):
|
||||
self.changed = True
|
||||
|
||||
def copy_to(self):
|
||||
"""Implements COPY TO command behavior."""
|
||||
self.src = self.module.params['src']
|
||||
self.dst = self.module.params['copy_to']
|
||||
|
||||
if 'SELECT ' in self.src.upper():
|
||||
# If src is SQL SELECT statement:
|
||||
query_fragments = ['COPY (%s)' % self.src]
|
||||
else:
|
||||
# If src is a table:
|
||||
query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]
|
||||
|
||||
if self.module.params.get('columns'):
|
||||
query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
|
||||
|
||||
query_fragments.append('TO')
|
||||
|
||||
if self.module.params.get('program'):
|
||||
query_fragments.append('PROGRAM')
|
||||
|
||||
query_fragments.append("'%s'" % self.dst)
|
||||
|
||||
if self.module.params.get('options'):
|
||||
query_fragments.append(self.__transform_options())
|
||||
|
||||
# Note: check mode is implemented here:
|
||||
if self.module.check_mode:
|
||||
self.changed = self.__check_table(self.src)
|
||||
|
||||
if self.changed:
|
||||
self.executed_queries.append(' '.join(query_fragments))
|
||||
else:
|
||||
if exec_sql(self, ' '.join(query_fragments), return_bool=True):
|
||||
self.changed = True
|
||||
|
||||
def __transform_options(self):
|
||||
"""Transform options dict into a suitable string."""
|
||||
for (key, val) in iteritems(self.module.params['options']):
|
||||
if key.upper() in self.opt_need_quotes:
|
||||
self.module.params['options'][key] = "'%s'" % val
|
||||
|
||||
opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
|
||||
return '(%s)' % ', '.join(opt)
|
||||
|
||||
def __check_table(self, table):
|
||||
"""Check table or SQL in transaction mode for check_mode.
|
||||
|
||||
Return True if it is OK.
|
||||
|
||||
Arguments:
|
||||
table (str) - Table name that needs to be checked.
|
||||
It can be SQL SELECT statement that was passed
|
||||
instead of the table name.
|
||||
"""
|
||||
if 'SELECT ' in table.upper():
|
||||
# In this case table is actually SQL SELECT statement.
|
||||
# If SQL fails, it's handled by exec_sql():
|
||||
exec_sql(self, table, add_to_executed=False)
|
||||
# If exec_sql was passed, it means all is OK:
|
||||
return True
|
||||
|
||||
exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
|
||||
add_to_executed=False)
|
||||
# If SQL was executed successfully:
|
||||
return True
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
copy_to=dict(type='path', aliases=['to']),
|
||||
copy_from=dict(type='path', aliases=['from']),
|
||||
src=dict(type='str', aliases=['source']),
|
||||
dst=dict(type='str', aliases=['destination']),
|
||||
columns=dict(type='list', elements='str', aliases=['column']),
|
||||
options=dict(type='dict'),
|
||||
program=dict(type='bool', default=False),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[
|
||||
['copy_from', 'copy_to'],
|
||||
['copy_from', 'src'],
|
||||
['copy_to', 'dst'],
|
||||
]
|
||||
)
|
||||
|
||||
if not module.params['trust_input']:
|
||||
# Check input for potentially dangerous elements:
|
||||
opt_list = None
|
||||
if module.params['options']:
|
||||
opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])]
|
||||
|
||||
check_input(module,
|
||||
module.params['copy_to'],
|
||||
module.params['copy_from'],
|
||||
module.params['src'],
|
||||
module.params['dst'],
|
||||
opt_list,
|
||||
module.params['columns'],
|
||||
module.params['session_role'])
|
||||
|
||||
# Note: we don't need to check mutually exclusive params here, because they are
|
||||
# checked automatically by AnsibleModule (mutually_exclusive=[] list above).
|
||||
if module.params.get('copy_from') and not module.params.get('dst'):
|
||||
module.fail_json(msg='dst param is necessary with copy_from')
|
||||
|
||||
elif module.params.get('copy_to') and not module.params.get('src'):
|
||||
module.fail_json(msg='src param is necessary with copy_to')
|
||||
|
||||
# Connect to DB and make cursor object:
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=False)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
##############
|
||||
# Create the object and do main job:
|
||||
data = PgCopyData(module, cursor)
|
||||
|
||||
# Note: parameters like dst, src, etc. are got
|
||||
# from module object into data object of PgCopyData class.
|
||||
# Therefore not need to pass args to the methods below.
|
||||
# Note: check mode is implemented inside the methods below
|
||||
# by checking passed module.check_mode arg.
|
||||
if module.params.get('copy_to'):
|
||||
data.copy_to()
|
||||
|
||||
elif module.params.get('copy_from'):
|
||||
data.copy_from()
|
||||
|
||||
# Finish:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Return some values:
|
||||
module.exit_json(
|
||||
changed=data.changed,
|
||||
queries=data.executed_queries,
|
||||
src=data.src,
|
||||
dst=data.dst,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
plugins/modules/database/postgresql/postgresql_db.py (deleted)

@@ -1,667 +0,0 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_db
|
||||
short_description: Add or remove PostgreSQL databases from a remote host.
|
||||
description:
|
||||
- Add or remove PostgreSQL databases from a remote host.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the database to add or remove
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ db ]
|
||||
port:
|
||||
description:
|
||||
- Database port to connect (if needed)
|
||||
type: int
|
||||
default: 5432
|
||||
aliases:
|
||||
- login_port
|
||||
owner:
|
||||
description:
|
||||
- Name of the role to set as owner of the database
|
||||
type: str
|
||||
template:
|
||||
description:
|
||||
- Template used to create the database
|
||||
type: str
|
||||
encoding:
|
||||
description:
|
||||
- Encoding of the database
|
||||
type: str
|
||||
lc_collate:
|
||||
description:
|
||||
- Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
|
||||
type: str
|
||||
lc_ctype:
|
||||
description:
|
||||
- Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
|
||||
is used as template.
|
||||
type: str
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The database state.
|
||||
- C(present) implies that the database should be created if necessary.
|
||||
- C(absent) implies that the database should be removed if present.
|
||||
- C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
|
||||
Note that in some PostgreSQL versions of pg_dump, which is an embedded PostgreSQL utility and is used by the module,
|
||||
returns rc 0 even when errors occurred (e.g. the connection is forbidden by pg_hba.conf, etc.),
|
||||
so the module returns changed=True but the dump has not actually been done. Please, be sure that your version of
|
||||
pg_dump returns rc 1 in this case.
|
||||
- C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
|
||||
- The format of the backup will be detected based on the target name.
|
||||
- Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
|
||||
- Supported formats for dump and restore include C(.sql) and C(.tar)
|
||||
- "Restore program is selected by target file format: C(.tar) and C(.pgc) are handled by pg_restore, other with pgsql."
|
||||
type: str
|
||||
choices: [ absent, dump, present, restore ]
|
||||
default: present
|
||||
target:
|
||||
description:
|
||||
- File to back up or restore from.
|
||||
- Used when I(state) is C(dump) or C(restore).
|
||||
type: path
|
||||
target_opts:
|
||||
description:
|
||||
- Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format).
|
||||
- Used when I(state) is C(dump) or C(restore).
|
||||
type: str
|
||||
maintenance_db:
|
||||
description:
|
||||
- The value specifies the initial database (which is also called as maintenance DB) that Ansible connects to.
|
||||
type: str
|
||||
default: postgres
|
||||
conn_limit:
|
||||
description:
|
||||
- Specifies the database connection limit.
|
||||
type: str
|
||||
tablespace:
|
||||
description:
|
||||
- The tablespace to set for the database
|
||||
U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
|
||||
- If you want to move the database back to the default tablespace,
|
||||
explicitly set this to pg_default.
|
||||
type: path
|
||||
dump_extra_args:
|
||||
description:
|
||||
- Provides additional arguments when I(state) is C(dump).
|
||||
- Cannot be used with dump-file-format-related arguments like ``--format=d``.
|
||||
type: str
|
||||
version_added: '0.2.0'
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(owner), I(conn_limit), I(encoding),
|
||||
I(db), I(template), I(tablespace), I(session_role) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
seealso:
|
||||
- name: CREATE DATABASE reference
|
||||
description: Complete reference of the CREATE DATABASE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createdatabase.html
|
||||
- name: DROP DATABASE reference
|
||||
description: Complete reference of the DROP DATABASE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
|
||||
- name: pg_dump reference
|
||||
description: Complete reference of pg_dump documentation.
|
||||
link: https://www.postgresql.org/docs/current/app-pgdump.html
|
||||
- name: pg_restore reference
|
||||
description: Complete reference of pg_restore documentation.
|
||||
link: https://www.postgresql.org/docs/current/app-pgrestore.html
|
||||
- module: community.general.postgresql_tablespace
|
||||
- module: community.general.postgresql_info
|
||||
- module: community.general.postgresql_ping
|
||||
notes:
|
||||
- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
|
||||
author: "Ansible Core Team"
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a new database with name "acme"
|
||||
community.general.postgresql_db:
|
||||
name: acme
|
||||
|
||||
# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
|
||||
- name: Create a new database with name "acme" and specific encoding and locale # settings.
|
||||
community.general.postgresql_db:
|
||||
name: acme
|
||||
encoding: UTF-8
|
||||
lc_collate: de_DE.UTF-8
|
||||
lc_ctype: de_DE.UTF-8
|
||||
template: template0
|
||||
|
||||
# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
|
||||
- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
|
||||
community.general.postgresql_db:
|
||||
name: acme
|
||||
conn_limit: "100"
|
||||
|
||||
- name: Dump an existing database to a file
|
||||
community.general.postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/acme.sql
|
||||
|
||||
- name: Dump an existing database to a file excluding the test table
|
||||
community.general.postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/acme.sql
|
||||
dump_extra_args: --exclude-table=test
|
||||
|
||||
- name: Dump an existing database to a file (with compression)
|
||||
community.general.postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/acme.sql.gz
|
||||
|
||||
- name: Dump a single schema for an existing database
|
||||
community.general.postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/acme.sql
|
||||
target_opts: "-n public"
|
||||
|
||||
- name: Dump only table1 and table2 from the acme database
|
||||
community.general.postgresql_db:
|
||||
name: acme
|
||||
state: dump
|
||||
target: /tmp/table1_table2.sql
|
||||
target_opts: "-t table1 -t table2"
|
||||
|
||||
# Note: In the example below, if database foo exists and has another tablespace
|
||||
# the tablespace will be changed to foo. Access to the database will be locked
|
||||
# until the copying of database files is finished.
|
||||
- name: Create a new database called foo in tablespace bar
|
||||
community.general.postgresql_db:
|
||||
name: foo
|
||||
tablespace: bar
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
executed_commands:
|
||||
description: List of commands which tried to run.
|
||||
returned: always
|
||||
type: list
|
||||
sample: ["CREATE DATABASE acme"]
|
||||
version_added: '0.2.0'
|
||||
'''
|
||||
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import psycopg2
|
||||
import psycopg2.extras
|
||||
except ImportError:
|
||||
HAS_PSYCOPG2 = False
|
||||
else:
|
||||
HAS_PSYCOPG2 = True
|
||||
|
||||
import ansible_collections.community.general.plugins.module_utils.postgres as pgutils
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
SQLParseError,
|
||||
)
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
executed_commands = []
|
||||
|
||||
|
||||
class NotSupportedError(Exception):
|
||||
pass
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
|
||||
def set_owner(cursor, db, owner):
|
||||
query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (db, owner)
|
||||
executed_commands.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
|
||||
def set_conn_limit(cursor, db, conn_limit):
|
||||
query = 'ALTER DATABASE "%s" CONNECTION LIMIT %s' % (db, conn_limit)
|
||||
executed_commands.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
|
||||
def get_encoding_id(cursor, encoding):
|
||||
query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
|
||||
cursor.execute(query, {'encoding': encoding})
|
||||
return cursor.fetchone()['encoding_id']
|
||||
|
||||
|
||||
def get_db_info(cursor, db):
|
||||
query = """
|
||||
SELECT rolname AS owner,
|
||||
pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
|
||||
datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
|
||||
spcname AS tablespace
|
||||
FROM pg_database
|
||||
JOIN pg_roles ON pg_roles.oid = pg_database.datdba
|
||||
JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
|
||||
WHERE datname = %(db)s
|
||||
"""
|
||||
cursor.execute(query, {'db': db})
|
||||
return cursor.fetchone()
|
||||
|
||||
|
||||
def db_exists(cursor, db):
|
||||
query = "SELECT * FROM pg_database WHERE datname=%(db)s"
|
||||
cursor.execute(query, {'db': db})
|
||||
return cursor.rowcount == 1
|
||||
|
||||
|
||||
def db_delete(cursor, db):
|
||||
if db_exists(cursor, db):
|
||||
query = 'DROP DATABASE "%s"' % db
|
||||
executed_commands.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
|
||||
params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
|
||||
if not db_exists(cursor, db):
|
||||
query_fragments = ['CREATE DATABASE "%s"' % db]
|
||||
if owner:
|
||||
query_fragments.append('OWNER "%s"' % owner)
|
||||
if template:
|
||||
query_fragments.append('TEMPLATE "%s"' % template)
|
||||
if encoding:
|
||||
query_fragments.append('ENCODING %(enc)s')
|
||||
if lc_collate:
|
||||
query_fragments.append('LC_COLLATE %(collate)s')
|
||||
if lc_ctype:
|
||||
query_fragments.append('LC_CTYPE %(ctype)s')
|
||||
if tablespace:
|
||||
query_fragments.append('TABLESPACE "%s"' % tablespace)
|
||||
if conn_limit:
|
||||
query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
|
||||
query = ' '.join(query_fragments)
|
||||
executed_commands.append(cursor.mogrify(query, params))
|
||||
cursor.execute(query, params)
|
||||
return True
|
||||
else:
|
||||
db_info = get_db_info(cursor, db)
|
||||
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
|
||||
raise NotSupportedError(
|
||||
'Changing database encoding is not supported. '
|
||||
'Current encoding: %s' % db_info['encoding']
|
||||
)
|
||||
elif lc_collate and lc_collate != db_info['lc_collate']:
|
||||
raise NotSupportedError(
|
||||
'Changing LC_COLLATE is not supported. '
|
||||
'Current LC_COLLATE: %s' % db_info['lc_collate']
|
||||
)
|
||||
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
|
||||
raise NotSupportedError(
|
||||
'Changing LC_CTYPE is not supported.'
|
||||
'Current LC_CTYPE: %s' % db_info['lc_ctype']
|
||||
)
|
||||
else:
|
||||
changed = False
|
||||
|
||||
if owner and owner != db_info['owner']:
|
||||
changed = set_owner(cursor, db, owner)
|
||||
|
||||
if conn_limit and conn_limit != str(db_info['conn_limit']):
|
||||
changed = set_conn_limit(cursor, db, conn_limit)
|
||||
|
||||
if tablespace and tablespace != db_info['tablespace']:
|
||||
changed = set_tablespace(cursor, db, tablespace)
|
||||
|
||||
return changed
|
||||
|
||||
|
||||
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
|
||||
if not db_exists(cursor, db):
|
||||
return False
|
||||
else:
|
||||
db_info = get_db_info(cursor, db)
|
||||
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
|
||||
return False
|
||||
elif lc_collate and lc_collate != db_info['lc_collate']:
|
||||
return False
|
||||
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
|
||||
return False
|
||||
elif owner and owner != db_info['owner']:
|
||||
return False
|
||||
elif conn_limit and conn_limit != str(db_info['conn_limit']):
|
||||
return False
|
||||
elif tablespace and tablespace != db_info['tablespace']:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def db_dump(module, target, target_opts="",
|
||||
db=None,
|
||||
dump_extra_args=None,
|
||||
user=None,
|
||||
password=None,
|
||||
host=None,
|
||||
port=None,
|
||||
**kw):
|
||||
|
||||
flags = login_flags(db, host, port, user, db_prefix=False)
|
||||
cmd = module.get_bin_path('pg_dump', True)
|
||||
comp_prog_path = None
|
||||
|
||||
if os.path.splitext(target)[-1] == '.tar':
|
||||
flags.append(' --format=t')
|
||||
elif os.path.splitext(target)[-1] == '.pgc':
|
||||
flags.append(' --format=c')
|
||||
if os.path.splitext(target)[-1] == '.gz':
|
||||
if module.get_bin_path('pigz'):
|
||||
comp_prog_path = module.get_bin_path('pigz', True)
|
||||
else:
|
||||
comp_prog_path = module.get_bin_path('gzip', True)
|
||||
elif os.path.splitext(target)[-1] == '.bz2':
|
||||
comp_prog_path = module.get_bin_path('bzip2', True)
|
||||
elif os.path.splitext(target)[-1] == '.xz':
|
||||
comp_prog_path = module.get_bin_path('xz', True)
|
||||
|
||||
cmd += "".join(flags)
|
||||
|
||||
if dump_extra_args:
|
||||
cmd += " {0} ".format(dump_extra_args)
|
||||
|
||||
if target_opts:
|
||||
cmd += " {0} ".format(target_opts)
|
||||
|
||||
if comp_prog_path:
|
||||
# Use a fifo to be notified of an error in pg_dump
|
||||
# Using shell pipe has no way to return the code of the first command
|
||||
# in a portable way.
|
||||
fifo = os.path.join(module.tmpdir, 'pg_fifo')
|
||||
os.mkfifo(fifo)
|
||||
cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
|
||||
else:
|
||||
cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
|
||||
|
||||
return do_with_password(module, cmd, password)
|
||||
|
||||
|
||||
def db_restore(module, target, target_opts="",
|
||||
db=None,
|
||||
user=None,
|
||||
password=None,
|
||||
host=None,
|
||||
port=None,
|
||||
**kw):
|
||||
|
||||
flags = login_flags(db, host, port, user)
|
||||
comp_prog_path = None
|
||||
cmd = module.get_bin_path('psql', True)
|
||||
|
||||
if os.path.splitext(target)[-1] == '.sql':
|
||||
flags.append(' --file={0}'.format(target))
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.tar':
|
||||
flags.append(' --format=Tar')
|
||||
cmd = module.get_bin_path('pg_restore', True)
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.pgc':
|
||||
flags.append(' --format=Custom')
|
||||
cmd = module.get_bin_path('pg_restore', True)
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.gz':
|
||||
comp_prog_path = module.get_bin_path('zcat', True)
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.bz2':
|
||||
comp_prog_path = module.get_bin_path('bzcat', True)
|
||||
|
||||
elif os.path.splitext(target)[-1] == '.xz':
|
||||
comp_prog_path = module.get_bin_path('xzcat', True)
|
||||
|
||||
cmd += "".join(flags)
|
||||
if target_opts:
|
||||
cmd += " {0} ".format(target_opts)
|
||||
|
||||
if comp_prog_path:
|
||||
env = os.environ.copy()
|
||||
if password:
|
||||
env = {"PGPASSWORD": password}
|
||||
p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
|
||||
(stdout2, stderr2) = p2.communicate()
|
||||
p1.stdout.close()
|
||||
p1.wait()
|
||||
if p1.returncode != 0:
|
||||
stderr1 = p1.stderr.read()
|
||||
return p1.returncode, '', stderr1, 'cmd: ****'
|
||||
else:
|
||||
return p2.returncode, '', stderr2, 'cmd: ****'
|
||||
else:
|
||||
cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
|
||||
|
||||
return do_with_password(module, cmd, password)
|
||||
|
||||
|
||||
def login_flags(db, host, port, user, db_prefix=True):
|
||||
"""
|
||||
returns a list of connection argument strings each prefixed
|
||||
with a space and quoted where necessary to later be combined
|
||||
in a single shell string with `"".join(rv)`
|
||||
|
||||
db_prefix determines if "--dbname" is prefixed to the db argument,
|
||||
since the argument was introduced in 9.3.
|
||||
"""
|
||||
flags = []
|
||||
if db:
|
||||
if db_prefix:
|
||||
flags.append(' --dbname={0}'.format(shlex_quote(db)))
|
||||
else:
|
||||
flags.append(' {0}'.format(shlex_quote(db)))
|
||||
if host:
|
||||
flags.append(' --host={0}'.format(host))
|
||||
if port:
|
||||
flags.append(' --port={0}'.format(port))
|
||||
if user:
|
||||
flags.append(' --username={0}'.format(user))
|
||||
return flags
|
||||
|
||||
|
||||
def do_with_password(module, cmd, password):
|
||||
env = {}
|
||||
if password:
|
||||
env = {"PGPASSWORD": password}
|
||||
executed_commands.append(cmd)
|
||||
rc, stderr, stdout = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
|
||||
return rc, stderr, stdout, cmd
|
||||
|
||||
|
||||
def set_tablespace(cursor, db, tablespace):
|
||||
query = 'ALTER DATABASE "%s" SET TABLESPACE "%s"' % (db, tablespace)
|
||||
executed_commands.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = pgutils.postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
db=dict(type='str', required=True, aliases=['name']),
|
||||
owner=dict(type='str', default=''),
|
||||
template=dict(type='str', default=''),
|
||||
encoding=dict(type='str', default=''),
|
||||
lc_collate=dict(type='str', default=''),
|
||||
lc_ctype=dict(type='str', default=''),
|
||||
state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
|
||||
target=dict(type='path', default=''),
|
||||
target_opts=dict(type='str', default=''),
|
||||
maintenance_db=dict(type='str', default="postgres"),
|
||||
session_role=dict(type='str'),
|
||||
conn_limit=dict(type='str', default=''),
|
||||
tablespace=dict(type='path', default=''),
|
||||
dump_extra_args=dict(type='str', default=None),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
db = module.params["db"]
|
||||
owner = module.params["owner"]
|
||||
template = module.params["template"]
|
||||
encoding = module.params["encoding"]
|
||||
lc_collate = module.params["lc_collate"]
|
||||
lc_ctype = module.params["lc_ctype"]
|
||||
target = module.params["target"]
|
||||
target_opts = module.params["target_opts"]
|
||||
state = module.params["state"]
|
||||
changed = False
|
||||
maintenance_db = module.params['maintenance_db']
|
||||
session_role = module.params["session_role"]
|
||||
conn_limit = module.params['conn_limit']
|
||||
tablespace = module.params['tablespace']
|
||||
dump_extra_args = module.params['dump_extra_args']
|
||||
trust_input = module.params['trust_input']
|
||||
|
||||
# Check input
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role)
|
||||
|
||||
raw_connection = state in ("dump", "restore")
|
||||
|
||||
if not raw_connection:
|
||||
pgutils.ensure_required_libs(module)
|
||||
|
||||
# To use defaults values, keyword arguments must be absent, so
|
||||
# check which values are empty and don't include in the **kw
|
||||
# dictionary
|
||||
params_map = {
|
||||
"login_host": "host",
|
||||
"login_user": "user",
|
||||
"login_password": "password",
|
||||
"port": "port",
|
||||
"ssl_mode": "sslmode",
|
||||
"ca_cert": "sslrootcert"
|
||||
}
|
||||
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
|
||||
if k in params_map and v != '' and v is not None)
|
||||
|
||||
# If a login_unix_socket is specified, incorporate it here.
|
||||
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
|
||||
|
||||
if is_localhost and module.params["login_unix_socket"] != "":
|
||||
kw["host"] = module.params["login_unix_socket"]
|
||||
|
||||
if target == "":
|
||||
target = "{0}/{1}.sql".format(os.getcwd(), db)
|
||||
target = os.path.expanduser(target)
|
||||
|
||||
if not raw_connection:
|
||||
try:
|
||||
db_connection = psycopg2.connect(database=maintenance_db, **kw)
|
||||
|
||||
# Enable autocommit so we can create databases
|
||||
if psycopg2.__version__ >= '2.4.2':
|
||||
db_connection.autocommit = True
|
||||
else:
|
||||
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
|
||||
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
|
||||
|
||||
except TypeError as e:
|
||||
if 'sslrootcert' in e.args[0]:
|
||||
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
if session_role:
|
||||
try:
|
||||
cursor.execute('SET ROLE "%s"' % session_role)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
try:
|
||||
if module.check_mode:
|
||||
if state == "absent":
|
||||
changed = db_exists(cursor, db)
|
||||
elif state == "present":
|
||||
changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
|
||||
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
|
||||
|
||||
if state == "absent":
|
||||
try:
|
||||
changed = db_delete(cursor, db)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
elif state == "present":
|
||||
try:
|
||||
changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
elif state in ("dump", "restore"):
|
||||
method = state == "dump" and db_dump or db_restore
|
||||
try:
|
||||
if state == 'dump':
|
||||
rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
|
||||
else:
|
||||
rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
|
||||
else:
|
||||
module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
|
||||
executed_commands=executed_commands)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
except NotSupportedError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
except SystemExit:
|
||||
# Avoid catching this on Python 2.4
|
||||
raise
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,443 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_ext
|
||||
short_description: Add or remove PostgreSQL extensions from a database
|
||||
description:
|
||||
- Add or remove PostgreSQL extensions from a database.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the extension to add or remove.
|
||||
required: true
|
||||
type: str
|
||||
aliases:
|
||||
- ext
|
||||
db:
|
||||
description:
|
||||
- Name of the database to add or remove the extension to/from.
|
||||
required: true
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
schema:
|
||||
description:
|
||||
- Name of the schema to add the extension to.
|
||||
type: str
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
- The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The database extension state.
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
type: str
|
||||
cascade:
|
||||
description:
|
||||
- Automatically install/remove any extensions that this extension depends on
|
||||
that are not already installed/removed (supported since PostgreSQL 9.6).
|
||||
type: bool
|
||||
default: no
|
||||
login_unix_socket:
|
||||
description:
|
||||
- Path to a Unix domain socket for local connections.
|
||||
type: str
|
||||
ssl_mode:
|
||||
description:
|
||||
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
|
||||
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
|
||||
- Default of C(prefer) matches libpq default.
|
||||
type: str
|
||||
default: prefer
|
||||
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
|
||||
ca_cert:
|
||||
description:
|
||||
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
|
||||
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
|
||||
type: str
|
||||
aliases: [ ssl_rootcert ]
|
||||
version:
|
||||
description:
|
||||
- Extension version to add or update to. Has effect with I(state=present) only.
|
||||
- If not specified, the latest extension version will be created.
|
||||
- It can't downgrade an extension version.
|
||||
When a version downgrade is needed, remove the extension and create a new one with the appropriate version.
|
||||
- Set I(version=latest) to update the extension to the latest available version.
|
||||
type: str
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(ext), I(schema),
|
||||
I(version), I(session_role) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
seealso:
|
||||
- name: PostgreSQL extensions
|
||||
description: General information about PostgreSQL extensions.
|
||||
link: https://www.postgresql.org/docs/current/external-extensions.html
|
||||
- name: CREATE EXTENSION reference
|
||||
description: Complete reference of the CREATE EXTENSION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createextension.html
|
||||
- name: ALTER EXTENSION reference
|
||||
description: Complete reference of the ALTER EXTENSION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-alterextension.html
|
||||
- name: DROP EXTENSION reference
|
||||
description: Complete reference of the DROP EXTENSION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-dropextension.html
|
||||
notes:
|
||||
- The default authentication assumes that you are either logging in as
|
||||
or sudo'ing to the C(postgres) account on the host.
|
||||
- This module uses I(psycopg2), a Python PostgreSQL database adapter.
|
||||
- You must ensure that C(psycopg2) is installed on the host before using this module.
|
||||
- If the remote host is the PostgreSQL server (which is the default case),
|
||||
then PostgreSQL must also be installed on the remote host.
|
||||
- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
|
||||
and C(python-psycopg2) packages on the remote host before using this module.
|
||||
- Incomparable versions, for example PostGIS ``unpackaged``, cannot be installed.
|
||||
requirements: [ psycopg2 ]
|
||||
author:
|
||||
- Daniel Schep (@dschep)
|
||||
- Thomas O'Donnell (@andytom)
|
||||
- Sandro Santilli (@strk)
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Adds postgis extension to the database acme in the schema foo
|
||||
community.general.postgresql_ext:
|
||||
name: postgis
|
||||
db: acme
|
||||
schema: foo
|
||||
|
||||
- name: Removes postgis extension from the database acme
|
||||
community.general.postgresql_ext:
|
||||
name: postgis
|
||||
db: acme
|
||||
state: absent
|
||||
|
||||
- name: Adds earthdistance extension to the database template1 cascade
|
||||
community.general.postgresql_ext:
|
||||
name: earthdistance
|
||||
db: template1
|
||||
cascade: true
|
||||
|
||||
# In the example below, if earthdistance extension is installed,
|
||||
# it will be removed too because it depends on cube:
|
||||
- name: Removes cube extension from the database acme cascade
|
||||
community.general.postgresql_ext:
|
||||
name: cube
|
||||
db: acme
|
||||
cascade: yes
|
||||
state: absent
|
||||
|
||||
- name: Create extension foo of version 1.2 or update it if it's already created
|
||||
community.general.postgresql_ext:
|
||||
db: acme
|
||||
name: foo
|
||||
version: 1.2
|
||||
|
||||
- name: Assuming extension foo is created, update it to the latest version
|
||||
community.general.postgresql_ext:
|
||||
db: acme
|
||||
name: foo
|
||||
version: latest
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
query:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: ["DROP EXTENSION \"acme\""]
|
||||
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
executed_queries = []
|
||||
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
def ext_exists(cursor, ext):
|
||||
query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
|
||||
cursor.execute(query, {'ext': ext})
|
||||
return cursor.rowcount == 1
|
||||
|
||||
|
||||
def ext_delete(cursor, ext, cascade):
|
||||
if ext_exists(cursor, ext):
|
||||
query = "DROP EXTENSION \"%s\"" % ext
|
||||
if cascade:
|
||||
query += " CASCADE"
|
||||
cursor.execute(query)
|
||||
executed_queries.append(query)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def ext_update_version(cursor, ext, version):
|
||||
"""Update extension version.
|
||||
|
||||
Return True if success.
|
||||
|
||||
Args:
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
ext (str) -- extension name
|
||||
version (str) -- extension version
|
||||
"""
|
||||
query = "ALTER EXTENSION \"%s\" UPDATE" % ext
|
||||
params = {}
|
||||
|
||||
if version != 'latest':
|
||||
query += " TO %(ver)s"
|
||||
params['ver'] = version
|
||||
|
||||
cursor.execute(query, params)
|
||||
executed_queries.append(cursor.mogrify(query, params))
|
||||
|
||||
return True
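# Illustrative note, not part of the original module: with a concrete version
# the statement built above renders roughly as
#     ALTER EXTENSION "postgis" UPDATE TO '3.1.0'
# while version='latest' omits the TO clause so the server picks the newest
# available version. The extension name and version are made up for the example.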
|
||||
|
||||
|
||||
def ext_create(cursor, ext, schema, cascade, version):
|
||||
query = "CREATE EXTENSION \"%s\"" % ext
|
||||
params = {}
|
||||
|
||||
if schema:
|
||||
query += " WITH SCHEMA \"%s\"" % schema
|
||||
if version:
|
||||
query += " VERSION %(ver)s"
|
||||
params['ver'] = version
|
||||
if cascade:
|
||||
query += " CASCADE"
|
||||
|
||||
cursor.execute(query, params)
|
||||
executed_queries.append(cursor.mogrify(query, params))
|
||||
return True
|
||||
|
||||
|
||||
def ext_get_versions(cursor, ext):
|
||||
"""
|
||||
Get the current created extension version and available versions.
|
||||
|
||||
Return tuple (current_version, [list of available versions]).
|
||||
|
||||
Note: the list of available versions contains only versions
|
||||
that are higher than the currently created version.
|
||||
If the extension is not created, this list will contain all
|
||||
available versions.
|
||||
|
||||
Args:
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
ext (str) -- extension name
|
||||
"""
|
||||
|
||||
# 1. Get the current extension version:
|
||||
query = ("SELECT extversion FROM pg_catalog.pg_extension "
|
||||
"WHERE extname = %(ext)s")
|
||||
|
||||
current_version = '0'
|
||||
cursor.execute(query, {'ext': ext})
|
||||
res = cursor.fetchone()
|
||||
if res:
|
||||
current_version = res[0]
|
||||
|
||||
# 2. Get available versions:
|
||||
query = ("SELECT version FROM pg_available_extension_versions "
|
||||
"WHERE name = %(ext)s")
|
||||
cursor.execute(query, {'ext': ext})
|
||||
res = cursor.fetchall()
|
||||
|
||||
available_versions = parse_ext_versions(current_version, res)
|
||||
|
||||
if current_version == '0':
|
||||
current_version = False
|
||||
|
||||
return (current_version, available_versions)
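# Illustrative note, not part of the original module: the tuple returned above
# has the shape (current_version, newer_available_versions), for example
#     ('3.0.1', ['3.0.3', '3.1.0'])   # extension created, newer versions exist
#     (False, ['3.0.1', '3.1.0'])     # extension not created yet
# Version numbers are made up for the example.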
|
||||
|
||||
|
||||
def parse_ext_versions(current_version, ext_ver_list):
|
||||
"""Parse ext versions.
|
||||
|
||||
Args:
|
||||
current_version (str) -- version to compare elements of ext_ver_list with
|
||||
ext_ver_list (list) -- list containing dicts with versions
|
||||
|
||||
Return a sorted list with versions that are higher than current_version.
|
||||
|
||||
Note: Incomparable versions (e.g., postgis version "unpackaged") are skipped.
|
||||
"""
|
||||
available_versions = []
|
||||
|
||||
for line in ext_ver_list:
|
||||
if line['version'] == 'unpackaged':
|
||||
continue
|
||||
|
||||
try:
|
||||
if LooseVersion(line['version']) > LooseVersion(current_version):
|
||||
available_versions.append(line['version'])
|
||||
except Exception:
|
||||
# When a version cannot be compared, skip it
|
||||
# (there's a note in the documentation)
|
||||
continue
|
||||
|
||||
return sorted(available_versions, key=LooseVersion)
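def _example_parse_ext_versions():
    # Illustrative sketch, not part of the original module: shows which versions
    # survive filtering by the function above. Input values are made up.
    rows = [{'version': 'unpackaged'}, {'version': '1.0'},
            {'version': '1.3'}, {'version': '1.2'}]
    return parse_ext_versions('1.1', rows)  # -> ['1.2', '1.3']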
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
db=dict(type="str", required=True, aliases=["login_db"]),
|
||||
ext=dict(type="str", required=True, aliases=["name"]),
|
||||
schema=dict(type="str"),
|
||||
state=dict(type="str", default="present", choices=["absent", "present"]),
|
||||
cascade=dict(type="bool", default=False),
|
||||
session_role=dict(type="str"),
|
||||
version=dict(type="str"),
|
||||
trust_input=dict(type="bool", default=True),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
ext = module.params["ext"]
|
||||
schema = module.params["schema"]
|
||||
state = module.params["state"]
|
||||
cascade = module.params["cascade"]
|
||||
version = module.params["version"]
|
||||
session_role = module.params["session_role"]
|
||||
trust_input = module.params["trust_input"]
|
||||
changed = False
|
||||
|
||||
if not trust_input:
|
||||
check_input(module, ext, schema, version, session_role)
|
||||
|
||||
if version and state == 'absent':
|
||||
module.warn("Parameter version is ignored when state=absent")
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
try:
|
||||
# Get extension info and available versions:
|
||||
curr_version, available_versions = ext_get_versions(cursor, ext)
|
||||
|
||||
if state == "present":
|
||||
if version == 'latest':
|
||||
if available_versions:
|
||||
version = available_versions[-1]
|
||||
else:
|
||||
version = ''
|
||||
|
||||
if version:
|
||||
# If the specific version is passed and it is not available for update:
|
||||
if version not in available_versions:
|
||||
if not curr_version:
|
||||
module.fail_json(msg="Passed version '%s' is not available" % version)
|
||||
|
||||
elif LooseVersion(curr_version) == LooseVersion(version):
|
||||
changed = False
|
||||
|
||||
else:
|
||||
module.fail_json(msg="Passed version '%s' is lower than "
|
||||
"the current created version '%s' or "
|
||||
"the passed version is not available" % (version, curr_version))
|
||||
|
||||
# If the specific version is passed and it is higher than the current version:
|
||||
if curr_version:
|
||||
if LooseVersion(curr_version) < LooseVersion(version):
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
changed = ext_update_version(cursor, ext, version)
|
||||
|
||||
# If the specific version is passed and the extension is already at that version:
|
||||
if curr_version == version:
|
||||
changed = False
|
||||
|
||||
# If the extension is not created yet but is available for installation:
|
||||
elif not curr_version and available_versions:
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
changed = ext_create(cursor, ext, schema, cascade, version)
|
||||
|
||||
# If version is not passed:
|
||||
else:
|
||||
if not curr_version:
|
||||
# If the extension is not created yet but is available for installation:
|
||||
if available_versions:
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
changed = ext_create(cursor, ext, schema, cascade, version)
|
||||
|
||||
# If the extension is not created and not available for installation:
|
||||
else:
|
||||
module.fail_json(msg="Extension %s is not installed" % ext)
|
||||
|
||||
elif state == "absent":
|
||||
if curr_version:
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
changed = ext_delete(cursor, ext, cascade)
|
||||
else:
|
||||
changed = False
|
||||
|
||||
except Exception as e:
|
||||
db_connection.close()
|
||||
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
db_connection.close()
|
||||
module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,589 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_idx
|
||||
short_description: Create or drop indexes from a PostgreSQL database
|
||||
description:
|
||||
- Create or drop indexes from a PostgreSQL database.
|
||||
|
||||
options:
|
||||
idxname:
|
||||
description:
|
||||
- Name of the index to create or drop.
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- name
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to and where the index will be created/dropped.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
schema:
|
||||
description:
|
||||
- Name of a database schema where the index will be created.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Index state.
|
||||
- C(present) implies the index will be created if it does not exist.
|
||||
- C(absent) implies the index will be dropped if it exists.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
table:
|
||||
description:
|
||||
- Table to create the index on.
|
||||
- Mutually exclusive with I(state=absent).
|
||||
type: str
|
||||
columns:
|
||||
description:
|
||||
- List of index columns that need to be covered by index.
|
||||
- Mutually exclusive with I(state=absent).
|
||||
type: list
|
||||
elements: str
|
||||
aliases:
|
||||
- column
|
||||
cond:
|
||||
description:
|
||||
- Index conditions.
|
||||
- Mutually exclusive with I(state=absent).
|
||||
type: str
|
||||
idxtype:
|
||||
description:
|
||||
- Index type (like btree, gist, gin, etc.).
|
||||
- Mutually exclusive with I(state=absent).
|
||||
type: str
|
||||
aliases:
|
||||
- type
|
||||
concurrent:
|
||||
description:
|
||||
- Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
|
||||
- Note that if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process.
|
||||
For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
|
||||
- If the building process is interrupted for any reason when I(concurrent=yes), the index becomes invalid.
|
||||
In this case it should be dropped and created again.
|
||||
- Mutually exclusive with I(cascade=yes).
|
||||
type: bool
|
||||
default: yes
|
||||
unique:
|
||||
description:
|
||||
- Enable unique index.
|
||||
- Only btree currently supports unique indexes.
|
||||
type: bool
|
||||
default: no
|
||||
version_added: '0.2.0'
|
||||
tablespace:
|
||||
description:
|
||||
- Set a tablespace for the index.
|
||||
- Mutually exclusive with I(state=absent).
|
||||
required: false
|
||||
type: str
|
||||
storage_params:
|
||||
description:
|
||||
- Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
|
||||
- Mutually exclusive with I(state=absent).
|
||||
type: list
|
||||
elements: str
|
||||
cascade:
|
||||
description:
|
||||
- Automatically drop objects that depend on the index,
|
||||
and in turn all objects that depend on those objects.
|
||||
- It is used only with I(state=absent).
|
||||
- Mutually exclusive with I(concurrent=yes)
|
||||
type: bool
|
||||
default: no
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(idxname), I(session_role),
|
||||
I(schema), I(table), I(columns), I(tablespace), I(storage_params),
|
||||
I(cond) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
|
||||
seealso:
|
||||
- module: community.general.postgresql_table
|
||||
- module: community.general.postgresql_tablespace
|
||||
- name: PostgreSQL indexes reference
|
||||
description: General information about PostgreSQL indexes.
|
||||
link: https://www.postgresql.org/docs/current/indexes.html
|
||||
- name: CREATE INDEX reference
|
||||
description: Complete reference of the CREATE INDEX command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createindex.html
|
||||
- name: ALTER INDEX reference
|
||||
description: Complete reference of the ALTER INDEX command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-alterindex.html
|
||||
- name: DROP INDEX reference
|
||||
description: Complete reference of the DROP INDEX command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-dropindex.html
|
||||
|
||||
notes:
|
||||
- The index building process can affect database performance.
|
||||
- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
|
||||
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
- Thomas O'Donnell (@andytom)
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
|
||||
community.general.postgresql_idx:
|
||||
db: acme
|
||||
table: products
|
||||
columns: id,name
|
||||
name: test_idx
|
||||
|
||||
- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
|
||||
community.general.postgresql_idx:
|
||||
db: acme
|
||||
table: products
|
||||
columns:
|
||||
- id
|
||||
- name
|
||||
idxname: test_idx
|
||||
tablespace: ssd
|
||||
storage_params:
|
||||
- fillfactor=90
|
||||
|
||||
- name: Create gist index test_gist_idx concurrently on column geo_data of table map
|
||||
community.general.postgresql_idx:
|
||||
db: somedb
|
||||
table: map
|
||||
idxtype: gist
|
||||
columns: geo_data
|
||||
idxname: test_gist_idx
|
||||
|
||||
# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
|
||||
- name: Create gin index gin0_idx not concurrently on column comment of table test
|
||||
community.general.postgresql_idx:
|
||||
idxname: gin0_idx
|
||||
table: test
|
||||
columns: comment gin_trgm_ops
|
||||
concurrent: no
|
||||
idxtype: gin
|
||||
|
||||
- name: Drop btree test_idx concurrently
|
||||
community.general.postgresql_idx:
|
||||
db: mydb
|
||||
idxname: test_idx
|
||||
state: absent
|
||||
|
||||
- name: Drop test_idx cascade
|
||||
community.general.postgresql_idx:
|
||||
db: mydb
|
||||
idxname: test_idx
|
||||
state: absent
|
||||
cascade: yes
|
||||
concurrent: no
|
||||
|
||||
- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
|
||||
community.general.postgresql_idx:
|
||||
db: mydb
|
||||
table: test
|
||||
columns: id,comment
|
||||
idxname: test_idx
|
||||
cond: id > 1
|
||||
|
||||
- name: Create unique btree index if not exists test_unique_idx on column name of table products
|
||||
community.general.postgresql_idx:
|
||||
db: acme
|
||||
table: products
|
||||
columns: name
|
||||
name: test_unique_idx
|
||||
unique: yes
|
||||
concurrent: no
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
name:
|
||||
description: Index name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'foo_idx'
|
||||
state:
|
||||
description: Index state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'present'
|
||||
schema:
|
||||
description: Schema where index exists.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'public'
|
||||
tablespace:
|
||||
description: Tablespace where index exists.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'ssd'
|
||||
query:
|
||||
description: Query that was tried to be executed.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
|
||||
storage_params:
|
||||
description: Index storage parameters.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ "fillfactor=90" ]
|
||||
valid:
|
||||
description: Index validity.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import check_input
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
|
||||
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
class Index(object):
|
||||
|
||||
"""Class for working with PostgreSQL indexes.
|
||||
|
||||
TODO:
|
||||
1. Add possibility to change ownership
|
||||
2. Add possibility to change tablespace
|
||||
3. Add list called executed_queries (executed_query should be left too)
|
||||
4. Use self.module instead of passing arguments to the methods whenever possible
|
||||
|
||||
Args:
|
||||
module (AnsibleModule) -- object of AnsibleModule class
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
schema (str) -- name of the index schema
|
||||
name (str) -- name of the index
|
||||
|
||||
Attrs:
|
||||
module (AnsibleModule) -- object of AnsibleModule class
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
schema (str) -- name of the index schema
|
||||
name (str) -- name of the index
|
||||
exists (bool) -- flag the index exists in the DB or not
|
||||
info (dict) -- dict that contains information about the index
|
||||
executed_query (str) -- executed query
|
||||
"""
|
||||
|
||||
def __init__(self, module, cursor, schema, name):
|
||||
self.name = name
|
||||
if schema:
|
||||
self.schema = schema
|
||||
else:
|
||||
self.schema = 'public'
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.info = {
|
||||
'name': self.name,
|
||||
'state': 'absent',
|
||||
'schema': '',
|
||||
'tblname': '',
|
||||
'tblspace': '',
|
||||
'valid': True,
|
||||
'storage_params': [],
|
||||
}
|
||||
self.exists = False
|
||||
self.__exists_in_db()
|
||||
self.executed_query = ''
|
||||
|
||||
def get_info(self):
|
||||
"""Refresh index info.
|
||||
|
||||
Return self.info dict.
|
||||
"""
|
||||
self.__exists_in_db()
|
||||
return self.info
|
||||
|
||||
def __exists_in_db(self):
|
||||
"""Check index existence, collect info, add it to self.info dict.
|
||||
|
||||
Return True if the index exists, otherwise, return False.
|
||||
"""
|
||||
query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
|
||||
"pi.indisvalid, c.reloptions "
|
||||
"FROM pg_catalog.pg_indexes AS i "
|
||||
"JOIN pg_catalog.pg_class AS c "
|
||||
"ON i.indexname = c.relname "
|
||||
"JOIN pg_catalog.pg_index AS pi "
|
||||
"ON c.oid = pi.indexrelid "
|
||||
"WHERE i.indexname = %(name)s")
|
||||
|
||||
res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
|
||||
if res:
|
||||
self.exists = True
|
||||
self.info = dict(
|
||||
name=self.name,
|
||||
state='present',
|
||||
schema=res[0][0],
|
||||
tblname=res[0][1],
|
||||
tblspace=res[0][2] if res[0][2] else '',
|
||||
valid=res[0][3],
|
||||
storage_params=res[0][4] if res[0][4] else [],
|
||||
)
|
||||
return True
|
||||
|
||||
else:
|
||||
self.exists = False
|
||||
return False
|
||||
|
||||
def create(self, tblname, idxtype, columns, cond, tblspace,
|
||||
storage_params, concurrent=True, unique=False):
|
||||
"""Create PostgreSQL index.
|
||||
|
||||
Return True if success, otherwise, return False.
|
||||
|
||||
Args:
|
||||
tblname (str) -- name of a table for the index
|
||||
idxtype (str) -- type of the index like BTREE, BRIN, etc
|
||||
columns (str) -- string of comma-separated columns that need to be covered by index
|
||||
tblspace (str) -- tablespace for storing the index
|
||||
storage_params (str) -- string of comma-separated storage parameters
|
||||
|
||||
Kwargs:
|
||||
concurrent (bool) -- build index in concurrent mode, default True
|
||||
"""
|
||||
if self.exists:
|
||||
return False
|
||||
|
||||
if idxtype is None:
|
||||
idxtype = "BTREE"
|
||||
|
||||
query = 'CREATE'
|
||||
|
||||
if unique:
|
||||
query += ' UNIQUE'
|
||||
|
||||
query += ' INDEX'
|
||||
|
||||
if concurrent:
|
||||
query += ' CONCURRENTLY'
|
||||
|
||||
query += ' "%s"' % self.name
|
||||
|
||||
query += ' ON "%s"."%s" ' % (self.schema, tblname)
|
||||
|
||||
query += 'USING %s (%s)' % (idxtype, columns)
|
||||
|
||||
if storage_params:
|
||||
query += ' WITH (%s)' % storage_params
|
||||
|
||||
if tblspace:
|
||||
query += ' TABLESPACE "%s"' % tblspace
|
||||
|
||||
if cond:
|
||||
query += ' WHERE %s' % cond
|
||||
|
||||
self.executed_query = query
|
||||
|
||||
return exec_sql(self, query, return_bool=True, add_to_executed=False)
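# Illustrative note, not part of the original module: for an index named
# test_idx in schema public, a call like
#     create('products', 'btree', 'id,name', None, 'ssd', 'fillfactor=90')
# assembles roughly
#     CREATE INDEX CONCURRENTLY "test_idx" ON "public"."products" USING btree (id,name) WITH (fillfactor=90) TABLESPACE "ssd"
# All names and values are made up for the example.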
|
||||
|
||||
def drop(self, cascade=False, concurrent=True):
|
||||
"""Drop PostgreSQL index.
|
||||
|
||||
Return True if success, otherwise, return False.
|
||||
|
||||
Args:
|
||||
schema (str) -- name of the index schema
|
||||
|
||||
Kwargs:
|
||||
cascade (bool) -- automatically drop objects that depend on the index,
|
||||
default False
|
||||
concurrent (bool) -- build index in concurrent mode, default True
|
||||
"""
|
||||
if not self.exists:
|
||||
return False
|
||||
|
||||
query = 'DROP INDEX'
|
||||
|
||||
if concurrent:
|
||||
query += ' CONCURRENTLY'
|
||||
|
||||
query += ' "%s"."%s"' % (self.schema, self.name)
|
||||
|
||||
if cascade:
|
||||
query += ' CASCADE'
|
||||
|
||||
self.executed_query = query
|
||||
|
||||
return exec_sql(self, query, return_bool=True, add_to_executed=False)
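# Illustrative note, not part of the original module: minimal use of the Index
# class defined above, assuming a live psycopg2 cursor and AnsibleModule object
# (names and values are made up for the example).
#
#     index = Index(module, cursor, 'public', 'test_idx')
#     if index.exists:
#         index.drop(cascade=False, concurrent=True)
#         print(index.executed_query)  # the DROP INDEX statement that was built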
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
idxname=dict(type='str', required=True, aliases=['name']),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
concurrent=dict(type='bool', default=True),
|
||||
unique=dict(type='bool', default=False),
|
||||
table=dict(type='str'),
|
||||
idxtype=dict(type='str', aliases=['type']),
|
||||
columns=dict(type='list', elements='str', aliases=['column']),
|
||||
cond=dict(type='str'),
|
||||
session_role=dict(type='str'),
|
||||
tablespace=dict(type='str'),
|
||||
storage_params=dict(type='list', elements='str'),
|
||||
cascade=dict(type='bool', default=False),
|
||||
schema=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
idxname = module.params["idxname"]
|
||||
state = module.params["state"]
|
||||
concurrent = module.params["concurrent"]
|
||||
unique = module.params["unique"]
|
||||
table = module.params["table"]
|
||||
idxtype = module.params["idxtype"]
|
||||
columns = module.params["columns"]
|
||||
cond = module.params["cond"]
|
||||
tablespace = module.params["tablespace"]
|
||||
storage_params = module.params["storage_params"]
|
||||
cascade = module.params["cascade"]
|
||||
schema = module.params["schema"]
|
||||
session_role = module.params["session_role"]
|
||||
trust_input = module.params["trust_input"]
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, idxname, session_role, schema, table, columns,
|
||||
tablespace, storage_params, cond)
|
||||
|
||||
if concurrent and cascade:
|
||||
module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")
|
||||
|
||||
if unique and (idxtype and idxtype != 'btree'):
|
||||
module.fail_json(msg="Only btree currently supports unique indexes")
|
||||
|
||||
if state == 'present':
|
||||
if not table:
|
||||
module.fail_json(msg="Table must be specified")
|
||||
if not columns:
|
||||
module.fail_json(msg="At least one column must be specified")
|
||||
else:
|
||||
if table or columns or cond or idxtype or tablespace:
|
||||
module.fail_json(msg="Index %s is going to be removed, so it does not "
|
||||
"make sense to pass a table name, columns, conditions, "
|
||||
"index type, or tablespace" % idxname)
|
||||
|
||||
if cascade and state != 'absent':
|
||||
module.fail_json(msg="cascade parameter used only with state=absent")
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
# Set defaults:
|
||||
changed = False
|
||||
|
||||
# Do job:
|
||||
index = Index(module, cursor, schema, idxname)
|
||||
kw = index.get_info()
|
||||
kw['query'] = ''
|
||||
|
||||
#
|
||||
# check_mode start
|
||||
if module.check_mode:
|
||||
if state == 'present' and index.exists:
|
||||
kw['changed'] = False
|
||||
module.exit_json(**kw)
|
||||
|
||||
elif state == 'present' and not index.exists:
|
||||
kw['changed'] = True
|
||||
module.exit_json(**kw)
|
||||
|
||||
elif state == 'absent' and not index.exists:
|
||||
kw['changed'] = False
|
||||
module.exit_json(**kw)
|
||||
|
||||
elif state == 'absent' and index.exists:
|
||||
kw['changed'] = True
|
||||
module.exit_json(**kw)
|
||||
# check_mode end
|
||||
#
|
||||
|
||||
if state == "present":
|
||||
if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
|
||||
module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
|
||||
|
||||
columns = ','.join(columns)
|
||||
|
||||
if storage_params:
|
||||
storage_params = ','.join(storage_params)
|
||||
|
||||
changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)
|
||||
|
||||
if changed:
|
||||
kw = index.get_info()
|
||||
kw['state'] = 'present'
|
||||
kw['query'] = index.executed_query
|
||||
|
||||
else:
|
||||
changed = index.drop(cascade, concurrent)
|
||||
|
||||
if changed:
|
||||
kw['state'] = 'absent'
|
||||
kw['query'] = index.executed_query
|
||||
|
||||
if not kw['valid']:
|
||||
db_connection.rollback()
|
||||
module.warn("Index %s is invalid! ROLLBACK" % idxname)
|
||||
|
||||
if not concurrent:
|
||||
db_connection.commit()
|
||||
|
||||
kw['changed'] = changed
|
||||
db_connection.close()
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
File diff suppressed because it is too large
@ -1,363 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2014, Jens Depuydt <http://www.jensd.be>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_lang
|
||||
short_description: Adds, removes or changes procedural languages with a PostgreSQL database
|
||||
description:
|
||||
- Adds, removes or changes procedural languages with a PostgreSQL database.
|
||||
- This module allows you to add a language, remove a language or change the trust
|
||||
relationship with a PostgreSQL database.
|
||||
- The module can be used on the machine where executed or on a remote host.
|
||||
- When removing a language from a database, it is possible that dependencies prevent
|
||||
the database from being removed. In that case, you can specify I(cascade=yes) to
|
||||
automatically drop objects that depend on the language (such as functions in the
|
||||
language).
|
||||
- In case the language can't be deleted because it is required by the
|
||||
database system, you can specify I(fail_on_drop=no) to ignore the error.
|
||||
- Be careful when marking a language as trusted since this could be a potential
|
||||
security breach. Untrusted languages allow only users with the PostgreSQL superuser
|
||||
privilege to use this language to create new functions.
|
||||
options:
|
||||
lang:
|
||||
description:
|
||||
- Name of the procedural language to add, remove or change.
|
||||
required: true
|
||||
type: str
|
||||
aliases:
|
||||
- name
|
||||
trust:
|
||||
description:
|
||||
- Make this language trusted for the selected db.
|
||||
type: bool
|
||||
default: 'no'
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to and where the language will be added, removed or changed.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
required: true
|
||||
force_trust:
|
||||
description:
|
||||
- Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
|
||||
- Use with care!
|
||||
type: bool
|
||||
default: 'no'
|
||||
fail_on_drop:
|
||||
description:
|
||||
- If C(yes), fail when removing a language. Otherwise just log and continue.
|
||||
- In some cases, it is not possible to remove a language (used by the db-system).
|
||||
- When dependencies block the removal, consider using I(cascade).
|
||||
type: bool
|
||||
default: 'yes'
|
||||
cascade:
|
||||
description:
|
||||
- When dropping a language, also delete object that depend on this language.
|
||||
- Only used when I(state=absent).
|
||||
type: bool
|
||||
default: 'no'
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
- The specified I(session_role) must be a role that the current I(login_user) is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though the
|
||||
I(session_role) were the one that had logged in originally.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The state of the language for the selected database.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
login_unix_socket:
|
||||
description:
|
||||
- Path to a Unix domain socket for local connections.
|
||||
type: str
|
||||
ssl_mode:
|
||||
description:
|
||||
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
|
||||
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
|
||||
- Default of C(prefer) matches libpq default.
|
||||
type: str
|
||||
default: prefer
|
||||
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
|
||||
ca_cert:
|
||||
description:
|
||||
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
|
||||
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
|
||||
type: str
|
||||
aliases: [ ssl_rootcert ]
|
||||
owner:
|
||||
description:
|
||||
- Set an owner for the language.
|
||||
- Ignored when I(state=absent).
|
||||
type: str
|
||||
version_added: '0.2.0'
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(lang), I(session_role),
|
||||
I(owner) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
seealso:
|
||||
- name: PostgreSQL languages
|
||||
description: General information about PostgreSQL languages.
|
||||
link: https://www.postgresql.org/docs/current/xplang.html
|
||||
- name: CREATE LANGUAGE reference
|
||||
description: Complete reference of the CREATE LANGUAGE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createlanguage.html
|
||||
- name: ALTER LANGUAGE reference
|
||||
description: Complete reference of the ALTER LANGUAGE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
|
||||
- name: DROP LANGUAGE reference
|
||||
description: Complete reference of the DROP LANGUAGE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-droplanguage.html
|
||||
author:
|
||||
- Jens Depuydt (@jensdepuydt)
|
||||
- Thomas O'Donnell (@andytom)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Add language pltclu to database testdb if it doesn't exist
|
||||
community.general.postgresql_lang: db=testdb lang=pltclu state=present
|
||||
|
||||
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
|
||||
# Marks the language as trusted if it exists but isn't trusted yet.
|
||||
# force_trust makes sure that the language will be marked as trusted
|
||||
- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
|
||||
community.general.postgresql_lang:
|
||||
db: testdb
|
||||
lang: pltclu
|
||||
state: present
|
||||
trust: yes
|
||||
force_trust: yes
|
||||
|
||||
- name: Remove language pltclu from database testdb
|
||||
community.general.postgresql_lang:
|
||||
db: testdb
|
||||
lang: pltclu
|
||||
state: absent
|
||||
|
||||
- name: Remove language pltclu from database testdb and remove all dependencies
|
||||
community.general.postgresql_lang:
|
||||
db: testdb
|
||||
lang: pltclu
|
||||
state: absent
|
||||
cascade: yes
|
||||
|
||||
- name: Remove language pltclu from database testdb but ignore errors if something prevents the removal
|
||||
community.general.postgresql_lang:
|
||||
db: testdb
|
||||
lang: pltclu
|
||||
state: absent
|
||||
fail_on_drop: no
|
||||
|
||||
- name: In testdb change owner of mylang to alice
|
||||
community.general.postgresql_lang:
|
||||
db: testdb
|
||||
lang: mylang
|
||||
owner: alice
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: ['CREATE LANGUAGE "acme"']
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import check_input
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
executed_queries = []
|
||||
|
||||
|
||||
def lang_exists(cursor, lang):
|
||||
"""Checks if language exists for db"""
|
||||
query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
|
||||
cursor.execute(query, {'lang': lang})
|
||||
return cursor.rowcount > 0
|
||||
|
||||
|
||||
def lang_istrusted(cursor, lang):
|
||||
"""Checks if language is trusted for db"""
|
||||
query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
|
||||
cursor.execute(query, {'lang': lang})
|
||||
return cursor.fetchone()[0]
|
||||
|
||||
|
||||
def lang_altertrust(cursor, lang, trust):
|
||||
"""Changes if language is trusted for db"""
|
||||
query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
|
||||
cursor.execute(query, {'trust': trust, 'lang': lang})
|
||||
executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
|
||||
return True
|
||||
|
||||
|
||||
def lang_add(cursor, lang, trust):
|
||||
"""Adds language for db"""
|
||||
if trust:
|
||||
query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
|
||||
else:
|
||||
query = 'CREATE LANGUAGE "%s"' % lang
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
|
||||
def lang_drop(cursor, lang, cascade):
|
||||
"""Drops language for db"""
|
||||
cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
|
||||
try:
|
||||
if cascade:
|
||||
query = "DROP LANGUAGE \"%s\" CASCADE" % lang
|
||||
else:
|
||||
query = "DROP LANGUAGE \"%s\"" % lang
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
except Exception:
|
||||
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
|
||||
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
|
||||
return False
|
||||
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
|
||||
return True
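# Illustrative note, not part of the original module: the SAVEPOINT /
# ROLLBACK TO SAVEPOINT pattern above lets a failed DROP LANGUAGE be undone
# without aborting the surrounding transaction, so the module can continue and
# either fail cleanly or ignore the error when fail_on_drop=no.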
|
||||
|
||||
|
||||
def get_lang_owner(cursor, lang):
|
||||
"""Get language owner.
|
||||
|
||||
Args:
|
||||
cursor (cursor): psycopg2 cursor object.
|
||||
lang (str): language name.
|
||||
"""
|
||||
query = ("SELECT r.rolname FROM pg_language l "
|
||||
"JOIN pg_roles r ON l.lanowner = r.oid "
|
||||
"WHERE l.lanname = %(lang)s")
|
||||
cursor.execute(query, {'lang': lang})
|
||||
return cursor.fetchone()[0]
|
||||
|
||||
|
||||
def set_lang_owner(cursor, lang, owner):
|
||||
"""Set language owner.
|
||||
|
||||
Args:
|
||||
cursor (cursor): psycopg2 cursor object.
|
||||
lang (str): language name.
|
||||
owner (str): name of new owner.
|
||||
"""
|
||||
query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
db=dict(type="str", required=True, aliases=["login_db"]),
|
||||
lang=dict(type="str", required=True, aliases=["name"]),
|
||||
state=dict(type="str", default="present", choices=["absent", "present"]),
|
||||
trust=dict(type="bool", default="no"),
|
||||
force_trust=dict(type="bool", default="no"),
|
||||
cascade=dict(type="bool", default="no"),
|
||||
fail_on_drop=dict(type="bool", default="yes"),
|
||||
session_role=dict(type="str"),
|
||||
owner=dict(type="str"),
|
||||
trust_input=dict(type="bool", default="yes")
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
db = module.params["db"]
|
||||
lang = module.params["lang"]
|
||||
state = module.params["state"]
|
||||
trust = module.params["trust"]
|
||||
force_trust = module.params["force_trust"]
|
||||
cascade = module.params["cascade"]
|
||||
fail_on_drop = module.params["fail_on_drop"]
|
||||
owner = module.params["owner"]
|
||||
session_role = module.params["session_role"]
|
||||
trust_input = module.params["trust_input"]
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, lang, session_role, owner)
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=False)
|
||||
cursor = db_connection.cursor()
|
||||
|
||||
changed = False
|
||||
kw = {'db': db, 'lang': lang, 'trust': trust}
|
||||
|
||||
if state == "present":
|
||||
if lang_exists(cursor, lang):
|
||||
lang_trusted = lang_istrusted(cursor, lang)
|
||||
if (lang_trusted and not trust) or (not lang_trusted and trust):
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
changed = lang_altertrust(cursor, lang, trust)
|
||||
else:
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
else:
|
||||
changed = lang_add(cursor, lang, trust)
|
||||
if force_trust:
|
||||
changed = lang_altertrust(cursor, lang, trust)
|
||||
|
||||
else:
|
||||
if lang_exists(cursor, lang):
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
kw['lang_dropped'] = True
|
||||
else:
|
||||
changed = lang_drop(cursor, lang, cascade)
|
||||
if fail_on_drop and not changed:
|
||||
msg = ("unable to drop language, use cascade "
|
||||
"to delete dependencies or fail_on_drop=no to ignore")
|
||||
module.fail_json(msg=msg)
|
||||
kw['lang_dropped'] = changed
|
||||
|
||||
if owner and state == 'present':
|
||||
if lang_exists(cursor, lang):
|
||||
if owner != get_lang_owner(cursor, lang):
|
||||
changed = set_lang_owner(cursor, lang, owner)
|
||||
|
||||
if changed:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
kw['changed'] = changed
|
||||
kw['queries'] = executed_queries
|
||||
db_connection.close()
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,228 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_membership
|
||||
short_description: Add or remove PostgreSQL roles from groups
|
||||
description:
|
||||
- Adds or removes PostgreSQL roles from groups (other roles).
|
||||
- Users are roles with login privilege.
|
||||
- Groups are PostgreSQL roles usually without LOGIN privilege.
|
||||
- "Common use case:"
|
||||
- 1) add a new group (groups) by M(community.general.postgresql_user) module with I(role_attr_flags=NOLOGIN)
|
||||
- 2) grant them desired privileges by M(community.general.postgresql_privs) module
|
||||
- 3) add desired PostgreSQL users to the new group (groups) by this module
|
||||
options:
|
||||
groups:
|
||||
description:
|
||||
- The list of groups (roles) that need to be granted to or revoked from I(target_roles).
|
||||
required: yes
|
||||
type: list
|
||||
elements: str
|
||||
aliases:
|
||||
- group
|
||||
- source_role
|
||||
- source_roles
|
||||
target_roles:
|
||||
description:
|
||||
- The list of target roles (groups will be granted to them).
|
||||
required: yes
|
||||
type: list
|
||||
elements: str
|
||||
aliases:
|
||||
- target_role
|
||||
- users
|
||||
- user
|
||||
fail_on_role:
|
||||
description:
|
||||
- If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue.
|
||||
default: yes
|
||||
type: bool
|
||||
state:
|
||||
description:
|
||||
- Membership state.
|
||||
- I(state=present) implies the I(groups) must be granted to I(target_roles).
|
||||
- I(state=absent) implies the I(groups) must be revoked from I(target_roles).
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(groups),
|
||||
I(target_roles), I(session_role) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
seealso:
|
||||
- module: community.general.postgresql_user
|
||||
- module: community.general.postgresql_privs
|
||||
- module: community.general.postgresql_owner
|
||||
- name: PostgreSQL role membership reference
|
||||
description: Complete reference of the PostgreSQL role membership documentation.
|
||||
link: https://www.postgresql.org/docs/current/role-membership.html
|
||||
- name: PostgreSQL role attributes reference
|
||||
description: Complete reference of the PostgreSQL role attributes documentation.
|
||||
link: https://www.postgresql.org/docs/current/role-attributes.html
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Grant role read_only to alice and bob
|
||||
community.general.postgresql_membership:
|
||||
group: read_only
|
||||
target_roles:
|
||||
- alice
|
||||
- bob
|
||||
state: present
|
||||
|
||||
# you can also use target_roles: alice,bob,etc to pass the role list
|
||||
|
||||
- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
|
||||
community.general.postgresql_membership:
|
||||
groups:
|
||||
- read_only
|
||||
- exec_func
|
||||
target_role: bob
|
||||
fail_on_role: no
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
|
||||
granted:
|
||||
description: Dict of granted groups and roles.
|
||||
returned: if I(state=present)
|
||||
type: dict
|
||||
sample: { "ro_group": [ "alice", "bob" ] }
|
||||
revoked:
|
||||
description: Dict of revoked groups and roles.
|
||||
returned: if I(state=absent)
|
||||
type: dict
|
||||
sample: { "ro_group": [ "alice", "bob" ] }
|
||||
state:
|
||||
description: Membership state that tried to be set.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "present"
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import check_input
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
PgMembership,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
|
||||
target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
|
||||
fail_on_role=dict(type='bool', default=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
groups = module.params['groups']
|
||||
target_roles = module.params['target_roles']
|
||||
fail_on_role = module.params['fail_on_role']
|
||||
state = module.params['state']
|
||||
session_role = module.params['session_role']
|
||||
trust_input = module.params['trust_input']
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, groups, target_roles, session_role)
|
||||
|
||||
conn_params = get_conn_params(module, module.params, warn_db_default=False)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=False)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
##############
|
||||
# Create the object and do main job:
|
||||
|
||||
pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)
|
||||
|
||||
if state == 'present':
|
||||
pg_membership.grant()
|
||||
|
||||
elif state == 'absent':
|
||||
pg_membership.revoke()
|
||||
|
||||
# Rollback if it's possible and check_mode:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Make return values:
|
||||
return_dict = dict(
|
||||
changed=pg_membership.changed,
|
||||
state=state,
|
||||
groups=pg_membership.groups,
|
||||
target_roles=pg_membership.target_roles,
|
||||
queries=pg_membership.executed_queries,
|
||||
)
|
||||
|
||||
if state == 'present':
|
||||
return_dict['granted'] = pg_membership.granted
|
||||
elif state == 'absent':
|
||||
return_dict['revoked'] = pg_membership.revoked
|
||||
|
||||
module.exit_json(**return_dict)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,453 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_owner
|
||||
short_description: Change an owner of PostgreSQL database object
|
||||
description:
|
||||
- Change an owner of PostgreSQL database object.
|
||||
- Also allows reassigning the ownership of database objects owned by a database role to another role.
|
||||
|
||||
options:
|
||||
new_owner:
|
||||
description:
|
||||
- Role (user/group) to set as an I(obj_name) owner.
|
||||
type: str
|
||||
required: yes
|
||||
obj_name:
|
||||
description:
|
||||
    - Name of the database object to change the ownership of.
|
||||
- Mutually exclusive with I(reassign_owned_by).
|
||||
type: str
|
||||
obj_type:
|
||||
description:
|
||||
- Type of a database object.
|
||||
- Mutually exclusive with I(reassign_owned_by).
|
||||
type: str
|
||||
choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
|
||||
aliases:
|
||||
- type
|
||||
reassign_owned_by:
|
||||
description:
|
||||
- The list of role names. The ownership of all the objects within the current database,
|
||||
      and of all shared objects (databases, tablespaces), owned by these roles will be reassigned to I(new_owner).
|
||||
    - Pay attention: it reassigns all objects owned by these roles in the I(db)!
|
||||
    - If the role(s) exist, the task always returns C(changed=true).
|
||||
- Cannot reassign ownership of objects that are required by the database system.
|
||||
- Mutually exclusive with C(obj_type).
|
||||
type: list
|
||||
elements: str
|
||||
fail_on_role:
|
||||
description:
|
||||
- If C(yes), fail when I(reassign_owned_by) role does not exist.
|
||||
Otherwise just warn and continue.
|
||||
- Mutually exclusive with I(obj_name) and I(obj_type).
|
||||
default: yes
|
||||
type: bool
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(new_owner), I(obj_name),
|
||||
I(reassign_owned_by), I(session_role) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
seealso:
|
||||
- module: community.general.postgresql_user
|
||||
- module: community.general.postgresql_privs
|
||||
- module: community.general.postgresql_membership
|
||||
- name: PostgreSQL REASSIGN OWNED command reference
|
||||
description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
|
||||
# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
|
||||
|
||||
- name: The same as above in a playbook
|
||||
community.general.postgresql_owner:
|
||||
db: bar
|
||||
new_owner: alice
|
||||
obj_name: myfunc
|
||||
obj_type: function
|
||||
|
||||
- name: Set owner as bob for table acme in database bar
|
||||
community.general.postgresql_owner:
|
||||
db: bar
|
||||
new_owner: bob
|
||||
obj_name: acme
|
||||
obj_type: table
|
||||
|
||||
- name: Set owner as alice for view test_view in database bar
|
||||
community.general.postgresql_owner:
|
||||
db: bar
|
||||
new_owner: alice
|
||||
obj_name: test_view
|
||||
obj_type: view
|
||||
|
||||
- name: Set owner as bob for tablespace ssd in database foo
|
||||
community.general.postgresql_owner:
|
||||
db: foo
|
||||
new_owner: bob
|
||||
obj_name: ssd
|
||||
obj_type: tablespace
|
||||
|
||||
- name: Reassign all objects in database bar owned by bob to alice
|
||||
community.general.postgresql_owner:
|
||||
db: bar
|
||||
new_owner: alice
|
||||
reassign_owned_by: bob
|
||||
|
||||
- name: Reassign all objects in database bar owned by bob and bill to alice
|
||||
community.general.postgresql_owner:
|
||||
db: bar
|
||||
new_owner: alice
|
||||
reassign_owned_by:
|
||||
- bob
|
||||
- bill
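
# A hypothetical extra example (not in the original file); the schema name is
# made up and only illustrates the obj_type=schema choice documented above.
- name: Set owner as alice for schema acme_schema in database bar
  community.general.postgresql_owner:
    db: bar
    new_owner: alice
    obj_name: acme_schema
    obj_type: schema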
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: str
|
||||
sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
pg_quote_identifier,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
class PgOwnership(object):
|
||||
|
||||
"""Class for changing ownership of PostgreSQL objects.
|
||||
|
||||
Arguments:
|
||||
module (AnsibleModule): Object of Ansible module class.
|
||||
cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
|
||||
role (str): Role name to set as a new owner of objects.
|
||||
|
||||
Important:
|
||||
If you want to add handling of a new type of database objects:
|
||||
1. Add a specific method for this like self.__set_db_owner(), etc.
|
||||
2. Add a condition with a check of ownership for new type objects to self.__is_owner()
|
||||
3. Add a condition with invocation of the specific method to self.set_owner()
|
||||
4. Add the information to the module documentation
|
||||
That's all.
|
||||
"""
|
||||
|
||||
def __init__(self, module, cursor, role):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.check_role_exists(role)
|
||||
self.role = role
|
||||
self.changed = False
|
||||
self.executed_queries = []
|
||||
self.obj_name = ''
|
||||
self.obj_type = ''
|
||||
|
||||
def check_role_exists(self, role, fail_on_role=True):
|
||||
"""Check the role exists or not.
|
||||
|
||||
Arguments:
|
||||
role (str): Role name.
|
||||
fail_on_role (bool): If True, fail when the role does not exist.
|
||||
Otherwise just warn and continue.
|
||||
"""
|
||||
if not self.__role_exists(role):
|
||||
if fail_on_role:
|
||||
self.module.fail_json(msg="Role '%s' does not exist" % role)
|
||||
else:
|
||||
self.module.warn("Role '%s' does not exist, pass" % role)
|
||||
|
||||
return False
|
||||
|
||||
else:
|
||||
return True
|
||||
|
||||
def reassign(self, old_owners, fail_on_role):
|
||||
"""Implements REASSIGN OWNED BY command.
|
||||
|
||||
If success, set self.changed as True.
|
||||
|
||||
Arguments:
|
||||
old_owners (list): The ownership of all the objects within
|
||||
the current database, and of all shared objects (databases, tablespaces),
|
||||
owned by these roles will be reassigned to self.role.
|
||||
fail_on_role (bool): If True, fail when a role from old_owners does not exist.
|
||||
Otherwise just warn and continue.
|
||||
"""
|
||||
roles = []
|
||||
for r in old_owners:
|
||||
if self.check_role_exists(r, fail_on_role):
|
||||
roles.append('"%s"' % r)
|
||||
|
||||
# Roles do not exist, nothing to do, exit:
|
||||
if not roles:
|
||||
return False
|
||||
|
||||
old_owners = ','.join(roles)
|
||||
|
||||
query = ['REASSIGN OWNED BY']
|
||||
query.append(old_owners)
|
||||
query.append('TO "%s"' % self.role)
|
||||
query = ' '.join(query)
|
||||
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def set_owner(self, obj_type, obj_name):
|
||||
"""Change owner of a database object.
|
||||
|
||||
Arguments:
|
||||
obj_type (str): Type of object (like database, table, view, etc.).
|
||||
obj_name (str): Object name.
|
||||
"""
|
||||
self.obj_name = obj_name
|
||||
self.obj_type = obj_type
|
||||
|
||||
# if a new_owner is the object owner now,
|
||||
# nothing to do:
|
||||
if self.__is_owner():
|
||||
return False
|
||||
|
||||
if obj_type == 'database':
|
||||
self.__set_db_owner()
|
||||
|
||||
elif obj_type == 'function':
|
||||
self.__set_func_owner()
|
||||
|
||||
elif obj_type == 'sequence':
|
||||
self.__set_seq_owner()
|
||||
|
||||
elif obj_type == 'schema':
|
||||
self.__set_schema_owner()
|
||||
|
||||
elif obj_type == 'table':
|
||||
self.__set_table_owner()
|
||||
|
||||
elif obj_type == 'tablespace':
|
||||
self.__set_tablespace_owner()
|
||||
|
||||
elif obj_type == 'view':
|
||||
self.__set_view_owner()
|
||||
|
||||
elif obj_type == 'matview':
|
||||
self.__set_mat_view_owner()
|
||||
|
||||
def __is_owner(self):
|
||||
"""Return True if self.role is the current object owner."""
|
||||
if self.obj_type == 'table':
|
||||
query = ("SELECT 1 FROM pg_tables "
|
||||
"WHERE tablename = %(obj_name)s "
|
||||
"AND tableowner = %(role)s")
|
||||
|
||||
elif self.obj_type == 'database':
|
||||
query = ("SELECT 1 FROM pg_database AS d "
|
||||
"JOIN pg_roles AS r ON d.datdba = r.oid "
|
||||
"WHERE d.datname = %(obj_name)s "
|
||||
"AND r.rolname = %(role)s")
|
||||
|
||||
elif self.obj_type == 'function':
|
||||
query = ("SELECT 1 FROM pg_proc AS f "
|
||||
"JOIN pg_roles AS r ON f.proowner = r.oid "
|
||||
"WHERE f.proname = %(obj_name)s "
|
||||
"AND r.rolname = %(role)s")
|
||||
|
||||
elif self.obj_type == 'sequence':
|
||||
query = ("SELECT 1 FROM pg_class AS c "
|
||||
"JOIN pg_roles AS r ON c.relowner = r.oid "
|
||||
"WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
|
||||
"AND r.rolname = %(role)s")
|
||||
|
||||
elif self.obj_type == 'schema':
|
||||
query = ("SELECT 1 FROM information_schema.schemata "
|
||||
"WHERE schema_name = %(obj_name)s "
|
||||
"AND schema_owner = %(role)s")
|
||||
|
||||
elif self.obj_type == 'tablespace':
|
||||
query = ("SELECT 1 FROM pg_tablespace AS t "
|
||||
"JOIN pg_roles AS r ON t.spcowner = r.oid "
|
||||
"WHERE t.spcname = %(obj_name)s "
|
||||
"AND r.rolname = %(role)s")
|
||||
|
||||
elif self.obj_type == 'view':
|
||||
query = ("SELECT 1 FROM pg_views "
|
||||
"WHERE viewname = %(obj_name)s "
|
||||
"AND viewowner = %(role)s")
|
||||
|
||||
elif self.obj_type == 'matview':
|
||||
query = ("SELECT 1 FROM pg_matviews "
|
||||
"WHERE matviewname = %(obj_name)s "
|
||||
"AND matviewowner = %(role)s")
|
||||
|
||||
query_params = {'obj_name': self.obj_name, 'role': self.role}
|
||||
return exec_sql(self, query, query_params, add_to_executed=False)
|
||||
|
||||
def __set_db_owner(self):
|
||||
"""Set the database owner."""
|
||||
query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __set_func_owner(self):
|
||||
"""Set the function owner."""
|
||||
query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __set_seq_owner(self):
|
||||
"""Set the sequence owner."""
|
||||
query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
|
||||
self.role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __set_schema_owner(self):
|
||||
"""Set the schema owner."""
|
||||
query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'),
|
||||
self.role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __set_table_owner(self):
|
||||
"""Set the table owner."""
|
||||
query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
|
||||
self.role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __set_tablespace_owner(self):
|
||||
"""Set the tablespace owner."""
|
||||
query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __set_view_owner(self):
|
||||
"""Set the view owner."""
|
||||
query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
|
||||
self.role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __set_mat_view_owner(self):
|
||||
"""Set the materialized view owner."""
|
||||
query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
|
||||
self.role)
|
||||
self.changed = exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __role_exists(self, role):
|
||||
"""Return True if role exists, otherwise return False."""
|
||||
query_params = {'role': role}
|
||||
query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
|
||||
return exec_sql(self, query, query_params, add_to_executed=False)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
new_owner=dict(type='str', required=True),
|
||||
obj_name=dict(type='str'),
|
||||
obj_type=dict(type='str', aliases=['type'], choices=[
|
||||
'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
|
||||
reassign_owned_by=dict(type='list', elements='str'),
|
||||
fail_on_role=dict(type='bool', default=True),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
mutually_exclusive=[
|
||||
['obj_name', 'reassign_owned_by'],
|
||||
['obj_type', 'reassign_owned_by'],
|
||||
['obj_name', 'fail_on_role'],
|
||||
['obj_type', 'fail_on_role'],
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
new_owner = module.params['new_owner']
|
||||
obj_name = module.params['obj_name']
|
||||
obj_type = module.params['obj_type']
|
||||
reassign_owned_by = module.params['reassign_owned_by']
|
||||
fail_on_role = module.params['fail_on_role']
|
||||
session_role = module.params['session_role']
|
||||
trust_input = module.params['trust_input']
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, new_owner, obj_name, reassign_owned_by, session_role)
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=False)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
##############
|
||||
# Create the object and do main job:
|
||||
pg_ownership = PgOwnership(module, cursor, new_owner)
|
||||
|
||||
# if we want to change ownership:
|
||||
if obj_name:
|
||||
pg_ownership.set_owner(obj_type, obj_name)
|
||||
|
||||
# if we want to reassign objects owned by roles:
|
||||
elif reassign_owned_by:
|
||||
pg_ownership.reassign(reassign_owned_by, fail_on_role)
|
||||
|
||||
# Rollback if it's possible and check_mode:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
module.exit_json(
|
||||
changed=pg_ownership.changed,
|
||||
queries=pg_ownership.executed_queries,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,745 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
'''
|
||||
This module is used to manage postgres pg_hba files with Ansible.
|
||||
'''
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_pg_hba
|
||||
short_description: Add, remove or modify a rule in a pg_hba file
|
||||
description:
|
||||
- The fundamental function of the module is to create, or delete lines in pg_hba files.
|
||||
- The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
|
||||
  If they are not unique and the matching rule is 'the one to change', only one of them will remain for C(state=present), or none for C(state=absent).
|
||||
extends_documentation_fragment: files
|
||||
options:
|
||||
address:
|
||||
description:
|
||||
- The source address/net where the connections could come from.
|
||||
- Will not be used for entries of I(type)=C(local).
|
||||
- You can also use keywords C(all), C(samehost), and C(samenet).
|
||||
default: samehost
|
||||
type: str
|
||||
aliases: [ source, src ]
|
||||
backup:
|
||||
description:
|
||||
- If set, create a backup of the C(pg_hba) file before it is modified.
|
||||
      The location of the backup is returned in the I(backup_file) return value by this module.
|
||||
default: false
|
||||
type: bool
|
||||
backup_file:
|
||||
description:
|
||||
- Write backup to a specific backupfile rather than a temp file.
|
||||
type: str
|
||||
create:
|
||||
description:
|
||||
    - Create a C(pg_hba) file if none exists.
|
||||
- When set to false, an error is raised when the C(pg_hba) file doesn't exist.
|
||||
default: false
|
||||
type: bool
|
||||
contype:
|
||||
description:
|
||||
- Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
|
||||
type: str
|
||||
choices: [ local, host, hostnossl, hostssl ]
|
||||
databases:
|
||||
description:
|
||||
- Databases this line applies to.
|
||||
default: all
|
||||
type: str
|
||||
dest:
|
||||
description:
|
||||
- Path to C(pg_hba) file to modify.
|
||||
type: path
|
||||
required: true
|
||||
method:
|
||||
description:
|
||||
- Authentication method to be used.
|
||||
type: str
|
||||
choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256 , sspi, trust ]
|
||||
default: md5
|
||||
netmask:
|
||||
description:
|
||||
- The netmask of the source address.
|
||||
type: str
|
||||
options:
|
||||
description:
|
||||
- Additional options for the authentication I(method).
|
||||
type: str
|
||||
order:
|
||||
description:
|
||||
- The entries will be written out in a specific order.
|
||||
With this option you can control by which field they are ordered first, second and last.
|
||||
s=source, d=databases, u=users.
|
||||
This option is deprecated since 2.9 and will be removed in community.general 3.0.0.
|
||||
      The sort order is now hardcoded to sdu.
|
||||
type: str
|
||||
default: sdu
|
||||
choices: [ sdu, sud, dsu, dus, usd, uds ]
|
||||
state:
|
||||
description:
|
||||
- The lines will be added/modified when C(state=present) and removed when C(state=absent).
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
users:
|
||||
description:
|
||||
- Users this line applies to.
|
||||
type: str
|
||||
default: all
|
||||
|
||||
notes:
|
||||
- The default authentication assumes that on the host, you are either logging in as or
|
||||
sudo'ing to an account with appropriate permissions to read and modify the file.
|
||||
- This module also returns the pg_hba info. You can use this module to only retrieve the info by specifying I(dest) alone.
|
||||
  The info can be found in the returned data under the key pg_hba, as a list containing a dict per rule.
|
||||
- This module will sort resulting C(pg_hba) files if a rule change is required.
|
||||
  This could give unexpected results with manually created hba files, if they were improperly sorted.
|
||||
  For example, a rule was created for a net first and for an IP in that net range next.
|
||||
  In that situation, the 'IP specific rule' will never hit; it is obsolete in the C(pg_hba) file.
|
||||
After the C(pg_hba) file is rewritten by the M(community.general.postgresql_pg_hba) module, the ip specific rule will be sorted above the range rule.
|
||||
And then it will hit, which will give unexpected results.
|
||||
- With the 'order' parameter you can control which field is used to sort first, next and last.
|
||||
- The module supports a check mode and a diff mode.
|
||||
|
||||
seealso:
|
||||
- name: PostgreSQL pg_hba.conf file reference
|
||||
description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
|
||||
link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
|
||||
|
||||
requirements:
|
||||
- ipaddress
|
||||
|
||||
author: Sebastiaan Mannem (@sebasmannem)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication.
|
||||
community.general.postgresql_pg_hba:
|
||||
dest: /var/lib/postgres/data/pg_hba.conf
|
||||
contype: host
|
||||
users: joe,simon
|
||||
source: ::1
|
||||
databases: sales,logistics
|
||||
method: peer
|
||||
create: true
|
||||
|
||||
- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication.
|
||||
community.general.postgresql_pg_hba:
|
||||
dest: /var/lib/postgres/data/pg_hba.conf
|
||||
contype: host
|
||||
users: replication
|
||||
source: 192.168.0.100/24
|
||||
databases: replication
|
||||
method: cert
|
||||
|
||||
- name: Revoke access from local user mary on database mydb.
|
||||
community.general.postgresql_pg_hba:
|
||||
dest: /var/lib/postgres/data/pg_hba.conf
|
||||
contype: local
|
||||
users: mary
|
||||
databases: mydb
|
||||
state: absent
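
# A minimal read-only sketch (not in the original examples): with only I(dest)
# given, the module just parses and returns the rules; the debug task is assumed.
- name: Read the current pg_hba rules without changing anything
  community.general.postgresql_pg_hba:
    dest: /var/lib/postgres/data/pg_hba.conf
  register: pg_hba_info

- name: Show the parsed rules
  ansible.builtin.debug:
    var: pg_hba_info.pg_hba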
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
msgs:
|
||||
  description: List of textual messages describing what was done
|
||||
returned: always
|
||||
type: list
|
||||
sample:
|
||||
"msgs": [
|
||||
"Removing",
|
||||
"Changed",
|
||||
"Writing"
|
||||
]
|
||||
backup_file:
|
||||
description: File that the original pg_hba file was backed up to
|
||||
returned: changed
|
||||
type: str
|
||||
sample: /tmp/pg_hba_jxobj_p
|
||||
pg_hba:
|
||||
description: List of the pg_hba rules as they are configured in the specified hba file
|
||||
returned: always
|
||||
type: list
|
||||
sample:
|
||||
"pg_hba": [
|
||||
{
|
||||
"db": "all",
|
||||
"method": "md5",
|
||||
"src": "samehost",
|
||||
"type": "host",
|
||||
"usr": "all"
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
|
||||
IPADDRESS_IMP_ERR = None
|
||||
try:
|
||||
import ipaddress
|
||||
except ImportError:
|
||||
IPADDRESS_IMP_ERR = traceback.format_exc()
|
||||
|
||||
import tempfile
|
||||
import shutil
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
# from ansible.module_utils.postgres import postgres_common_argument_spec
|
||||
|
||||
PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
|
||||
"ldap", "radius", "cert", "pam", "scram-sha-256"]
|
||||
PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"]
|
||||
PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
|
||||
PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
|
||||
|
||||
WHITESPACES_RE = re.compile(r'\s+')
|
||||
|
||||
|
||||
class PgHbaError(Exception):
|
||||
'''
|
||||
This exception is raised when parsing the pg_hba file ends in an error.
|
||||
'''
|
||||
|
||||
|
||||
class PgHbaRuleError(PgHbaError):
|
||||
'''
|
||||
    This exception is raised when a rule in the pg_hba file cannot be parsed or is invalid.
|
||||
'''
|
||||
|
||||
|
||||
class PgHbaRuleChanged(PgHbaRuleError):
|
||||
'''
|
||||
This exception is raised when a new parsed rule is a changed version of an existing rule.
|
||||
'''
|
||||
|
||||
|
||||
class PgHbaValueError(PgHbaError):
|
||||
'''
|
||||
    This exception is raised when a value in the pg_hba file is invalid.
|
||||
'''
|
||||
|
||||
|
||||
class PgHbaRuleValueError(PgHbaRuleError):
|
||||
'''
|
||||
    This exception is raised when a value in a pg_hba rule is invalid.
|
||||
'''
|
||||
|
||||
|
||||
class PgHba(object):
|
||||
"""
|
||||
PgHba object to read/write entries to/from.
|
||||
pg_hba_file - the pg_hba file almost always /etc/pg_hba
|
||||
"""
|
||||
def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False):
|
||||
if order not in PG_HBA_ORDERS:
|
||||
msg = "invalid order setting {0} (should be one of '{1}')."
|
||||
raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
|
||||
self.pg_hba_file = pg_hba_file
|
||||
self.rules = None
|
||||
self.comment = None
|
||||
self.order = order
|
||||
self.backup = backup
|
||||
self.last_backup = None
|
||||
self.create = create
|
||||
self.unchanged()
|
||||
        # self.databases will be updated by add_rule and gives some idea of the number of databases
|
||||
# (at least that are handled by this pg_hba)
|
||||
self.databases = set(['postgres', 'template0', 'template1'])
|
||||
|
||||
        # self.users will be updated by add_rule and gives some idea of the number of users
|
||||
# (at least that are handled by this pg_hba) since this might also be groups with multiple
|
||||
# users, this might be totally off, but at least it is some info...
|
||||
self.users = set(['postgres'])
|
||||
|
||||
self.read()
|
||||
|
||||
def unchanged(self):
|
||||
'''
|
||||
        This method resets self.diff to an empty default
|
||||
'''
|
||||
self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
|
||||
'after': {'file': self.pg_hba_file, 'pg_hba': []}}
|
||||
|
||||
def read(self):
|
||||
'''
|
||||
Read in the pg_hba from the system
|
||||
'''
|
||||
self.rules = {}
|
||||
self.comment = []
|
||||
# read the pg_hbafile
|
||||
try:
|
||||
with open(self.pg_hba_file, 'r') as file:
|
||||
for line in file:
|
||||
line = line.strip()
|
||||
# uncomment
|
||||
if '#' in line:
|
||||
line, comment = line.split('#', 1)
|
||||
self.comment.append('#' + comment)
|
||||
try:
|
||||
self.add_rule(PgHbaRule(line=line))
|
||||
except PgHbaRuleError:
|
||||
pass
|
||||
self.unchanged()
|
||||
except IOError:
|
||||
pass
|
||||
|
||||
def write(self, backup_file=''):
|
||||
'''
|
||||
This method writes the PgHba rules (back) to a file.
|
||||
'''
|
||||
if not self.changed():
|
||||
return False
|
||||
|
||||
contents = self.render()
|
||||
if self.pg_hba_file:
|
||||
if not (os.path.isfile(self.pg_hba_file) or self.create):
|
||||
raise PgHbaError("pg_hba file '{0}' doesn't exist. "
|
||||
"Use create option to autocreate.".format(self.pg_hba_file))
|
||||
if self.backup and os.path.isfile(self.pg_hba_file):
|
||||
if backup_file:
|
||||
self.last_backup = backup_file
|
||||
else:
|
||||
__backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
|
||||
shutil.copy(self.pg_hba_file, self.last_backup)
|
||||
fileh = open(self.pg_hba_file, 'w')
|
||||
else:
|
||||
filed, __path = tempfile.mkstemp(prefix='pg_hba')
|
||||
fileh = os.fdopen(filed, 'w')
|
||||
|
||||
fileh.write(contents)
|
||||
self.unchanged()
|
||||
fileh.close()
|
||||
return True
|
||||
|
||||
def add_rule(self, rule):
|
||||
'''
|
||||
This method can be used to add a rule to the list of rules in this PgHba object
|
||||
'''
|
||||
key = rule.key()
|
||||
try:
|
||||
try:
|
||||
oldrule = self.rules[key]
|
||||
except KeyError:
|
||||
raise PgHbaRuleChanged
|
||||
ekeys = set(list(oldrule.keys()) + list(rule.keys()))
|
||||
ekeys.remove('line')
|
||||
for k in ekeys:
|
||||
if oldrule.get(k) != rule.get(k):
|
||||
raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
|
||||
except PgHbaRuleChanged:
|
||||
self.rules[key] = rule
|
||||
self.diff['after']['pg_hba'].append(rule.line())
|
||||
if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
|
||||
databases = set(rule['db'].split(','))
|
||||
self.databases.update(databases)
|
||||
if rule['usr'] != 'all':
|
||||
user = rule['usr']
|
||||
if user[0] == '+':
|
||||
user = user[1:]
|
||||
self.users.add(user)
|
||||
|
||||
def remove_rule(self, rule):
|
||||
'''
|
||||
This method can be used to find and remove a rule. It doesn't look for the exact rule, only
|
||||
the rule with the same key.
|
||||
'''
|
||||
keys = rule.key()
|
||||
try:
|
||||
del self.rules[keys]
|
||||
self.diff['before']['pg_hba'].append(rule.line())
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def get_rules(self, with_lines=False):
|
||||
'''
|
||||
This method returns all the rules of the PgHba object
|
||||
'''
|
||||
rules = sorted(self.rules.values())
|
||||
for rule in rules:
|
||||
ret = {}
|
||||
for key, value in rule.items():
|
||||
ret[key] = value
|
||||
if not with_lines:
|
||||
if 'line' in ret:
|
||||
del ret['line']
|
||||
else:
|
||||
ret['line'] = rule.line()
|
||||
|
||||
yield ret
|
||||
|
||||
def render(self):
|
||||
'''
|
||||
This method renders the content of the PgHba rules and comments.
|
||||
The returning value can be used directly to write to a new file.
|
||||
'''
|
||||
comment = '\n'.join(self.comment)
|
||||
rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)])
|
||||
result = comment + '\n' + rule_lines
|
||||
# End it properly with a linefeed (if not already).
|
||||
if result and result[-1] not in ['\n', '\r']:
|
||||
result += '\n'
|
||||
return result
|
||||
|
||||
def changed(self):
|
||||
'''
|
||||
This method can be called to detect if the PgHba file has been changed.
|
||||
'''
|
||||
return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba'])
|
||||
|
||||
|
||||
class PgHbaRule(dict):
|
||||
'''
|
||||
This class represents one rule as defined in a line in a PgHbaFile.
|
||||
'''
|
||||
|
||||
def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
|
||||
method=None, options=None, line=None):
|
||||
'''
|
||||
        This function can be called with a comma separated list of databases and a comma separated
|
||||
        list of users and it will act as a generator that returns an expanded list of rules one by
|
||||
one.
|
||||
'''
|
||||
|
||||
super(PgHbaRule, self).__init__()
|
||||
|
||||
if line:
|
||||
# Read values from line if parsed
|
||||
self.fromline(line)
|
||||
|
||||
# read rule cols from parsed items
|
||||
rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
|
||||
for key, value in rule.items():
|
||||
if value:
|
||||
self[key] = value
|
||||
|
||||
# Some sanity checks
|
||||
for key in ['method', 'type']:
|
||||
if key not in self:
|
||||
raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
|
||||
|
||||
if self['method'] not in PG_HBA_METHODS:
|
||||
msg = "invalid method {0} (should be one of '{1}')."
|
||||
raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
|
||||
|
||||
if self['type'] not in PG_HBA_TYPES:
|
||||
msg = "invalid connection type {0} (should be one of '{1}')."
|
||||
raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
|
||||
|
||||
if self['type'] == 'local':
|
||||
self.unset('src')
|
||||
self.unset('mask')
|
||||
elif 'src' not in self:
|
||||
            raise PgHbaRuleError('Missing src in rule {0}'.format(self))
|
||||
elif '/' in self['src']:
|
||||
self.unset('mask')
|
||||
else:
|
||||
self['src'] = str(self.source())
|
||||
self.unset('mask')
|
||||
|
||||
def unset(self, key):
|
||||
'''
|
||||
This method is used to unset certain columns if they exist
|
||||
'''
|
||||
if key in self:
|
||||
del self[key]
|
||||
|
||||
def line(self):
|
||||
'''
|
||||
This method can be used to return (or generate) the line
|
||||
'''
|
||||
try:
|
||||
return self['line']
|
||||
except KeyError:
|
||||
self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
|
||||
return self['line']
|
||||
|
||||
def fromline(self, line):
|
||||
'''
|
||||
split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
|
||||
'''
|
||||
if WHITESPACES_RE.sub('', line) == '':
|
||||
# empty line. skip this one...
|
||||
return
|
||||
cols = WHITESPACES_RE.split(line)
|
||||
if len(cols) < 4:
|
||||
msg = "Rule {0} has too few columns."
|
||||
raise PgHbaValueError(msg.format(line))
|
||||
if cols[0] not in PG_HBA_TYPES:
|
||||
msg = "Rule {0} has unknown type: {1}."
|
||||
raise PgHbaValueError(msg.format(line, cols[0]))
|
||||
if cols[0] == 'local':
|
||||
cols.insert(3, None) # No address
|
||||
cols.insert(3, None) # No IP-mask
|
||||
if len(cols) < 6:
|
||||
cols.insert(4, None) # No IP-mask
|
||||
elif cols[5] not in PG_HBA_METHODS:
|
||||
cols.insert(4, None) # No IP-mask
|
||||
if cols[5] not in PG_HBA_METHODS:
|
||||
raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
|
||||
|
||||
if len(cols) < 7:
|
||||
cols.insert(6, None) # No auth-options
|
||||
else:
|
||||
cols[6] = " ".join(cols[6:]) # combine all auth-options
|
||||
rule = dict(zip(PG_HBA_HDR, cols[:7]))
|
||||
for key, value in rule.items():
|
||||
if value:
|
||||
self[key] = value
|
||||
|
||||
def key(self):
|
||||
'''
|
||||
This method can be used to get the key from a rule.
|
||||
'''
|
||||
if self['type'] == 'local':
|
||||
source = 'local'
|
||||
else:
|
||||
source = str(self.source())
|
||||
return (source, self['db'], self['usr'])
|
||||
|
||||
def source(self):
|
||||
'''
|
||||
This method is used to get the source of a rule as an ipaddress object if possible.
|
||||
'''
|
||||
if 'mask' in self.keys():
|
||||
try:
|
||||
ipaddress.ip_address(u'{0}'.format(self['src']))
|
||||
except ValueError:
|
||||
raise PgHbaValueError('Mask was specified, but source "{0}" '
|
||||
                                      'is not a valid ip'.format(self['src']))
|
||||
# ipaddress module cannot work with ipv6 netmask, so lets convert it to prefixlen
|
||||
            # furthermore, ipv4 with a bad netmask throws a "Rule {} doesn't seem to be an ip, but has a
|
||||
            # mask" error that doesn't really describe what is going on.
|
||||
try:
|
||||
mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
|
||||
except ValueError:
|
||||
raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
|
||||
binvalue = "{0:b}".format(int(mask_as_ip))
|
||||
if '01' in binvalue:
|
||||
raise PgHbaValueError('IP mask {0} seems invalid '
|
||||
'(binary value has 1 after 0)'.format(self['mask']))
|
||||
prefixlen = binvalue.count('1')
|
||||
sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
|
||||
try:
|
||||
return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
|
||||
except ValueError:
|
||||
                raise PgHbaValueError('{0} is not a valid address range'.format(sourcenw))
|
||||
|
||||
try:
|
||||
return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
|
||||
except ValueError:
|
||||
return self['src']
|
||||
|
||||
def __lt__(self, other):
|
||||
"""This function helps sorted to decide how to sort.
|
||||
|
||||
It just checks itself against the other and decides on some key values
|
||||
if it should be sorted higher or lower in the list.
|
||||
The way it works:
|
||||
For networks, every 1 in 'netmask in binary' makes the subnet more specific.
|
||||
Therefore I chose to use prefix as the weight.
|
||||
So a single IP (/32) should have twice the weight of a /16 network.
|
||||
To keep everything in the same weight scale,
|
||||
- for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
|
||||
- for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
|
||||
Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
|
||||
which corresponds to ipv6 (0-128).
|
||||
"""
|
||||
myweight = self.source_weight()
|
||||
hisweight = other.source_weight()
|
||||
if myweight != hisweight:
|
||||
return myweight > hisweight
|
||||
|
||||
myweight = self.db_weight()
|
||||
hisweight = other.db_weight()
|
||||
if myweight != hisweight:
|
||||
return myweight < hisweight
|
||||
|
||||
myweight = self.user_weight()
|
||||
hisweight = other.user_weight()
|
||||
if myweight != hisweight:
|
||||
return myweight < hisweight
|
||||
try:
|
||||
return self['src'] < other['src']
|
||||
except TypeError:
|
||||
return self.source_type_weight() < other.source_type_weight()
|
||||
except Exception:
|
||||
# When all else fails, just compare the exact line.
|
||||
return self.line() < other.line()
|
||||
|
||||
def source_weight(self):
|
||||
"""Report the weight of this source net.
|
||||
|
||||
Basically this is the netmask, where IPv4 is normalized to IPv6
|
||||
(IPv4/32 has the same weight as IPv6/128).
|
||||
"""
|
||||
if self['type'] == 'local':
|
||||
return 130
|
||||
|
||||
sourceobj = self.source()
|
||||
if isinstance(sourceobj, ipaddress.IPv4Network):
|
||||
return sourceobj.prefixlen * 4
|
||||
if isinstance(sourceobj, ipaddress.IPv6Network):
|
||||
return sourceobj.prefixlen
|
||||
if isinstance(sourceobj, str):
|
||||
# You can also write all to match any IP address,
|
||||
# samehost to match any of the server's own IP addresses,
|
||||
# or samenet to match any address in any subnet that the server is connected to.
|
||||
if sourceobj == 'all':
|
||||
# (all is considered the full range of all ips, which has a weight of 0)
|
||||
return 0
|
||||
if sourceobj == 'samehost':
|
||||
# (sort samehost second after local)
|
||||
return 129
|
||||
if sourceobj == 'samenet':
|
||||
                # Might write some fancy code to determine all prefixes
|
||||
# from all interfaces and find a sane value for this one.
|
||||
# For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
|
||||
return 96
|
||||
if sourceobj[0] == '.':
|
||||
# suffix matching (domain name), let's assume a very large scale
|
||||
# and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
|
||||
return 64
|
||||
# hostname, let's assume only one host matches, which is
|
||||
# IPv4/32 or IPv6/128 (both have weight 128)
|
||||
return 128
|
||||
        raise PgHbaValueError('Cannot deduce the source weight of this source {0}'.format(sourceobj))
|
||||
|
||||
def source_type_weight(self):
|
||||
"""Give a weight on the type of this source.
|
||||
|
||||
Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
|
||||
This is a 'when all else fails' solution in __lt__.
|
||||
"""
|
||||
if self['type'] == 'local':
|
||||
return 3
|
||||
|
||||
sourceobj = self.source()
|
||||
if isinstance(sourceobj, ipaddress.IPv4Network):
|
||||
return 2
|
||||
if isinstance(sourceobj, ipaddress.IPv6Network):
|
||||
return 1
|
||||
if isinstance(sourceobj, str):
|
||||
return 0
|
||||
raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))
|
||||
|
||||
def db_weight(self):
|
||||
"""Report the weight of the database.
|
||||
|
||||
        Normally, just 1, but for replication this is 0, and for 'all' it is much higher so that it sorts last.
|
||||
"""
|
||||
if self['db'] == 'all':
|
||||
return 100000
|
||||
if self['db'] == 'replication':
|
||||
return 0
|
||||
if self['db'] in ['samerole', 'samegroup']:
|
||||
return 1
|
||||
return 1 + self['db'].count(',')
|
||||
|
||||
def user_weight(self):
|
||||
"""Report weight when comparing users."""
|
||||
if self['usr'] == 'all':
|
||||
return 1000000
|
||||
return 1
|
||||
|
||||
|
||||
def main():
|
||||
'''
|
||||
This function is the main function of this module
|
||||
'''
|
||||
# argument_spec = postgres_common_argument_spec()
|
||||
argument_spec = dict()
|
||||
argument_spec.update(
|
||||
address=dict(type='str', default='samehost', aliases=['source', 'src']),
|
||||
backup=dict(type='bool', default=False),
|
||||
backup_file=dict(type='str'),
|
||||
contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
|
||||
create=dict(type='bool', default=False),
|
||||
databases=dict(type='str', default='all'),
|
||||
dest=dict(type='path', required=True),
|
||||
method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
|
||||
netmask=dict(type='str'),
|
||||
options=dict(type='str'),
|
||||
order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS,
|
||||
removed_in_version='3.0.0', removed_from_collection='community.general'),
|
||||
state=dict(type='str', default="present", choices=["absent", "present"]),
|
||||
users=dict(type='str', default='all')
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
add_file_common_args=True,
|
||||
supports_check_mode=True
|
||||
)
|
||||
if IPADDRESS_IMP_ERR is not None:
|
||||
module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
|
||||
|
||||
contype = module.params["contype"]
|
||||
create = bool(module.params["create"] or module.check_mode)
|
||||
if module.check_mode:
|
||||
backup = False
|
||||
else:
|
||||
backup = module.params['backup']
|
||||
backup_file = module.params['backup_file']
|
||||
databases = module.params["databases"]
|
||||
dest = module.params["dest"]
|
||||
|
||||
method = module.params["method"]
|
||||
netmask = module.params["netmask"]
|
||||
options = module.params["options"]
|
||||
order = module.params["order"]
|
||||
source = module.params["address"]
|
||||
state = module.params["state"]
|
||||
users = module.params["users"]
|
||||
|
||||
ret = {'msgs': []}
|
||||
try:
|
||||
pg_hba = PgHba(dest, order, backup=backup, create=create)
|
||||
except PgHbaError as error:
|
||||
module.fail_json(msg='Error reading file:\n{0}'.format(error))
|
||||
|
||||
if contype:
|
||||
try:
|
||||
for database in databases.split(','):
|
||||
for user in users.split(','):
|
||||
rule = PgHbaRule(contype, database, user, source, netmask, method, options)
|
||||
if state == "present":
|
||||
ret['msgs'].append('Adding')
|
||||
pg_hba.add_rule(rule)
|
||||
else:
|
||||
ret['msgs'].append('Removing')
|
||||
pg_hba.remove_rule(rule)
|
||||
except PgHbaError as error:
|
||||
module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
|
||||
file_args = module.load_file_common_arguments(module.params)
|
||||
ret['changed'] = changed = pg_hba.changed()
|
||||
if changed:
|
||||
ret['msgs'].append('Changed')
|
||||
ret['diff'] = pg_hba.diff
|
||||
|
||||
if not module.check_mode:
|
||||
ret['msgs'].append('Writing')
|
||||
try:
|
||||
if pg_hba.write(backup_file):
|
||||
module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
|
||||
expand=False)
|
||||
except PgHbaError as error:
|
||||
module.fail_json(msg='Error writing file:\n{0}'.format(error))
|
||||
if pg_hba.last_backup:
|
||||
ret['backup_file'] = pg_hba.last_backup
|
||||
|
||||
ret['pg_hba'] = list(pg_hba.get_rules())
|
||||
module.exit_json(**ret)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,170 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_ping
|
||||
short_description: Check remote PostgreSQL server availability
|
||||
description:
|
||||
- Simple module to check remote PostgreSQL server availability.
|
||||
options:
|
||||
db:
|
||||
description:
|
||||
- Name of a database to connect to.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
version_added: '0.2.0'
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether a value of I(session_role) is potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
seealso:
|
||||
- module: community.general.postgresql_info
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# PostgreSQL ping dbsrv server from the shell:
|
||||
# ansible dbsrv -m postgresql_ping
|
||||
|
||||
# In the example below you need to generate certificates previously.
|
||||
# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
|
||||
- name: PostgreSQL ping dbsrv server using non-default credentials and SSL
|
||||
community.general.postgresql_ping:
|
||||
db: protected_db
|
||||
login_host: dbsrv
|
||||
login_user: secret
|
||||
login_password: secret_pass
|
||||
ca_cert: /root/root.crt
|
||||
ssl_mode: verify-full
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
is_available:
|
||||
description: PostgreSQL server availability.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
server_version:
|
||||
description: PostgreSQL server version.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { major: 10, minor: 1 }
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
|
||||
class PgPing(object):
|
||||
def __init__(self, module, cursor):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.is_available = False
|
||||
self.version = {}
|
||||
|
||||
def do(self):
|
||||
self.get_pg_version()
|
||||
return (self.is_available, self.version)
|
||||
|
||||
def get_pg_version(self):
|
||||
query = "SELECT version()"
|
||||
raw = exec_sql(self, query, add_to_executed=False)[0][0]
|
||||
if raw:
|
||||
self.is_available = True
|
||||
raw = raw.split()[1].split('.')
|
||||
self.version = dict(
|
||||
major=int(raw[0]),
|
||||
minor=int(raw[1]),
|
||||
)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
if not module.params['trust_input']:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, module.params['session_role'])
|
||||
|
||||
# Set some default values:
|
||||
cursor = False
|
||||
db_connection = False
|
||||
result = dict(
|
||||
changed=False,
|
||||
is_available=False,
|
||||
server_version=dict(),
|
||||
)
|
||||
|
||||
conn_params = get_conn_params(module, module.params, warn_db_default=False)
|
||||
db_connection = connect_to_db(module, conn_params, fail_on_conn=False)
|
||||
|
||||
if db_connection is not None:
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
# Do job:
|
||||
pg_ping = PgPing(module, cursor)
|
||||
if cursor:
|
||||
# If connection established:
|
||||
result["is_available"], result["server_version"] = pg_ping.do()
|
||||
db_connection.rollback()
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
File diff suppressed because it is too large
|
@@ -1,682 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_publication
|
||||
short_description: Add, update, or remove PostgreSQL publication
|
||||
description:
|
||||
- Add, update, or remove PostgreSQL publication.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the publication to add, update, or remove.
|
||||
required: true
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of the database to connect to and where
|
||||
the publication state will be changed.
|
||||
aliases: [ login_db ]
|
||||
type: str
|
||||
tables:
|
||||
description:
|
||||
- List of tables to add to the publication.
|
||||
    - If no value is set, all tables are targeted.
|
||||
- If the publication already exists for specific tables and I(tables) is not passed,
|
||||
nothing will be changed. If you need to add all tables to the publication with the same name,
|
||||
      drop the existing publication and create a new one without passing I(tables).
|
||||
type: list
|
||||
elements: str
|
||||
state:
|
||||
description:
|
||||
- The publication state.
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
type: str
|
||||
parameters:
|
||||
description:
|
||||
- Dictionary with optional publication parameters.
|
||||
- Available parameters depend on PostgreSQL version.
|
||||
type: dict
|
||||
owner:
|
||||
description:
|
||||
- Publication owner.
|
||||
- If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
|
||||
type: str
|
||||
cascade:
|
||||
description:
|
||||
- Drop publication dependencies. Has effect with I(state=absent) only.
|
||||
type: bool
|
||||
default: false
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
version_added: '0.2.0'
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(name), I(tables), I(owner),
|
||||
I(session_role), I(params) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
notes:
|
||||
- PostgreSQL version must be 10 or greater.
|
||||
seealso:
|
||||
- name: CREATE PUBLICATION reference
|
||||
description: Complete reference of the CREATE PUBLICATION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createpublication.html
|
||||
- name: ALTER PUBLICATION reference
|
||||
description: Complete reference of the ALTER PUBLICATION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-alterpublication.html
|
||||
- name: DROP PUBLICATION reference
|
||||
description: Complete reference of the DROP PUBLICATION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-droppublication.html
|
||||
author:
|
||||
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
|
||||
- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a new publication with name "acme" targeting all tables in database "test".
|
||||
community.general.postgresql_publication:
|
||||
db: test
|
||||
name: acme
|
||||
|
||||
- name: Create publication "acme" publishing only prices and vehicles tables.
|
||||
community.general.postgresql_publication:
|
||||
name: acme
|
||||
tables:
|
||||
- prices
|
||||
- vehicles
|
||||
|
||||
- name: >
|
||||
Create publication "acme", set user alice as an owner, targeting all tables.
|
||||
Allowable DML operations are INSERT and UPDATE only
|
||||
community.general.postgresql_publication:
|
||||
name: acme
|
||||
owner: alice
|
||||
parameters:
|
||||
publish: 'insert,update'
|
||||
|
||||
- name: >
|
||||
Assuming publication "acme" exists and there are targeted
|
||||
tables "prices" and "vehicles", add table "stores" to the publication.
|
||||
community.general.postgresql_publication:
|
||||
name: acme
|
||||
tables:
|
||||
- prices
|
||||
- vehicles
|
||||
- stores
|
||||
|
||||
- name: Remove publication "acme" if exists in database "test".
|
||||
community.general.postgresql_publication:
|
||||
db: test
|
||||
name: acme
|
||||
state: absent
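
# A hypothetical variant (not in the original examples): also drop dependent
# objects by setting cascade; the database and publication names are reused.
- name: Remove publication "acme" and its dependencies from database "test"
  community.general.postgresql_publication:
    db: test
    name: acme
    state: absent
    cascade: yes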
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
exists:
|
||||
description:
|
||||
  - Flag that indicates whether the publication exists at the end of runtime.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
|
||||
owner:
|
||||
description: Owner of the publication at the end of runtime.
|
||||
returned: if publication exists
|
||||
type: str
|
||||
sample: "alice"
|
||||
tables:
|
||||
description:
|
||||
- List of tables in the publication at the end of runtime.
|
||||
- If all tables are published, returns an empty list.
|
||||
returned: if publication exists
|
||||
type: list
|
||||
sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
|
||||
alltables:
|
||||
description:
|
||||
- Indicates that the publication is defined for all tables (FOR ALL TABLES).
|
||||
returned: if publication exists
|
||||
type: bool
|
||||
sample: false
|
||||
parameters:
|
||||
description: Publication parameters at the end of runtime.
|
||||
returned: if publication exists
|
||||
type: dict
|
||||
sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
|
||||
'''
|
||||
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
pg_quote_identifier,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils.six import iteritems
|
||||
|
||||
SUPPORTED_PG_VERSION = 100000  # PostgreSQL 10.0 in the integer form reported by psycopg2's connection.server_version
|
||||
|
||||
|
||||
################################
|
||||
# Module functions and classes #
|
||||
################################
|
||||
|
||||
def transform_tables_representation(tbl_list):
|
||||
"""Add 'public.' to names of tables where a schema identifier is absent
|
||||
and add quotes to each element.
|
||||
|
||||
Args:
|
||||
tbl_list (list): List of table names.
|
||||
|
||||
Returns:
|
||||
tbl_list (list): Changed list.
|
||||
"""
|
||||
for i, table in enumerate(tbl_list):
|
||||
if '.' not in table:
|
||||
tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
|
||||
else:
|
||||
tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
|
||||
|
||||
return tbl_list
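# Illustrative sketch only (not part of the original module): a standalone version
# of the transformation above. It assumes pg_quote_identifier() double-quotes each
# dotted identifier part, e.g. 'public.prices' -> '"public"."prices"'.
def _sketch_transform_tables(tbl_list):
    quoted = []
    for table in tbl_list:
        name = table.strip() if '.' in table else 'public.%s' % table.strip()
        quoted.append('.'.join('"%s"' % part for part in name.split('.')))
    return quoted
# >>> _sketch_transform_tables(['prices', 'shop.vehicles'])
# ['"public"."prices"', '"shop"."vehicles"']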
|
||||
|
||||
|
||||
class PgPublication():
|
||||
"""Class to work with PostgreSQL publication.
|
||||
|
||||
Args:
|
||||
module (AnsibleModule): Object of AnsibleModule class.
|
||||
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
|
||||
name (str): The name of the publication.
|
||||
|
||||
Attributes:
|
||||
module (AnsibleModule): Object of AnsibleModule class.
|
||||
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
|
||||
name (str): Name of the publication.
|
||||
executed_queries (list): List of executed queries.
|
||||
attrs (dict): Dict with publication attributes.
|
||||
exists (bool): Flag indicates the publication exists or not.
|
||||
"""
|
||||
|
||||
def __init__(self, module, cursor, name):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.name = name
|
||||
self.executed_queries = []
|
||||
self.attrs = {
|
||||
'alltables': False,
|
||||
'tables': [],
|
||||
'parameters': {},
|
||||
'owner': '',
|
||||
}
|
||||
self.exists = self.check_pub()
|
||||
|
||||
def get_info(self):
|
||||
"""Refresh the publication information.
|
||||
|
||||
Returns:
|
||||
``self.attrs``.
|
||||
"""
|
||||
self.exists = self.check_pub()
|
||||
return self.attrs
|
||||
|
||||
def check_pub(self):
|
||||
"""Check the publication and refresh ``self.attrs`` publication attribute.
|
||||
|
||||
Returns:
|
||||
True if the publication with ``self.name`` exists, False otherwise.
|
||||
"""
|
||||
|
||||
pub_info = self.__get_general_pub_info()
|
||||
|
||||
if not pub_info:
|
||||
# Publication does not exist:
|
||||
return False
|
||||
|
||||
self.attrs['owner'] = pub_info.get('pubowner')
|
||||
|
||||
# Publication DML operations:
|
||||
self.attrs['parameters']['publish'] = {}
|
||||
self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
|
||||
self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
|
||||
self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
|
||||
if pub_info.get('pubtruncate'):
|
||||
self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')
|
||||
|
||||
# If alltables flag is False, get the list of targeted tables:
|
||||
if not pub_info.get('puballtables'):
|
||||
table_info = self.__get_tables_pub_info()
|
||||
# Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
|
||||
# for better representation:
|
||||
for i, schema_and_table in enumerate(table_info):
|
||||
table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')
|
||||
|
||||
self.attrs['tables'] = table_info
|
||||
else:
|
||||
self.attrs['alltables'] = True
|
||||
|
||||
# Publication exists:
|
||||
return True
|
||||
|
||||
def create(self, tables, params, owner, check_mode=True):
|
||||
"""Create the publication.
|
||||
|
||||
Args:
|
||||
tables (list): List with names of the tables that need to be added to the publication.
|
||||
params (dict): Dict contains optional publication parameters and their values.
|
||||
owner (str): Name of the publication owner.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
changed (bool): True if publication has been created, otherwise False.
|
||||
"""
|
||||
changed = True
|
||||
|
||||
query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]
|
||||
|
||||
if tables:
|
||||
query_fragments.append("FOR TABLE %s" % ', '.join(tables))
|
||||
else:
|
||||
query_fragments.append("FOR ALL TABLES")
|
||||
|
||||
if params:
|
||||
params_list = []
|
||||
# Make list ["param = 'value'", ...] from params dict:
|
||||
for (key, val) in iteritems(params):
|
||||
params_list.append("%s = '%s'" % (key, val))
|
||||
|
||||
# Add the list to query_fragments:
|
||||
query_fragments.append("WITH (%s)" % ', '.join(params_list))
|
||||
|
||||
changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
|
||||
|
||||
if owner:
|
||||
# If check_mode, just add possible SQL to
|
||||
# executed_queries and return:
|
||||
self.__pub_set_owner(owner, check_mode=check_mode)
|
||||
|
||||
        return changed
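# Illustrative sketch only (not used by the module): the kind of statement create()
# assembles above, assuming pg_quote_identifier() simply double-quotes the
# publication name.
def _sketch_create_publication(name, tables, params):
    fragments = ['CREATE PUBLICATION "%s"' % name]
    if tables:
        fragments.append('FOR TABLE %s' % ', '.join(tables))
    else:
        fragments.append('FOR ALL TABLES')
    if params:
        fragments.append('WITH (%s)' % ', '.join("%s = '%s'" % (k, v) for k, v in sorted(params.items())))
    return ' '.join(fragments)
# >>> print(_sketch_create_publication('acme', ['"public"."prices"'], {'publish': 'insert,update'}))
# CREATE PUBLICATION "acme" FOR TABLE "public"."prices" WITH (publish = 'insert,update')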
|
||||
|
||||
def update(self, tables, params, owner, check_mode=True):
|
||||
"""Update the publication.
|
||||
|
||||
Args:
|
||||
tables (list): List with names of the tables that need to be presented in the publication.
|
||||
params (dict): Dict contains optional publication parameters and their values.
|
||||
owner (str): Name of the publication owner.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
changed (bool): True if publication has been updated, otherwise False.
|
||||
"""
|
||||
changed = False
|
||||
|
||||
# Add or drop tables from the set of published tables:
|
||||
if tables and not self.attrs['alltables']:
|
||||
|
||||
# 1. Add tables that need to be added to the publication:
|
||||
for tbl in tables:
|
||||
if tbl not in self.attrs['tables']:
|
||||
# The table is not published yet, add it:
|
||||
changed = self.__pub_add_table(tbl, check_mode=check_mode)
|
||||
|
||||
# 2. Drop tables that are currently published
|
||||
# but are not present in the passed tables:
|
||||
for tbl in self.attrs['tables']:
|
||||
if tbl not in tables:
|
||||
changed = self.__pub_drop_table(tbl, check_mode=check_mode)
|
||||
|
||||
elif tables and self.attrs['alltables']:
|
||||
changed = self.__pub_set_tables(tables, check_mode=check_mode)
|
||||
|
||||
# Update pub parameters:
|
||||
if params:
|
||||
for key, val in iteritems(params):
|
||||
if self.attrs['parameters'].get(key):
|
||||
|
||||
# In PostgreSQL 10/11 only the 'publish' optional parameter is available.
|
||||
if key == 'publish':
|
||||
# 'publish' value can be only a string with comma-separated items
|
||||
# of allowed DML operations like 'insert,update' or
|
||||
# 'insert,update,delete', etc.
|
||||
# Make dictionary to compare with current attrs later:
|
||||
val_dict = self.attrs['parameters']['publish'].copy()
|
||||
val_list = val.split(',')
|
||||
for v in val_dict:
|
||||
if v in val_list:
|
||||
val_dict[v] = True
|
||||
else:
|
||||
val_dict[v] = False
|
||||
|
||||
# Compare val_dict and the dict with current 'publish' parameters,
|
||||
# if they're different, set new values:
|
||||
if val_dict != self.attrs['parameters']['publish']:
|
||||
changed = self.__pub_set_param(key, val, check_mode=check_mode)
|
||||
|
||||
# Default behavior for other cases:
|
||||
elif self.attrs['parameters'][key] != val:
|
||||
changed = self.__pub_set_param(key, val, check_mode=check_mode)
|
||||
|
||||
else:
|
||||
# If the parameter was not set before:
|
||||
changed = self.__pub_set_param(key, val, check_mode=check_mode)
|
||||
|
||||
# Update pub owner:
|
||||
if owner:
|
||||
if owner != self.attrs['owner']:
|
||||
changed = self.__pub_set_owner(owner, check_mode=check_mode)
|
||||
|
||||
return changed
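# Illustrative sketch only (not used by the module): how the 'publish' string is
# normalised to a dict before update() compares it with the current attributes above.
def _sketch_wanted_publish(current, requested):
    # current, e.g. {'insert': True, 'update': True, 'delete': False}; requested, e.g. 'insert,update'
    return dict((op, op in requested.split(',')) for op in current)
# >>> _sketch_wanted_publish({'insert': True, 'update': True, 'delete': False}, 'insert,update')
# {'insert': True, 'update': True, 'delete': False}   # equal to current -> nothing to change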
|
||||
|
||||
def drop(self, cascade=False, check_mode=True):
|
||||
"""Drop the publication.
|
||||
|
||||
Kwargs:
|
||||
cascade (bool): Flag indicates that publication needs to be deleted
|
||||
with its dependencies.
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
changed (bool): True if the publication has been dropped, otherwise False.
|
||||
"""
|
||||
if self.exists:
|
||||
query_fragments = []
|
||||
query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
|
||||
if cascade:
|
||||
query_fragments.append("CASCADE")
|
||||
|
||||
return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
|
||||
|
||||
def __get_general_pub_info(self):
|
||||
"""Get and return general publication information.
|
||||
|
||||
Returns:
|
||||
Dict with publication information if successful, False otherwise.
|
||||
"""
|
||||
# Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
|
||||
pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
|
||||
"WHERE table_name = 'pg_publication' "
|
||||
"AND column_name = 'pubtruncate'"), add_to_executed=False)
|
||||
|
||||
if pgtrunc_sup:
|
||||
query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
|
||||
"p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
|
||||
"JOIN pg_catalog.pg_roles AS r "
|
||||
"ON p.pubowner = r.oid "
|
||||
"WHERE p.pubname = %(pname)s")
|
||||
else:
|
||||
query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
|
||||
"p.pubupdate , p.pubdelete FROM pg_publication AS p "
|
||||
"JOIN pg_catalog.pg_roles AS r "
|
||||
"ON p.pubowner = r.oid "
|
||||
"WHERE p.pubname = %(pname)s")
|
||||
|
||||
result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
|
||||
if result:
|
||||
return result[0]
|
||||
else:
|
||||
return False
|
||||
|
||||
def __get_tables_pub_info(self):
|
||||
"""Get and return tables that are published by the publication.
|
||||
|
||||
Returns:
|
||||
List of dicts with published tables.
|
||||
"""
|
||||
query = ("SELECT schemaname, tablename "
|
||||
"FROM pg_publication_tables WHERE pubname = %(pname)s")
|
||||
return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
|
||||
|
||||
def __pub_add_table(self, table, check_mode=False):
|
||||
"""Add a table to the publication.
|
||||
|
||||
Args:
|
||||
table (str): Table name.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
|
||||
pg_quote_identifier(table, 'table')))
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __pub_drop_table(self, table, check_mode=False):
|
||||
"""Drop a table from the publication.
|
||||
|
||||
Args:
|
||||
table (str): Table name.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
|
||||
pg_quote_identifier(table, 'table')))
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __pub_set_tables(self, tables, check_mode=False):
|
||||
"""Set a table suit that need to be published by the publication.
|
||||
|
||||
Args:
|
||||
tables (list): List of tables.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
|
||||
query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
|
||||
', '.join(quoted_tables)))
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __pub_set_param(self, param, value, check_mode=False):
|
||||
"""Set an optional publication parameter.
|
||||
|
||||
Args:
|
||||
param (str): Name of the parameter.
|
||||
value (str): Parameter value.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
|
||||
param, value))
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __pub_set_owner(self, role, check_mode=False):
|
||||
"""Set a publication owner.
|
||||
|
||||
Args:
|
||||
role (str): Role (user) name that needs to be set as a publication owner.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = ('ALTER PUBLICATION %s '
|
||||
'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role))
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __exec_sql(self, query, check_mode=False):
|
||||
"""Execute SQL query.
|
||||
|
||||
Note: If we just need to get information from the database,
|
||||
we use the ``exec_sql`` function directly.
|
||||
|
||||
Args:
|
||||
query (str): Query that needs to be executed.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just add ``query`` to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
if check_mode:
|
||||
self.executed_queries.append(query)
|
||||
return True
|
||||
else:
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
name=dict(required=True),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
tables=dict(type='list', elements='str'),
|
||||
parameters=dict(type='dict'),
|
||||
owner=dict(type='str'),
|
||||
cascade=dict(type='bool', default=False),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
# Parameters handling:
|
||||
name = module.params['name']
|
||||
state = module.params['state']
|
||||
tables = module.params['tables']
|
||||
params = module.params['parameters']
|
||||
owner = module.params['owner']
|
||||
cascade = module.params['cascade']
|
||||
session_role = module.params['session_role']
|
||||
trust_input = module.params['trust_input']
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
if not params:
|
||||
params_list = None
|
||||
else:
|
||||
params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)]
|
||||
|
||||
check_input(module, name, tables, owner, session_role, params_list)
|
||||
|
||||
if state == 'absent':
|
||||
if tables:
|
||||
module.warn('parameter "tables" is ignored when "state=absent"')
|
||||
if params:
|
||||
module.warn('parameter "parameters" is ignored when "state=absent"')
|
||||
if owner:
|
||||
module.warn('parameter "owner" is ignored when "state=absent"')
|
||||
|
||||
if state == 'present' and cascade:
|
||||
module.warn('parameter "cascade" is ignored when "state=present"')
|
||||
|
||||
# Connect to DB and make cursor object:
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
# We check publication state without DML queries execution, so set autocommit:
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
# Check version:
|
||||
if cursor.connection.server_version < SUPPORTED_PG_VERSION:
|
||||
module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
|
||||
|
||||
# Nothing was changed by default:
|
||||
changed = False
|
||||
|
||||
###################################
|
||||
# Create object and do rock'n'roll:
|
||||
publication = PgPublication(module, cursor, name)
|
||||
|
||||
if tables:
|
||||
tables = transform_tables_representation(tables)
|
||||
|
||||
# If module.check_mode=True, nothing will be changed:
|
||||
if state == 'present':
|
||||
if not publication.exists:
|
||||
changed = publication.create(tables, params, owner, check_mode=module.check_mode)
|
||||
|
||||
else:
|
||||
changed = publication.update(tables, params, owner, check_mode=module.check_mode)
|
||||
|
||||
elif state == 'absent':
|
||||
changed = publication.drop(cascade=cascade, check_mode=module.check_mode)
|
||||
|
||||
# Get final publication info:
|
||||
pub_fin_info = {}
|
||||
if state == 'present' or (state == 'absent' and module.check_mode):
|
||||
pub_fin_info = publication.get_info()
|
||||
elif state == 'absent' and not module.check_mode:
|
||||
publication.exists = False
|
||||
|
||||
# Connection is not needed any more:
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Update publication info and return ret values:
|
||||
module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,452 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, Felix Archambault
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_query
|
||||
short_description: Run PostgreSQL queries
|
||||
description:
|
||||
- Runs arbitrary PostgreSQL queries.
|
||||
- Can run queries from SQL script files.
|
||||
- Does not run against backup files. Use M(community.general.postgresql_db) with I(state=restore)
|
||||
to run queries on files made by pg_dump/pg_dumpall utilities.
|
||||
options:
|
||||
query:
|
||||
description:
|
||||
- SQL query to run. Variables can be escaped with psycopg2 syntax
|
||||
U(http://initd.org/psycopg/docs/usage.html).
|
||||
type: str
|
||||
positional_args:
|
||||
description:
|
||||
- List of values to be passed as positional arguments to the query.
|
||||
When the value is a list, it will be converted to PostgreSQL array.
|
||||
- Mutually exclusive with I(named_args).
|
||||
type: list
|
||||
elements: raw
|
||||
named_args:
|
||||
description:
|
||||
- Dictionary of key-value arguments to pass to the query.
|
||||
When the value is a list, it will be converted to PostgreSQL array.
|
||||
- Mutually exclusive with I(positional_args).
|
||||
type: dict
|
||||
path_to_script:
|
||||
description:
|
||||
- Path to a SQL script on the target machine.
|
||||
- If the script contains several queries, they must be semicolon-separated.
|
||||
- Mutually exclusive with I(query).
|
||||
type: path
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to and run queries against.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
autocommit:
|
||||
description:
|
||||
- Execute in autocommit mode when the query can't be run inside a transaction block
|
||||
(e.g., VACUUM).
|
||||
- Mutually exclusive with I(check_mode).
|
||||
type: bool
|
||||
default: no
|
||||
encoding:
|
||||
description:
|
||||
- Set the client encoding for the current session (e.g. C(UTF-8)).
|
||||
- The default is the encoding defined by the database.
|
||||
type: str
|
||||
version_added: '0.2.0'
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether a value of I(session_role) is potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
search_path:
|
||||
description:
|
||||
- List of schema names to look in.
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '1.0.0'
|
||||
seealso:
|
||||
- module: community.general.postgresql_db
|
||||
- name: PostgreSQL Schema reference
|
||||
description: Complete reference of the PostgreSQL schema documentation.
|
||||
link: https://www.postgresql.org/docs/current/ddl-schemas.html
|
||||
author:
|
||||
- Felix Archambault (@archf)
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
- Will Rouesnel (@wrouesnel)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Simple select query to acme db
|
||||
community.general.postgresql_query:
|
||||
db: acme
|
||||
query: SELECT version()
|
||||
|
||||
- name: Select query to db acme with positional arguments and non-default credentials
|
||||
community.general.postgresql_query:
|
||||
db: acme
|
||||
login_user: django
|
||||
login_password: mysecretpass
|
||||
query: SELECT * FROM acme WHERE id = %s AND story = %s
|
||||
positional_args:
|
||||
- 1
|
||||
- test
|
||||
|
||||
- name: Select query to test_db with named_args
|
||||
community.general.postgresql_query:
|
||||
db: test_db
|
||||
query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
|
||||
named_args:
|
||||
id_val: 1
|
||||
story_val: test
|
||||
|
||||
- name: Insert query to test_table in db test_db
|
||||
community.general.postgresql_query:
|
||||
db: test_db
|
||||
query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
|
||||
|
||||
- name: Run queries from SQL script using UTF-8 client encoding for session
|
||||
community.general.postgresql_query:
|
||||
db: test_db
|
||||
path_to_script: /var/lib/pgsql/test.sql
|
||||
positional_args:
|
||||
- 1
|
||||
encoding: UTF-8
|
||||
|
||||
- name: Example of using autocommit parameter
|
||||
community.general.postgresql_query:
|
||||
db: test_db
|
||||
query: VACUUM
|
||||
autocommit: yes
|
||||
|
||||
- name: >
|
||||
Insert data to the column of array type using positional_args.
|
||||
Note that we use quotes here, the same as for passing JSON, etc.
|
||||
community.general.postgresql_query:
|
||||
query: INSERT INTO test_table (array_column) VALUES (%s)
|
||||
positional_args:
|
||||
- '{1,2,3}'
|
||||
|
||||
# Pass list and string vars as positional_args
|
||||
- name: Set vars
|
||||
ansible.builtin.set_fact:
|
||||
my_list:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
my_arr: '{1, 2, 3}'
|
||||
|
||||
- name: Select from test table by passing positional_args as arrays
|
||||
community.general.postgresql_query:
|
||||
query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
|
||||
positional_args:
|
||||
- '{{ my_list }}'
|
||||
- '{{ my_arr|string }}'
|
||||
|
||||
# Select from test table looking into app1 schema first, then,
|
||||
# if the schema doesn't exist or the table hasn't been found there,
|
||||
# try to find it in the schema public
|
||||
- name: Select from test using search_path
|
||||
community.general.postgresql_query:
|
||||
query: SELECT * FROM test_array_table
|
||||
search_path:
|
||||
- app1
|
||||
- public
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
query:
|
||||
description:
|
||||
- Executed query.
|
||||
- When reading several queries from a file, it contains only the last one.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'SELECT * FROM bar'
|
||||
statusmessage:
|
||||
description:
|
||||
- Attribute containing the message returned by the command.
|
||||
- When reading several queries from a file, it contains a message of the last one.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'INSERT 0 1'
|
||||
query_result:
|
||||
description:
|
||||
- List of dictionaries in column:value form representing returned rows.
|
||||
- When running queries from a file, returns the result of the last query.
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
sample: [{"Column": "Value1"},{"Column": "Value2"}]
|
||||
query_list:
|
||||
description:
|
||||
- List of executed queries.
|
||||
Useful when reading several queries from a file.
|
||||
returned: always
|
||||
type: list
|
||||
elements: str
|
||||
sample: ['SELECT * FROM foo', 'SELECT * FROM bar']
|
||||
query_all_results:
|
||||
description:
|
||||
- List containing results of all queries executed (one sublist for every query).
|
||||
Useful when reading several queries from a file.
|
||||
returned: always
|
||||
type: list
|
||||
elements: list
|
||||
sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]]
|
||||
rowcount:
|
||||
description:
|
||||
- Number of produced or affected rows.
|
||||
- When using a script with multiple queries,
|
||||
it contains the total number of produced or affected rows.
|
||||
returned: changed
|
||||
type: int
|
||||
sample: 5
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# it is needed for checking 'no results to fetch' in main(),
|
||||
# psycopg2 availability will be checked by connect_to_db() into
|
||||
# ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six import iteritems
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def list_to_pg_array(elem):
|
||||
"""Convert the passed list to PostgreSQL array
|
||||
represented as a string.
|
||||
|
||||
Args:
|
||||
elem (list): List that needs to be converted.
|
||||
|
||||
Returns:
|
||||
elem (str): String representation of PostgreSQL array.
|
||||
"""
|
||||
elem = str(elem).strip('[]')
|
||||
elem = '{' + elem + '}'
|
||||
return elem
|
||||
|
||||
|
||||
def convert_elements_to_pg_arrays(obj):
|
||||
"""Convert list elements of the passed object
|
||||
to PostgreSQL arrays represented as strings.
|
||||
|
||||
Args:
|
||||
obj (dict or list): Object whose elements need to be converted.
|
||||
|
||||
Returns:
|
||||
obj (dict or list): Object with converted elements.
|
||||
"""
|
||||
if isinstance(obj, dict):
|
||||
for (key, elem) in iteritems(obj):
|
||||
if isinstance(elem, list):
|
||||
obj[key] = list_to_pg_array(elem)
|
||||
|
||||
elif isinstance(obj, list):
|
||||
for i, elem in enumerate(obj):
|
||||
if isinstance(elem, list):
|
||||
obj[i] = list_to_pg_array(elem)
|
||||
|
||||
return obj
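# Illustrative usage only (not executed by the module): both helpers reuse Python's
# list repr, so spacing and element quoting are preserved as-is.
# >>> list_to_pg_array([1, 2, 3])
# '{1, 2, 3}'
# >>> convert_elements_to_pg_arrays({'ids': [1, 2, 3], 'label': 'x'})
# {'ids': '{1, 2, 3}', 'label': 'x'}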
|
||||
|
||||
|
||||
def set_search_path(cursor, search_path):
|
||||
"""Set session's search_path.
|
||||
|
||||
Args:
|
||||
cursor (Psycopg2 cursor): Database cursor object.
|
||||
search_path (str): String containing comma-separated schema names.
|
||||
"""
|
||||
cursor.execute('SET search_path TO %s' % search_path)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
query=dict(type='str'),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
positional_args=dict(type='list', elements='raw'),
|
||||
named_args=dict(type='dict'),
|
||||
session_role=dict(type='str'),
|
||||
path_to_script=dict(type='path'),
|
||||
autocommit=dict(type='bool', default=False),
|
||||
encoding=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
search_path=dict(type='list', elements='str'),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
mutually_exclusive=(('positional_args', 'named_args'),),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
query = module.params["query"]
|
||||
positional_args = module.params["positional_args"]
|
||||
named_args = module.params["named_args"]
|
||||
path_to_script = module.params["path_to_script"]
|
||||
autocommit = module.params["autocommit"]
|
||||
encoding = module.params["encoding"]
|
||||
session_role = module.params["session_role"]
|
||||
trust_input = module.params["trust_input"]
|
||||
search_path = module.params["search_path"]
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, session_role)
|
||||
|
||||
if autocommit and module.check_mode:
|
||||
module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
|
||||
|
||||
if path_to_script and query:
|
||||
module.fail_json(msg="path_to_script is mutually exclusive with query")
|
||||
|
||||
if positional_args:
|
||||
positional_args = convert_elements_to_pg_arrays(positional_args)
|
||||
|
||||
elif named_args:
|
||||
named_args = convert_elements_to_pg_arrays(named_args)
|
||||
|
||||
query_list = []
|
||||
if path_to_script:
|
||||
try:
|
||||
with open(path_to_script, 'rb') as f:
|
||||
query = to_native(f.read())
|
||||
if ';' in query:
|
||||
query_list = [q for q in query.split(';') if q != '\n']
|
||||
else:
|
||||
query_list.append(query)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
|
||||
else:
|
||||
query_list.append(query)
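    # Illustrative sketch only (not used by the module): the script handling above is
    # a naive str.split(';'), so a literal ';' inside a string or a function body would
    # also split the script into separate queries.
    # >>> script = "CREATE TABLE t (i int);\nINSERT INTO t VALUES (1);\n"
    # >>> [q for q in script.split(';') if q != '\n']
    # ['CREATE TABLE t (i int)', '\nINSERT INTO t VALUES (1)']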
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
|
||||
if encoding is not None:
|
||||
db_connection.set_client_encoding(encoding)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
if search_path:
|
||||
set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))
|
||||
|
||||
# Prepare args:
|
||||
if module.params.get("positional_args"):
|
||||
arguments = module.params["positional_args"]
|
||||
elif module.params.get("named_args"):
|
||||
arguments = module.params["named_args"]
|
||||
else:
|
||||
arguments = None
|
||||
|
||||
# Set defaults:
|
||||
changed = False
|
||||
|
||||
query_all_results = []
|
||||
rowcount = 0
|
||||
statusmessage = ''
|
||||
|
||||
# Execute query:
|
||||
for query in query_list:
|
||||
try:
|
||||
cursor.execute(query, arguments)
|
||||
statusmessage = cursor.statusmessage
|
||||
if cursor.rowcount > 0:
|
||||
rowcount += cursor.rowcount
|
||||
|
||||
try:
|
||||
query_result = [dict(row) for row in cursor.fetchall()]
|
||||
|
||||
except Psycopg2ProgrammingError as e:
|
||||
if to_native(e) == 'no results to fetch':
|
||||
query_result = {}
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
|
||||
|
||||
query_all_results.append(query_result)
|
||||
|
||||
if 'SELECT' not in statusmessage:
|
||||
if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
|
||||
s = statusmessage.split()
|
||||
if len(s) == 3:
|
||||
if s[2] != '0':
|
||||
changed = True
|
||||
|
||||
elif len(s) == 2:
|
||||
if s[1] != '0':
|
||||
changed = True
|
||||
|
||||
else:
|
||||
changed = True
|
||||
|
||||
else:
|
||||
changed = True
|
||||
|
||||
except Exception as e:
|
||||
if not autocommit:
|
||||
db_connection.rollback()
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, arguments, to_native(e), query_list))
|
||||
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
if not autocommit:
|
||||
db_connection.commit()
|
||||
|
||||
kw = dict(
|
||||
changed=changed,
|
||||
query=cursor.query,
|
||||
query_list=query_list,
|
||||
statusmessage=statusmessage,
|
||||
query_result=query_result,
|
||||
query_all_results=query_all_results,
|
||||
rowcount=rowcount,
|
||||
)
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,293 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_schema
|
||||
short_description: Add or remove PostgreSQL schema
|
||||
description:
|
||||
- Add or remove PostgreSQL schema.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the schema to add or remove.
|
||||
required: true
|
||||
type: str
|
||||
aliases:
|
||||
- schema
|
||||
database:
|
||||
description:
|
||||
- Name of the database to connect to and add or remove the schema.
|
||||
type: str
|
||||
default: postgres
|
||||
aliases:
|
||||
- db
|
||||
- login_db
|
||||
owner:
|
||||
description:
|
||||
- Name of the role to set as owner of the schema.
|
||||
type: str
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
- The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though the session_role
|
||||
were the one that had logged in originally.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The schema state.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
cascade_drop:
|
||||
description:
|
||||
- Drop schema with CASCADE to remove child objects.
|
||||
type: bool
|
||||
default: false
|
||||
ssl_mode:
|
||||
description:
|
||||
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
|
||||
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
|
||||
- Default of C(prefer) matches libpq default.
|
||||
type: str
|
||||
default: prefer
|
||||
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
|
||||
ca_cert:
|
||||
description:
|
||||
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
|
||||
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
|
||||
type: str
|
||||
aliases: [ ssl_rootcert ]
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
seealso:
|
||||
- name: PostgreSQL schemas
|
||||
description: General information about PostgreSQL schemas.
|
||||
link: https://www.postgresql.org/docs/current/ddl-schemas.html
|
||||
- name: CREATE SCHEMA reference
|
||||
description: Complete reference of the CREATE SCHEMA command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createschema.html
|
||||
- name: ALTER SCHEMA reference
|
||||
description: Complete reference of the ALTER SCHEMA command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-alterschema.html
|
||||
- name: DROP SCHEMA reference
|
||||
description: Complete reference of the DROP SCHEMA command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-dropschema.html
|
||||
author:
|
||||
- Flavien Chantelot (@Dorn-) <contact@flavien.io>
|
||||
- Thomas O'Donnell (@andytom)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a new schema with name acme in test database
|
||||
community.general.postgresql_schema:
|
||||
db: test
|
||||
name: acme
|
||||
|
||||
- name: Create a new schema acme with a user bob who will own it
|
||||
community.general.postgresql_schema:
|
||||
name: acme
|
||||
owner: bob
|
||||
|
||||
- name: Drop schema "acme" with cascade
|
||||
community.general.postgresql_schema:
|
||||
name: acme
|
||||
state: absent
|
||||
cascade_drop: yes
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
schema:
|
||||
description: Name of the schema.
|
||||
returned: success, changed
|
||||
type: str
|
||||
sample: "acme"
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: ["CREATE SCHEMA \"acme\""]
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
pg_quote_identifier,
|
||||
SQLParseError,
|
||||
)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
executed_queries = []
|
||||
|
||||
|
||||
class NotSupportedError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
def set_owner(cursor, schema, owner):
|
||||
query = 'ALTER SCHEMA %s OWNER TO "%s"' % (
|
||||
pg_quote_identifier(schema, 'schema'), owner)
|
||||
cursor.execute(query)
|
||||
executed_queries.append(query)
|
||||
return True
|
||||
|
||||
|
||||
def get_schema_info(cursor, schema):
|
||||
query = ("SELECT schema_owner AS owner "
|
||||
"FROM information_schema.schemata "
|
||||
"WHERE schema_name = %(schema)s")
|
||||
cursor.execute(query, {'schema': schema})
|
||||
return cursor.fetchone()
|
||||
|
||||
|
||||
def schema_exists(cursor, schema):
|
||||
query = ("SELECT schema_name FROM information_schema.schemata "
|
||||
"WHERE schema_name = %(schema)s")
|
||||
cursor.execute(query, {'schema': schema})
|
||||
return cursor.rowcount == 1
|
||||
|
||||
|
||||
def schema_delete(cursor, schema, cascade):
|
||||
if schema_exists(cursor, schema):
|
||||
query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
|
||||
if cascade:
|
||||
query += " CASCADE"
|
||||
cursor.execute(query)
|
||||
executed_queries.append(query)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def schema_create(cursor, schema, owner):
|
||||
if not schema_exists(cursor, schema):
|
||||
query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
|
||||
if owner:
|
||||
query_fragments.append('AUTHORIZATION "%s"' % owner)
|
||||
query = ' '.join(query_fragments)
|
||||
cursor.execute(query)
|
||||
executed_queries.append(query)
|
||||
return True
|
||||
else:
|
||||
schema_info = get_schema_info(cursor, schema)
|
||||
if owner and owner != schema_info['owner']:
|
||||
return set_owner(cursor, schema, owner)
|
||||
else:
|
||||
return False
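# Illustrative sketch only (not used by the module): the statements that
# schema_create() and schema_delete() build above, assuming pg_quote_identifier()
# double-quotes the schema name.
def _sketch_schema_sql(schema, owner=None, drop=False, cascade=False):
    if drop:
        return 'DROP SCHEMA "%s"%s' % (schema, ' CASCADE' if cascade else '')
    query = 'CREATE SCHEMA "%s"' % schema
    if owner:
        query += ' AUTHORIZATION "%s"' % owner
    return query
# >>> _sketch_schema_sql('acme', owner='bob')
# 'CREATE SCHEMA "acme" AUTHORIZATION "bob"'
# >>> _sketch_schema_sql('acme', drop=True, cascade=True)
# 'DROP SCHEMA "acme" CASCADE'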
|
||||
|
||||
|
||||
def schema_matches(cursor, schema, owner):
|
||||
if not schema_exists(cursor, schema):
|
||||
return False
|
||||
else:
|
||||
schema_info = get_schema_info(cursor, schema)
|
||||
if owner and owner != schema_info['owner']:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
schema=dict(type="str", required=True, aliases=['name']),
|
||||
owner=dict(type="str", default=""),
|
||||
database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
|
||||
cascade_drop=dict(type="bool", default=False),
|
||||
state=dict(type="str", default="present", choices=["absent", "present"]),
|
||||
session_role=dict(type="str"),
|
||||
trust_input=dict(type="bool", default=True),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
schema = module.params["schema"]
|
||||
owner = module.params["owner"]
|
||||
state = module.params["state"]
|
||||
cascade_drop = module.params["cascade_drop"]
|
||||
session_role = module.params["session_role"]
|
||||
trust_input = module.params["trust_input"]
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, schema, owner, session_role)
|
||||
|
||||
changed = False
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
try:
|
||||
if module.check_mode:
|
||||
if state == "absent":
|
||||
changed = not schema_exists(cursor, schema)
|
||||
elif state == "present":
|
||||
changed = not schema_matches(cursor, schema, owner)
|
||||
module.exit_json(changed=changed, schema=schema)
|
||||
|
||||
if state == "absent":
|
||||
try:
|
||||
changed = schema_delete(cursor, schema, cascade_drop)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
elif state == "present":
|
||||
try:
|
||||
changed = schema_create(cursor, schema, owner)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
except NotSupportedError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
except SystemExit:
|
||||
# Avoid catching this on Python 2.4
|
||||
raise
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
db_connection.close()
|
||||
module.exit_json(changed=changed, schema=schema, queries=executed_queries)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,627 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_sequence
|
||||
short_description: Create, drop, or alter a PostgreSQL sequence
|
||||
description:
|
||||
- Allows creating, dropping, or changing the definition of a sequence generator.
|
||||
options:
|
||||
sequence:
|
||||
description:
|
||||
- The name of the sequence.
|
||||
required: true
|
||||
type: str
|
||||
aliases:
|
||||
- name
|
||||
state:
|
||||
description:
|
||||
- The sequence state.
|
||||
- If I(state=absent), other options will be ignored except I(name) and
|
||||
I(schema).
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
type: str
|
||||
data_type:
|
||||
description:
|
||||
- Specifies the data type of the sequence. Valid types are bigint, integer,
|
||||
and smallint. bigint is the default. The data type determines the default
|
||||
minimum and maximum values of the sequence. For more info see the
|
||||
documentation
|
||||
U(https://www.postgresql.org/docs/current/sql-createsequence.html).
|
||||
- Supported from PostgreSQL 10.
|
||||
choices: [ bigint, integer, smallint ]
|
||||
type: str
|
||||
increment:
|
||||
description:
|
||||
- Increment specifies which value is added to the current sequence value
|
||||
to create a new value.
|
||||
- A positive value will make an ascending sequence, a negative one a
|
||||
descending sequence. The default value is 1.
|
||||
type: int
|
||||
minvalue:
|
||||
description:
|
||||
- Minvalue determines the minimum value a sequence can generate. The
|
||||
default for an ascending sequence is 1. The default for a descending
|
||||
sequence is the minimum value of the data type.
|
||||
type: int
|
||||
aliases:
|
||||
- min
|
||||
maxvalue:
|
||||
description:
|
||||
- Maxvalue determines the maximum value for the sequence. The default for
|
||||
an ascending sequence is the maximum
|
||||
value of the data type. The default for a descending sequence is -1.
|
||||
type: int
|
||||
aliases:
|
||||
- max
|
||||
start:
|
||||
description:
|
||||
- Start allows the sequence to begin anywhere. The default starting value
|
||||
is I(minvalue) for ascending sequences and I(maxvalue) for descending
|
||||
ones.
|
||||
type: int
|
||||
cache:
|
||||
description:
|
||||
- Cache specifies how many sequence numbers are to be preallocated and
|
||||
stored in memory for faster access. The minimum value is 1 (only one
|
||||
value can be generated at a time, i.e., no cache), and this is also
|
||||
the default.
|
||||
type: int
|
||||
cycle:
|
||||
description:
|
||||
- The cycle option allows the sequence to wrap around when the I(maxvalue)
|
||||
or I(minvalue) has been reached by an ascending or descending sequence
|
||||
respectively. If the limit is reached, the next number generated will be
|
||||
the minvalue or maxvalue, respectively.
|
||||
- If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
|
||||
has reached its maximum value will return an error. False (NO CYCLE) is
|
||||
the default.
|
||||
type: bool
|
||||
default: no
|
||||
cascade:
|
||||
description:
|
||||
- Automatically drop objects that depend on the sequence, and in turn all
|
||||
objects that depend on those objects.
|
||||
- Ignored if I(state=present).
|
||||
- Only used with I(state=absent).
|
||||
type: bool
|
||||
default: no
|
||||
rename_to:
|
||||
description:
|
||||
- The new name for the I(sequence).
|
||||
- Works only for existing sequences.
|
||||
type: str
|
||||
owner:
|
||||
description:
|
||||
- Set the owner for the I(sequence).
|
||||
type: str
|
||||
schema:
|
||||
description:
|
||||
- The schema of the I(sequence). This is used to create and relocate
|
||||
a I(sequence) in the given schema.
|
||||
default: public
|
||||
type: str
|
||||
newschema:
|
||||
description:
|
||||
- The new schema for the I(sequence). Will be used for moving a
|
||||
I(sequence) to another I(schema).
|
||||
- Works only for existing sequences.
|
||||
type: str
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified I(session_role)
|
||||
must be a role that the current I(login_user) is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the I(session_role) were the one that had logged in originally.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to and run queries against.
|
||||
type: str
|
||||
aliases:
|
||||
- database
|
||||
- login_db
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(sequence), I(schema), I(rename_to),
|
||||
I(owner), I(newschema), I(session_role) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
notes:
|
||||
- If you do not pass db parameter, sequence will be created in the database
|
||||
named postgres.
|
||||
seealso:
|
||||
- module: community.general.postgresql_table
|
||||
- module: community.general.postgresql_owner
|
||||
- module: community.general.postgresql_privs
|
||||
- module: community.general.postgresql_tablespace
|
||||
- name: CREATE SEQUENCE reference
|
||||
description: Complete reference of the CREATE SEQUENCE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createsequence.html
|
||||
- name: ALTER SEQUENCE reference
|
||||
description: Complete reference of the ALTER SEQUENCE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-altersequence.html
|
||||
- name: DROP SEQUENCE reference
|
||||
description: Complete reference of the DROP SEQUENCE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-dropsequence.html
|
||||
author:
|
||||
- Tobias Birkefeld (@tcraxs)
|
||||
- Thomas O'Donnell (@andytom)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create an ascending bigint sequence called foobar in the default
|
||||
database
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
|
||||
- name: Create an ascending integer sequence called foobar, starting at 101
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
data_type: integer
|
||||
start: 101
|
||||
|
||||
- name: Create a descending sequence called foobar, starting at 101, and
|
||||
preallocate 10 sequence numbers in cache
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
increment: -1
|
||||
cache: 10
|
||||
start: 101
|
||||
|
||||
- name: Create an ascending sequence called foobar, which cycles between 1 and 10
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
cycle: yes
|
||||
min: 1
|
||||
max: 10
|
||||
|
||||
- name: Create an ascending bigint sequence called foobar in the default
|
||||
database with owner foobar
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
owner: foobar
|
||||
|
||||
- name: Rename an existing sequence named foo to bar
|
||||
community.general.postgresql_sequence:
|
||||
name: foo
|
||||
rename_to: bar
|
||||
|
||||
- name: Change the schema of an existing sequence to foobar
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
newschema: foobar
|
||||
|
||||
- name: Change the owner of an existing sequence to foobar
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
owner: foobar
|
||||
|
||||
- name: Drop a sequence called foobar
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
state: absent
|
||||
|
||||
- name: Drop a sequence called foobar with cascade
|
||||
community.general.postgresql_sequence:
|
||||
name: foobar
|
||||
cascade: yes
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
state:
|
||||
description: Sequence state at the end of execution.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'present'
|
||||
sequence:
|
||||
description: Sequence name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'foobar'
|
||||
queries:
|
||||
description: List of queries that the module tried to execute.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ "CREATE SEQUENCE \"foo\"" ]
|
||||
schema:
|
||||
description: Name of the schema of the sequence
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'foo'
|
||||
data_type:
|
||||
description: Shows the current data type of the sequence.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'bigint'
|
||||
increment:
|
||||
description: The value of increment of the sequence. A positive value will
|
||||
make an ascending sequence, a negative one a descending
|
||||
sequence.
|
||||
returned: always
|
||||
type: int
|
||||
sample: '-1'
|
||||
minvalue:
|
||||
description: The value of minvalue of the sequence.
|
||||
returned: always
|
||||
type: int
|
||||
sample: '1'
|
||||
maxvalue:
|
||||
description: The value of maxvalue of the sequence.
|
||||
returned: always
|
||||
type: int
|
||||
sample: '9223372036854775807'
|
||||
start:
|
||||
description: The value of start of the sequence.
|
||||
returned: always
|
||||
type: int
|
||||
sample: '12'
|
||||
cycle:
|
||||
description: Shows whether the sequence cycles or not.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'NO'
|
||||
owner:
|
||||
description: Shows the current owner of the sequence
|
||||
after the successful run of the task.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'postgres'
|
||||
newname:
|
||||
description: Shows the new sequence name after rename.
|
||||
returned: on success
|
||||
type: str
|
||||
sample: 'barfoo'
|
||||
newschema:
|
||||
description: Shows the new schema of the sequence after schema change.
|
||||
returned: on success
|
||||
type: str
|
||||
sample: 'foobar'
|
||||
'''
|
||||
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
class Sequence(object):
|
||||
"""Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.
|
||||
|
||||
Arguments:
|
||||
module (AnsibleModule) -- object of AnsibleModule class
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
|
||||
Attributes:
|
||||
module (AnsibleModule) -- object of AnsibleModule class
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
changed (bool) -- something was changed after execution or not
|
||||
executed_queries (list) -- executed queries
|
||||
name (str) -- name of the sequence
|
||||
owner (str) -- name of the owner of the sequence
|
||||
schema (str) -- name of the schema (default: public)
|
||||
data_type (str) -- data type of the sequence
|
||||
start_value (int) -- value of the sequence start
|
||||
minvalue (int) -- minimum value of the sequence
|
||||
maxvalue (int) -- maximum value of the sequence
|
||||
increment (int) -- increment value of the sequence
|
||||
cycle (bool) -- sequence can cycle or not
|
||||
new_name (str) -- name of the renamed sequence
|
||||
new_schema (str) -- name of the new schema
|
||||
exists (bool) -- sequence exists or not
|
||||
"""
|
||||
|
||||
def __init__(self, module, cursor):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.executed_queries = []
|
||||
self.name = self.module.params['sequence']
|
||||
self.owner = ''
|
||||
self.schema = self.module.params['schema']
|
||||
self.data_type = ''
|
||||
self.start_value = ''
|
||||
self.minvalue = ''
|
||||
self.maxvalue = ''
|
||||
self.increment = ''
|
||||
self.cycle = ''
|
||||
self.new_name = ''
|
||||
self.new_schema = ''
|
||||
self.exists = False
|
||||
# Collect info
|
||||
self.get_info()
|
||||
|
||||
def get_info(self):
|
||||
"""Getter to refresh and get sequence info"""
|
||||
query = ("SELECT "
|
||||
"s.sequence_schema AS schemaname, "
|
||||
"s.sequence_name AS sequencename, "
|
||||
"pg_get_userbyid(c.relowner) AS sequenceowner, "
|
||||
"s.data_type::regtype AS data_type, "
|
||||
"s.start_value AS start_value, "
|
||||
"s.minimum_value AS min_value, "
|
||||
"s.maximum_value AS max_value, "
|
||||
"s.increment AS increment_by, "
|
||||
"s.cycle_option AS cycle "
|
||||
"FROM information_schema.sequences s "
|
||||
"JOIN pg_class c ON c.relname = s.sequence_name "
|
||||
"LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
|
||||
"WHERE NOT pg_is_other_temp_schema(n.oid) "
|
||||
"AND c.relkind = 'S'::\"char\" "
|
||||
"AND sequence_name = %(name)s "
|
||||
"AND sequence_schema = %(schema)s")
|
||||
|
||||
res = exec_sql(self, query,
|
||||
query_params={'name': self.name, 'schema': self.schema},
|
||||
add_to_executed=False)
|
||||
|
||||
if not res:
|
||||
self.exists = False
|
||||
return False
|
||||
|
||||
if res:
|
||||
self.exists = True
|
||||
self.schema = res[0]['schemaname']
|
||||
self.name = res[0]['sequencename']
|
||||
self.owner = res[0]['sequenceowner']
|
||||
self.data_type = res[0]['data_type']
|
||||
self.start_value = res[0]['start_value']
|
||||
self.minvalue = res[0]['min_value']
|
||||
self.maxvalue = res[0]['max_value']
|
||||
self.increment = res[0]['increment_by']
|
||||
self.cycle = res[0]['cycle']
|
||||
|
||||
def create(self):
|
||||
"""Implements CREATE SEQUENCE command behavior."""
|
||||
query = ['CREATE SEQUENCE']
|
||||
query.append(self.__add_schema())
|
||||
|
||||
if self.module.params.get('data_type'):
|
||||
query.append('AS %s' % self.module.params['data_type'])
|
||||
|
||||
if self.module.params.get('increment'):
|
||||
query.append('INCREMENT BY %s' % self.module.params['increment'])
|
||||
|
||||
if self.module.params.get('minvalue'):
|
||||
query.append('MINVALUE %s' % self.module.params['minvalue'])
|
||||
|
||||
if self.module.params.get('maxvalue'):
|
||||
query.append('MAXVALUE %s' % self.module.params['maxvalue'])
|
||||
|
||||
if self.module.params.get('start'):
|
||||
query.append('START WITH %s' % self.module.params['start'])
|
||||
|
||||
if self.module.params.get('cache'):
|
||||
query.append('CACHE %s' % self.module.params['cache'])
|
||||
|
||||
if self.module.params.get('cycle'):
|
||||
query.append('CYCLE')
|
||||
|
||||
return exec_sql(self, ' '.join(query), return_bool=True)
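# Illustrative sketch (not part of the original module): with schema=public,
# sequence=foo, data_type=bigint, increment=1, maxvalue=100 and cycle=yes,
# the create() method above would execute roughly:
#   CREATE SEQUENCE "public"."foo" AS bigint INCREMENT BY 1 MAXVALUE 100 CYCLE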
|
||||
|
||||
def drop(self):
|
||||
"""Implements DROP SEQUENCE command behavior."""
|
||||
query = ['DROP SEQUENCE']
|
||||
query.append(self.__add_schema())
|
||||
|
||||
if self.module.params.get('cascade'):
|
||||
query.append('CASCADE')
|
||||
|
||||
return exec_sql(self, ' '.join(query), return_bool=True)
|
||||
|
||||
def rename(self):
|
||||
"""Implements ALTER SEQUENCE RENAME TO command behavior."""
|
||||
query = ['ALTER SEQUENCE']
|
||||
query.append(self.__add_schema())
|
||||
query.append('RENAME TO "%s"' % self.module.params['rename_to'])
|
||||
|
||||
return exec_sql(self, ' '.join(query), return_bool=True)
|
||||
|
||||
def set_owner(self):
|
||||
"""Implements ALTER SEQUENCE OWNER TO command behavior."""
|
||||
query = ['ALTER SEQUENCE']
|
||||
query.append(self.__add_schema())
|
||||
query.append('OWNER TO "%s"' % self.module.params['owner'])
|
||||
|
||||
return exec_sql(self, ' '.join(query), return_bool=True)
|
||||
|
||||
def set_schema(self):
|
||||
"""Implements ALTER SEQUENCE SET SCHEMA command behavior."""
|
||||
query = ['ALTER SEQUENCE']
|
||||
query.append(self.__add_schema())
|
||||
query.append('SET SCHEMA "%s"' % self.module.params['newschema'])
|
||||
|
||||
return exec_sql(self, ' '.join(query), return_bool=True)
|
||||
|
||||
def __add_schema(self):
|
||||
return '"%s"."%s"' % (self.schema, self.name)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
sequence=dict(type='str', required=True, aliases=['name']),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
|
||||
increment=dict(type='int'),
|
||||
minvalue=dict(type='int', aliases=['min']),
|
||||
maxvalue=dict(type='int', aliases=['max']),
|
||||
start=dict(type='int'),
|
||||
cache=dict(type='int'),
|
||||
cycle=dict(type='bool', default=False),
|
||||
schema=dict(type='str', default='public'),
|
||||
cascade=dict(type='bool', default=False),
|
||||
rename_to=dict(type='str'),
|
||||
owner=dict(type='str'),
|
||||
newschema=dict(type='str'),
|
||||
db=dict(type='str', default='', aliases=['login_db', 'database']),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type="bool", default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[
|
||||
['rename_to', 'data_type'],
|
||||
['rename_to', 'increment'],
|
||||
['rename_to', 'minvalue'],
|
||||
['rename_to', 'maxvalue'],
|
||||
['rename_to', 'start'],
|
||||
['rename_to', 'cache'],
|
||||
['rename_to', 'cycle'],
|
||||
['rename_to', 'cascade'],
|
||||
['rename_to', 'owner'],
|
||||
['rename_to', 'newschema'],
|
||||
['cascade', 'data_type'],
|
||||
['cascade', 'increment'],
|
||||
['cascade', 'minvalue'],
|
||||
['cascade', 'maxvalue'],
|
||||
['cascade', 'start'],
|
||||
['cascade', 'cache'],
|
||||
['cascade', 'cycle'],
|
||||
['cascade', 'owner'],
|
||||
['cascade', 'newschema'],
|
||||
]
|
||||
)
|
||||
|
||||
if not module.params["trust_input"]:
|
||||
check_input(
|
||||
module,
|
||||
module.params['sequence'],
|
||||
module.params['schema'],
|
||||
module.params['rename_to'],
|
||||
module.params['owner'],
|
||||
module.params['newschema'],
|
||||
module.params['session_role'],
|
||||
)
|
||||
|
||||
# Note: we don't need to check mutually exclusive params here, because they are
|
||||
# checked automatically by AnsibleModule (mutually_exclusive=[] list above).
|
||||
|
||||
# Change autocommit to False if check_mode:
|
||||
autocommit = not module.check_mode
|
||||
# Connect to DB and make cursor object:
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
##############
|
||||
# Create the object and do main job:
|
||||
data = Sequence(module, cursor)
|
||||
|
||||
# Set defaults:
|
||||
changed = False
|
||||
|
||||
# Create new sequence
|
||||
if not data.exists and module.params['state'] == 'present':
|
||||
if module.params.get('rename_to'):
|
||||
module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
|
||||
if module.params.get('newschema'):
|
||||
module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])
|
||||
|
||||
changed = data.create()
|
||||
|
||||
# Drop non-existing sequence
|
||||
elif not data.exists and module.params['state'] == 'absent':
|
||||
# Nothing to do
|
||||
changed = False
|
||||
|
||||
# Drop existing sequence
|
||||
elif data.exists and module.params['state'] == 'absent':
|
||||
changed = data.drop()
|
||||
|
||||
# Rename sequence
|
||||
if data.exists and module.params.get('rename_to'):
|
||||
if data.name != module.params['rename_to']:
|
||||
changed = data.rename()
|
||||
if changed:
|
||||
data.new_name = module.params['rename_to']
|
||||
|
||||
# Refresh information
|
||||
if module.params['state'] == 'present':
|
||||
data.get_info()
|
||||
|
||||
# Change owner, schema and settings
|
||||
if module.params['state'] == 'present' and data.exists:
|
||||
# change owner
|
||||
if module.params.get('owner'):
|
||||
if data.owner != module.params['owner']:
|
||||
changed = data.set_owner()
|
||||
|
||||
# Set schema
|
||||
if module.params.get('newschema'):
|
||||
if data.schema != module.params['newschema']:
|
||||
changed = data.set_schema()
|
||||
if changed:
|
||||
data.new_schema = module.params['newschema']
|
||||
|
||||
# Rollback if it's possible and check_mode:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Make return values:
|
||||
kw = dict(
|
||||
changed=changed,
|
||||
state='present',
|
||||
sequence=data.name,
|
||||
queries=data.executed_queries,
|
||||
schema=data.schema,
|
||||
data_type=data.data_type,
|
||||
increment=data.increment,
|
||||
minvalue=data.minvalue,
|
||||
maxvalue=data.maxvalue,
|
||||
start=data.start_value,
|
||||
cycle=data.cycle,
|
||||
owner=data.owner,
|
||||
)
|
||||
|
||||
if module.params['state'] == 'present':
|
||||
if data.new_name:
|
||||
kw['newname'] = data.new_name
|
||||
if data.new_schema:
|
||||
kw['newschema'] = data.new_schema
|
||||
|
||||
elif module.params['state'] == 'absent':
|
||||
kw['state'] = 'absent'
|
||||
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,447 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_set
|
||||
short_description: Change a PostgreSQL server configuration parameter
|
||||
description:
|
||||
- Allows changing a PostgreSQL server configuration parameter.
- The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
- ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
- It can be more convenient and safer than the traditional method of manually editing the postgresql.conf file.
- ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file, which is read in addition to postgresql.conf.
- The module allows resetting a parameter to its boot_val (the cluster initial value) with I(reset=yes), or removing the parameter line from postgresql.auto.conf and reloading the configuration with I(value=default) (settings with postmaster context require a restart).
- After the change, the previous and the new parameter values and other information are available in the returned values and can be displayed with the M(ansible.builtin.debug) module.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of PostgreSQL server parameter.
|
||||
type: str
|
||||
required: true
|
||||
value:
|
||||
description:
|
||||
- Parameter value to set.
|
||||
- To remove a parameter line from postgresql.auto.conf and reload the server configuration, pass I(value=default). With I(value=default) the task always reports C(changed=true).
|
||||
type: str
|
||||
reset:
|
||||
description:
|
||||
- Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
|
||||
type: bool
|
||||
default: false
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
notes:
|
||||
- Supported version of PostgreSQL is 9.4 and later.
|
||||
- Note that changing a setting with 'postmaster' context can report C(changed=true)
when nothing actually changes, because the same value may be presented in
several different forms, for example 1024MB, 1GB, etc., while in the pg_settings
system view it is stored as, for instance, 131072 pages of 8kB.
The final check of the parameter value cannot detect this because the server was
not restarted and the value in pg_settings is not updated yet.
|
||||
- For some parameters restart of PostgreSQL server is required.
|
||||
See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
|
||||
seealso:
|
||||
- module: community.general.postgresql_info
|
||||
- name: PostgreSQL server configuration
|
||||
description: General information about PostgreSQL server configuration.
|
||||
link: https://www.postgresql.org/docs/current/runtime-config.html
|
||||
- name: PostgreSQL view pg_settings reference
|
||||
description: Complete reference of the pg_settings view documentation.
|
||||
link: https://www.postgresql.org/docs/current/view-pg-settings.html
|
||||
- name: PostgreSQL ALTER SYSTEM command reference
|
||||
description: Complete reference of the ALTER SYSTEM command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-altersystem.html
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Restore wal_keep_segments parameter to initial state
|
||||
community.general.postgresql_set:
|
||||
name: wal_keep_segments
|
||||
reset: yes
|
||||
|
||||
# Set the work_mem parameter to 32MB and show what has been changed and whether a restart is required
# (output example: "msg": "work_mem 4MB >> 32MB restart_req: False")
|
||||
- name: Set work mem parameter
|
||||
community.general.postgresql_set:
|
||||
name: work_mem
|
||||
value: 32mb
|
||||
register: set
|
||||
|
||||
- ansible.builtin.debug:
|
||||
msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
|
||||
when: set.changed
|
||||
# Note that for some parameters a restart of the PostgreSQL server is required.
# In this situation you see the same value in prev_val_pretty and value_pretty, but 'changed=True'
# (if you passed a value that differs from the current server setting).
|
||||
|
||||
- name: Set log_min_duration_statement parameter to 1 second
|
||||
community.general.postgresql_set:
|
||||
name: log_min_duration_statement
|
||||
value: 1s
|
||||
|
||||
- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
|
||||
community.general.postgresql_set:
|
||||
name: wal_log_hints
|
||||
value: default
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
name:
|
||||
description: Name of PostgreSQL server parameter.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'shared_buffers'
|
||||
restart_required:
|
||||
description: Whether a restart of the PostgreSQL server is required to apply the change.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
prev_val_pretty:
|
||||
description: Information about previous state of the parameter.
|
||||
returned: always
|
||||
type: str
|
||||
sample: '4MB'
|
||||
value_pretty:
|
||||
description: Information about current state of the parameter.
|
||||
returned: always
|
||||
type: str
|
||||
sample: '64MB'
|
||||
value:
|
||||
description:
|
||||
- Dictionary that contains the current parameter value (at the time of playbook finish).
|
||||
- Pay attention that for real change some parameters restart of PostgreSQL server is required.
|
||||
- Returns the current value in the check mode.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { "value": 67108864, "unit": "b" }
|
||||
context:
|
||||
description:
|
||||
- PostgreSQL setting context.
|
||||
returned: always
|
||||
type: str
|
||||
sample: user
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
PG_REQ_VER = 90400
|
||||
|
||||
# To allow setting values like 1mb instead of 1MB, etc.:
|
||||
POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
|
||||
def param_get(cursor, module, name):
|
||||
query = ("SELECT name, setting, unit, context, boot_val "
|
||||
"FROM pg_settings WHERE name = %(name)s")
|
||||
try:
|
||||
cursor.execute(query, {'name': name})
|
||||
info = cursor.fetchall()
|
||||
cursor.execute("SHOW %s" % name)
|
||||
val = cursor.fetchone()
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
|
||||
|
||||
raw_val = info[0][1]
|
||||
unit = info[0][2]
|
||||
context = info[0][3]
|
||||
boot_val = info[0][4]
|
||||
|
||||
if val[0] == 'True':
|
||||
val[0] = 'on'
|
||||
elif val[0] == 'False':
|
||||
val[0] = 'off'
|
||||
|
||||
if unit == 'kB':
|
||||
if int(raw_val) > 0:
|
||||
raw_val = int(raw_val) * 1024
|
||||
if int(boot_val) > 0:
|
||||
boot_val = int(boot_val) * 1024
|
||||
|
||||
unit = 'b'
|
||||
|
||||
elif unit == 'MB':
|
||||
if int(raw_val) > 0:
|
||||
raw_val = int(raw_val) * 1024 * 1024
|
||||
if int(boot_val) > 0:
|
||||
boot_val = int(boot_val) * 1024 * 1024
|
||||
|
||||
unit = 'b'
|
||||
|
||||
return (val[0], raw_val, unit, boot_val, context)
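# Illustrative example (not part of the original module): for work_mem set to 4MB,
# param_get(cursor, module, 'work_mem') would return roughly
#   ('4MB', 4194304, 'b', 4194304, 'user')
# i.e. (current value, raw value in bytes, unit, boot value in bytes, context).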
|
||||
|
||||
|
||||
def pretty_to_bytes(pretty_val):
|
||||
# The function returns a value in bytes
|
||||
# if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
|
||||
# Otherwise it returns the passed argument.
|
||||
|
||||
val_in_bytes = None
|
||||
|
||||
if 'kB' in pretty_val:
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024
|
||||
|
||||
elif 'MB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024
|
||||
|
||||
elif 'GB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024 * 1024
|
||||
|
||||
elif 'TB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
|
||||
|
||||
elif 'B' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part
|
||||
|
||||
else:
|
||||
return pretty_val
|
||||
|
||||
return val_in_bytes
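# Illustrative examples (not part of the original module):
#   pretty_to_bytes('512kB') -> 524288
#   pretty_to_bytes('4MB')   -> 4194304
#   pretty_to_bytes('1GB')   -> 1073741824
#   pretty_to_bytes('100')   -> '100'  (no recognized unit, returned unchanged)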
|
||||
|
||||
|
||||
def param_set(cursor, module, name, value, context):
|
||||
try:
|
||||
if str(value).lower() == 'default':
|
||||
query = "ALTER SYSTEM SET %s = DEFAULT" % name
|
||||
else:
|
||||
query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
|
||||
cursor.execute(query)
|
||||
|
||||
if context != 'postmaster':
|
||||
cursor.execute("SELECT pg_reload_conf()")
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
|
||||
|
||||
return True
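# Illustrative sketch (not part of the original module): a call like
#   param_set(cursor, module, 'work_mem', '32MB', 'user')
# executes "ALTER SYSTEM SET work_mem = '32MB'" followed by
# "SELECT pg_reload_conf()" because the context is not 'postmaster'.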
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
name=dict(type='str', required=True),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
value=dict(type='str'),
|
||||
reset=dict(type='bool', default=False),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
name = module.params['name']
|
||||
value = module.params['value']
|
||||
reset = module.params['reset']
|
||||
session_role = module.params['session_role']
|
||||
trust_input = module.params['trust_input']
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, name, value, session_role)
|
||||
|
||||
# Allow to pass values like 1mb instead of 1MB, etc:
|
||||
if value:
|
||||
for unit in POSSIBLE_SIZE_UNITS:
|
||||
if value[:-2].isdigit() and unit in value[-2:]:
|
||||
value = value.upper()
|
||||
|
||||
if value is not None and reset:
|
||||
module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
|
||||
|
||||
if value is None and not reset:
|
||||
module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
|
||||
|
||||
conn_params = get_conn_params(module, module.params, warn_db_default=False)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
kw = {}
|
||||
# Check server version (needs 9.4 or later):
|
||||
ver = db_connection.server_version
|
||||
if ver < PG_REQ_VER:
|
||||
module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
|
||||
kw = dict(
|
||||
changed=False,
|
||||
restart_required=False,
|
||||
value_pretty="",
|
||||
prev_val_pretty="",
|
||||
value={"value": "", "unit": ""},
|
||||
)
|
||||
kw['name'] = name
|
||||
db_connection.close()
|
||||
module.exit_json(**kw)
|
||||
|
||||
# Set default returned values:
|
||||
restart_required = False
|
||||
changed = False
|
||||
kw['name'] = name
|
||||
kw['restart_required'] = False
|
||||
|
||||
# Get info about param state:
|
||||
res = param_get(cursor, module, name)
|
||||
current_value = res[0]
|
||||
raw_val = res[1]
|
||||
unit = res[2]
|
||||
boot_val = res[3]
|
||||
context = res[4]
|
||||
|
||||
if value == 'True':
|
||||
value = 'on'
|
||||
elif value == 'False':
|
||||
value = 'off'
|
||||
|
||||
kw['prev_val_pretty'] = current_value
|
||||
kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
|
||||
kw['context'] = context
|
||||
|
||||
# Do job
|
||||
if context == "internal":
|
||||
module.fail_json(msg="%s: cannot be changed (internal context). See "
|
||||
"https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
|
||||
|
||||
if context == "postmaster":
|
||||
restart_required = True
|
||||
|
||||
# If check_mode, just compare and exit:
|
||||
if module.check_mode:
|
||||
if pretty_to_bytes(value) == pretty_to_bytes(current_value):
|
||||
kw['changed'] = False
|
||||
|
||||
else:
|
||||
kw['value_pretty'] = value
|
||||
kw['changed'] = True
|
||||
|
||||
# Anyway returns current raw value in the check_mode:
|
||||
kw['value'] = dict(
|
||||
value=raw_val,
|
||||
unit=unit,
|
||||
)
|
||||
kw['restart_required'] = restart_required
|
||||
module.exit_json(**kw)
|
||||
|
||||
# Set param (value can be an empty string):
|
||||
if value is not None and value != current_value:
|
||||
changed = param_set(cursor, module, name, value, context)
|
||||
|
||||
kw['value_pretty'] = value
|
||||
|
||||
# Reset param:
|
||||
elif reset:
|
||||
if raw_val == boot_val:
|
||||
# nothing to change, exit:
|
||||
kw['value'] = dict(
|
||||
value=raw_val,
|
||||
unit=unit,
|
||||
)
|
||||
module.exit_json(**kw)
|
||||
|
||||
changed = param_set(cursor, module, name, boot_val, context)
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Reconnect and recheck current value:
|
||||
if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
res = param_get(cursor, module, name)
|
||||
# f_ means 'final'
|
||||
f_value = res[0]
|
||||
f_raw_val = res[1]
|
||||
|
||||
if raw_val == f_raw_val:
|
||||
changed = False
|
||||
|
||||
else:
|
||||
changed = True
|
||||
|
||||
kw['value_pretty'] = f_value
|
||||
kw['value'] = dict(
|
||||
value=f_raw_val,
|
||||
unit=unit,
|
||||
)
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
kw['changed'] = changed
|
||||
kw['restart_required'] = restart_required
|
||||
|
||||
if restart_required and changed:
|
||||
module.warn("Restart of PostgreSQL is required for setting %s" % name)
|
||||
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,304 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: postgresql_slot
|
||||
short_description: Add or remove replication slots from a PostgreSQL database
|
||||
description:
|
||||
- Add or remove physical or logical replication slots from a PostgreSQL database.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the replication slot to add or remove.
|
||||
type: str
|
||||
required: yes
|
||||
aliases:
|
||||
- slot_name
|
||||
slot_type:
|
||||
description:
|
||||
- Slot type.
|
||||
type: str
|
||||
default: physical
|
||||
choices: [ logical, physical ]
|
||||
state:
|
||||
description:
|
||||
- The slot state.
|
||||
- I(state=present) implies the slot must be present in the system.
|
||||
- I(state=absent) implies that if the I(name) slot exists, it will be removed.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
immediately_reserve:
|
||||
description:
|
||||
- Optional parameter. When C(yes), the LSN for this replication slot is reserved immediately; with the default C(no), the LSN is reserved on the first connection from a streaming replication client.
- Available from PostgreSQL version 9.6.
- Used only with I(slot_type=physical).
|
||||
- Mutually exclusive with I(slot_type=logical).
|
||||
type: bool
|
||||
default: no
|
||||
output_plugin:
|
||||
description:
|
||||
- All logical slots must indicate which output plugin decoder they're using.
|
||||
- This parameter does not apply to physical slots.
|
||||
- It will be ignored with I(slot_type=physical).
|
||||
type: str
|
||||
default: "test_decoding"
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether the value of I(session_role) is potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
|
||||
notes:
|
||||
- Physical replication slots were introduced to PostgreSQL with version 9.4,
|
||||
while logical replication slots were added beginning with version 10.0.
|
||||
|
||||
seealso:
|
||||
- name: PostgreSQL pg_replication_slots view reference
|
||||
description: Complete reference of the PostgreSQL pg_replication_slots view.
|
||||
link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
|
||||
- name: PostgreSQL streaming replication protocol reference
|
||||
description: Complete reference of the PostgreSQL streaming replication protocol documentation.
|
||||
link: https://www.postgresql.org/docs/current/protocol-replication.html
|
||||
- name: PostgreSQL logical replication protocol reference
|
||||
description: Complete reference of the PostgreSQL logical replication protocol documentation.
|
||||
link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
|
||||
|
||||
author:
|
||||
- John Scalia (@jscalia)
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
- Thomas O'Donnell (@andytom)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create physical_one physical slot if it does not exist
|
||||
become_user: postgres
|
||||
community.general.postgresql_slot:
|
||||
slot_name: physical_one
|
||||
db: ansible
|
||||
|
||||
- name: Remove physical_one slot if it exists
|
||||
become_user: postgres
|
||||
community.general.postgresql_slot:
|
||||
slot_name: physical_one
|
||||
db: ansible
|
||||
state: absent
|
||||
|
||||
- name: Create logical_slot_one logical slot in the database acme if it does not exist
|
||||
community.general.postgresql_slot:
|
||||
name: logical_slot_one
|
||||
slot_type: logical
|
||||
state: present
|
||||
output_plugin: custom_decoder_one
|
||||
db: "acme"
|
||||
|
||||
- name: Remove logical_one slot if it exists from the cluster running on another host and non-standard port
|
||||
community.general.postgresql_slot:
|
||||
name: logical_one
|
||||
login_host: mydatabase.example.org
|
||||
port: 5433
|
||||
login_user: ourSuperuser
|
||||
login_password: thePassword
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
name:
|
||||
description: Name of the slot
|
||||
returned: always
|
||||
type: str
|
||||
sample: "physical_one"
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
class PgSlot(object):
|
||||
def __init__(self, module, cursor, name):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.name = name
|
||||
self.exists = False
|
||||
self.kind = ''
|
||||
self.__slot_exists()
|
||||
self.changed = False
|
||||
self.executed_queries = []
|
||||
|
||||
def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
|
||||
if self.exists:
|
||||
if self.kind == kind:
|
||||
return False
|
||||
else:
|
||||
self.module.warn("slot with name '%s' already exists "
|
||||
"but has another type '%s'" % (self.name, self.kind))
|
||||
return False
|
||||
|
||||
if just_check:
|
||||
return None
|
||||
|
||||
if kind == 'physical':
|
||||
# Check server version (immediately_reserve needs PostgreSQL 9.6+):
|
||||
if self.cursor.connection.server_version < 96000:
|
||||
query = "SELECT pg_create_physical_replication_slot(%(name)s)"
|
||||
|
||||
else:
|
||||
query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
|
||||
|
||||
self.changed = exec_sql(self, query,
|
||||
query_params={'name': self.name, 'i_reserve': immediately_reserve},
|
||||
return_bool=True)
|
||||
|
||||
elif kind == 'logical':
|
||||
query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
|
||||
self.changed = exec_sql(self, query,
|
||||
query_params={'name': self.name, 'o_plugin': output_plugin}, return_bool=True)
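# Illustrative sketch (not part of the original module): on PostgreSQL 9.6+ a
# physical slot is created with
#   SELECT pg_create_physical_replication_slot('physical_one', false)
# while a logical slot uses
#   SELECT pg_create_logical_replication_slot('logical_slot_one', 'test_decoding')
# (slot names taken from the EXAMPLES section above).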
|
||||
|
||||
def drop(self):
|
||||
if not self.exists:
|
||||
return False
|
||||
|
||||
query = "SELECT pg_drop_replication_slot(%(name)s)"
|
||||
self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True)
|
||||
|
||||
def __slot_exists(self):
|
||||
query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
|
||||
res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
|
||||
if res:
|
||||
self.exists = True
|
||||
self.kind = res[0][0]
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
db=dict(type="str", aliases=["login_db"]),
|
||||
name=dict(type="str", required=True, aliases=["slot_name"]),
|
||||
slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
|
||||
immediately_reserve=dict(type="bool", default=False),
|
||||
session_role=dict(type="str"),
|
||||
output_plugin=dict(type="str", default="test_decoding"),
|
||||
state=dict(type="str", default="present", choices=["absent", "present"]),
|
||||
trust_input=dict(type="bool", default=True),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
name = module.params["name"]
|
||||
slot_type = module.params["slot_type"]
|
||||
immediately_reserve = module.params["immediately_reserve"]
|
||||
state = module.params["state"]
|
||||
output_plugin = module.params["output_plugin"]
|
||||
|
||||
if not module.params["trust_input"]:
|
||||
check_input(module, module.params['session_role'])
|
||||
|
||||
if immediately_reserve and slot_type == 'logical':
|
||||
module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
|
||||
|
||||
# When slot_type is logical and parameter db is not passed,
|
||||
# the default database will be used to create the slot and
|
||||
# the user should know about this.
|
||||
# When the slot type is physical,
|
||||
# it doesn't matter which database will be used
|
||||
# because physical slots are global objects.
|
||||
if slot_type == 'logical':
|
||||
warn_db_default = True
|
||||
else:
|
||||
warn_db_default = False
|
||||
|
||||
conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
##################################
|
||||
# Create an object and do main job
|
||||
pg_slot = PgSlot(module, cursor, name)
|
||||
|
||||
changed = False
|
||||
|
||||
if module.check_mode:
|
||||
if state == "present":
|
||||
if not pg_slot.exists:
|
||||
changed = True
|
||||
|
||||
pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)
|
||||
|
||||
elif state == "absent":
|
||||
if pg_slot.exists:
|
||||
changed = True
|
||||
else:
|
||||
if state == "absent":
|
||||
pg_slot.drop()
|
||||
|
||||
elif state == "present":
|
||||
pg_slot.create(slot_type, immediately_reserve, output_plugin)
|
||||
|
||||
changed = pg_slot.changed
|
||||
|
||||
db_connection.close()
|
||||
module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@@ -1,717 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_subscription
|
||||
short_description: Add, update, or remove PostgreSQL subscription
|
||||
description:
|
||||
- Add, update, or remove PostgreSQL subscription.
|
||||
version_added: '0.2.0'
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the subscription to add, update, or remove.
|
||||
type: str
|
||||
required: yes
|
||||
db:
|
||||
description:
|
||||
- Name of the database to connect to and where
|
||||
the subscription state will be changed.
|
||||
aliases: [ login_db ]
|
||||
type: str
|
||||
required: yes
|
||||
state:
|
||||
description:
|
||||
- The subscription state.
|
||||
- C(present) implies that if I(name) subscription doesn't exist, it will be created.
|
||||
- C(absent) implies that if I(name) subscription exists, it will be removed.
|
||||
- C(refresh) implies that if I(name) subscription exists, it will be refreshed.
|
||||
Fetches missing table information from the publisher. Always returns ``changed`` as ``True``.
|
||||
This will start replication of tables that were added to the subscribed-to publications
|
||||
since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
|
||||
The existing data in the publications that are being subscribed to
|
||||
should be copied once the replication starts.
|
||||
- For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
|
||||
type: str
|
||||
choices: [ absent, present, refresh ]
|
||||
default: present
|
||||
owner:
|
||||
description:
|
||||
- Subscription owner.
|
||||
- If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
|
||||
- Ignored when I(state) is not C(present).
|
||||
type: str
|
||||
publications:
|
||||
description:
|
||||
- The publication names on the publisher to use for the subscription.
|
||||
- Ignored when I(state) is not C(present).
|
||||
type: list
|
||||
elements: str
|
||||
connparams:
|
||||
description:
|
||||
- Dictionary of connection parameters (parameter: value) used to connect to the publisher.
|
||||
- For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
|
||||
- Ignored when I(state) is not C(present).
|
||||
type: dict
|
||||
cascade:
|
||||
description:
|
||||
- Drop subscription dependencies. Has effect with I(state=absent) only.
|
||||
- Ignored when I(state) is not C(absent).
|
||||
type: bool
|
||||
default: false
|
||||
subsparams:
|
||||
description:
|
||||
- Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
|
||||
- When updating the subscription, the allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
|
||||
- See available parameters to create a new subscription
|
||||
on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
|
||||
- Ignored when I(state) is not C(present).
|
||||
type: dict
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
version_added: '0.2.0'
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(name), I(publications), I(owner),
|
||||
I(session_role), I(connparams), I(subsparams) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
|
||||
notes:
|
||||
- PostgreSQL version must be 10 or greater.
|
||||
|
||||
seealso:
|
||||
- module: community.general.postgresql_publication
|
||||
- module: community.general.postgresql_info
|
||||
- name: CREATE SUBSCRIPTION reference
|
||||
description: Complete reference of the CREATE SUBSCRIPTION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createsubscription.html
|
||||
- name: ALTER SUBSCRIPTION reference
|
||||
description: Complete reference of the ALTER SUBSCRIPTION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-altersubscription.html
|
||||
- name: DROP SUBSCRIPTION reference
|
||||
description: Complete reference of the DROP SUBSCRIPTION command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
|
||||
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: >
|
||||
Create acme subscription in mydb database using acme_publication and
|
||||
the following connection parameters to connect to the publisher.
|
||||
Set the subscription owner as alice.
|
||||
community.general.postgresql_subscription:
|
||||
db: mydb
|
||||
name: acme
|
||||
state: present
|
||||
publications: acme_publication
|
||||
owner: alice
|
||||
connparams:
|
||||
host: 127.0.0.1
|
||||
port: 5432
|
||||
user: repl
|
||||
password: replpass
|
||||
dbname: mydb
|
||||
|
||||
- name: Assuming that acme subscription exists, try to change conn parameters
|
||||
community.general.postgresql_subscription:
|
||||
db: mydb
|
||||
name: acme
|
||||
connparams:
|
||||
host: 127.0.0.1
|
||||
port: 5432
|
||||
user: repl
|
||||
password: replpass
|
||||
connect_timeout: 100
|
||||
|
||||
- name: Refresh acme publication
|
||||
community.general.postgresql_subscription:
|
||||
db: mydb
|
||||
name: acme
|
||||
state: refresh
|
||||
|
||||
- name: Drop acme subscription from mydb with dependencies (cascade=yes)
|
||||
community.general.postgresql_subscription:
|
||||
db: mydb
|
||||
name: acme
|
||||
state: absent
|
||||
cascade: yes
|
||||
|
||||
- name: Assuming that acme subscription exists and enabled, disable the subscription
|
||||
community.general.postgresql_subscription:
|
||||
db: mydb
|
||||
name: acme
|
||||
state: present
|
||||
subsparams:
|
||||
enabled: no
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
name:
|
||||
description:
|
||||
- Name of the subscription.
|
||||
returned: always
|
||||
type: str
|
||||
sample: acme
|
||||
exists:
|
||||
description:
|
||||
- Flag indicating whether the subscription exists at the end of runtime.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
|
||||
initial_state:
|
||||
description: Subscription configuration at the beginning of runtime.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
|
||||
final_state:
|
||||
description: Subscription configuration at the end of runtime.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
|
||||
'''
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import check_input
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils.six import iteritems
|
||||
|
||||
SUPPORTED_PG_VERSION = 10000
|
||||
|
||||
SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
|
||||
|
||||
|
||||
################################
|
||||
# Module functions and classes #
|
||||
################################
|
||||
|
||||
def convert_conn_params(conn_dict):
|
||||
"""Converts the passed connection dictionary to string.
|
||||
|
||||
Args:
|
||||
conn_dict (dict): Dictionary which needs to be converted.
|
||||
|
||||
Returns:
|
||||
Connection string.
|
||||
"""
|
||||
conn_list = []
|
||||
for (param, val) in iteritems(conn_dict):
|
||||
conn_list.append('%s=%s' % (param, val))
|
||||
|
||||
return ' '.join(conn_list)
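# Illustrative example (not part of the original module):
#   convert_conn_params({'host': '127.0.0.1', 'port': 5432, 'user': 'repl'})
#   -> "host=127.0.0.1 port=5432 user=repl" (key order may vary)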
|
||||
|
||||
|
||||
def convert_subscr_params(params_dict):
|
||||
"""Converts the passed params dictionary to string.
|
||||
|
||||
Args:
|
||||
params_dict (dict): Dictionary which needs to be converted.
|
||||
|
||||
Returns:
|
||||
Parameters string.
|
||||
"""
|
||||
params_list = []
|
||||
for (param, val) in iteritems(params_dict):
|
||||
if val is False:
|
||||
val = 'false'
|
||||
elif val is True:
|
||||
val = 'true'
|
||||
|
||||
params_list.append('%s = %s' % (param, val))
|
||||
|
||||
return ', '.join(params_list)
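# Illustrative example (not part of the original module):
#   convert_subscr_params({'enabled': False, 'create_slot': True})
#   -> "enabled = false, create_slot = true" (key order may vary)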
|
||||
|
||||
|
||||
class PgSubscription():
|
||||
"""Class to work with PostgreSQL subscription.
|
||||
|
||||
Args:
|
||||
module (AnsibleModule): Object of AnsibleModule class.
|
||||
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
|
||||
name (str): The name of the subscription.
|
||||
db (str): The database name the subscription will be associated with.
|
||||
|
||||
Attributes:
|
||||
module (AnsibleModule): Object of AnsibleModule class.
|
||||
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
|
||||
name (str): Name of subscription.
|
||||
executed_queries (list): List of executed queries.
|
||||
attrs (dict): Dict with subscription attributes.
|
||||
exists (bool): Flag indicates the subscription exists or not.
|
||||
"""
|
||||
|
||||
def __init__(self, module, cursor, name, db):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.name = name
|
||||
self.db = db
|
||||
self.executed_queries = []
|
||||
self.attrs = {
|
||||
'owner': None,
|
||||
'enabled': None,
|
||||
'synccommit': None,
|
||||
'conninfo': {},
|
||||
'slotname': None,
|
||||
'publications': [],
|
||||
}
|
||||
self.empty_attrs = deepcopy(self.attrs)
|
||||
self.exists = self.check_subscr()
|
||||
|
||||
def get_info(self):
|
||||
"""Refresh the subscription information.
|
||||
|
||||
Returns:
|
||||
``self.attrs``.
|
||||
"""
|
||||
self.exists = self.check_subscr()
|
||||
return self.attrs
|
||||
|
||||
def check_subscr(self):
|
||||
"""Check the subscription and refresh ``self.attrs`` subscription attribute.
|
||||
|
||||
Returns:
|
||||
True if the subscription with ``self.name`` exists, False otherwise.
|
||||
"""
|
||||
|
||||
subscr_info = self.__get_general_subscr_info()
|
||||
|
||||
if not subscr_info:
|
||||
# The subscription does not exist:
|
||||
self.attrs = deepcopy(self.empty_attrs)
|
||||
return False
|
||||
|
||||
self.attrs['owner'] = subscr_info.get('rolname')
|
||||
self.attrs['enabled'] = subscr_info.get('subenabled')
|
||||
self.attrs['synccommit'] = subscr_info.get('subsynccommit')
|
||||
self.attrs['slotname'] = subscr_info.get('subslotname')
|
||||
self.attrs['publications'] = subscr_info.get('subpublications')
|
||||
if subscr_info.get('subconninfo'):
|
||||
for param in subscr_info['subconninfo'].split(' '):
|
||||
tmp = param.split('=')
|
||||
try:
|
||||
self.attrs['conninfo'][tmp[0]] = int(tmp[1])
|
||||
except ValueError:
|
||||
self.attrs['conninfo'][tmp[0]] = tmp[1]
|
||||
|
||||
return True
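# Illustrative note (not part of the original module): a subconninfo string such as
# "host=127.0.0.1 port=5432 user=repl" is parsed above into
# {'host': '127.0.0.1', 'port': 5432, 'user': 'repl'} with numeric values cast to int.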
|
||||
|
||||
def create(self, connparams, publications, subsparams, check_mode=True):
|
||||
"""Create the subscription.
|
||||
|
||||
Args:
|
||||
connparams (str): Connection string in libpq style.
|
||||
publications (list): Publications on the master to use.
|
||||
subsparams (str): Parameters string in WITH () clause style.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
changed (bool): True if the subscription has been created, otherwise False.
|
||||
"""
|
||||
query_fragments = []
|
||||
query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
|
||||
"PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))
|
||||
|
||||
if subsparams:
|
||||
query_fragments.append("WITH (%s)" % subsparams)
|
||||
|
||||
changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
|
||||
|
||||
return changed
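# Illustrative sketch (not part of the original module): with the first task from
# the EXAMPLES section, the generated statement is roughly
#   CREATE SUBSCRIPTION acme CONNECTION 'host=127.0.0.1 port=5432 user=repl password=replpass dbname=mydb'
#   PUBLICATION acme_publication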
|
||||
|
||||
def update(self, connparams, publications, subsparams, check_mode=True):
|
||||
"""Update the subscription.
|
||||
|
||||
Args:
|
||||
connparams (str): Connection string in libpq style.
|
||||
publications (list): Publications on the master to use.
|
||||
subsparams (dict): Dictionary of optional parameters.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
changed (bool): True if subscription has been updated, otherwise False.
|
||||
"""
|
||||
changed = False
|
||||
|
||||
if connparams:
|
||||
if connparams != self.attrs['conninfo']:
|
||||
changed = self.__set_conn_params(convert_conn_params(connparams),
|
||||
check_mode=check_mode)
|
||||
|
||||
if publications:
|
||||
if sorted(self.attrs['publications']) != sorted(publications):
|
||||
changed = self.__set_publications(publications, check_mode=check_mode)
|
||||
|
||||
if subsparams:
|
||||
params_to_update = []
|
||||
|
||||
for (param, value) in iteritems(subsparams):
|
||||
if param == 'enabled':
|
||||
if self.attrs['enabled'] and value is False:
|
||||
changed = self.enable(enabled=False, check_mode=check_mode)
|
||||
elif not self.attrs['enabled'] and value is True:
|
||||
changed = self.enable(enabled=True, check_mode=check_mode)
|
||||
|
||||
elif param == 'synchronous_commit':
|
||||
if self.attrs['synccommit'] is True and value is False:
|
||||
params_to_update.append("%s = false" % param)
|
||||
elif self.attrs['synccommit'] is False and value is True:
|
||||
params_to_update.append("%s = true" % param)
|
||||
|
||||
elif param == 'slot_name':
|
||||
if self.attrs['slotname'] and self.attrs['slotname'] != value:
|
||||
params_to_update.append("%s = %s" % (param, value))
|
||||
|
||||
else:
|
||||
self.module.warn("Parameter '%s' is not in params supported "
|
||||
"for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))
|
||||
|
||||
if params_to_update:
|
||||
changed = self.__set_params(params_to_update, check_mode=check_mode)
|
||||
|
||||
return changed
|
||||
|
||||
def drop(self, cascade=False, check_mode=True):
|
||||
"""Drop the subscription.
|
||||
|
||||
Kwargs:
|
||||
cascade (bool): Flag indicates that the subscription needs to be deleted
|
||||
with its dependencies.
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
changed (bool): True if the subscription has been removed, otherwise False.
|
||||
"""
|
||||
if self.exists:
|
||||
query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
|
||||
if cascade:
|
||||
query_fragments.append("CASCADE")
|
||||
|
||||
return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
|
||||
|
||||
def set_owner(self, role, check_mode=True):
|
||||
"""Set a subscription owner.
|
||||
|
||||
Args:
|
||||
role (str): Role (user) name that needs to be set as a subscription owner.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def refresh(self, check_mode=True):
|
||||
"""Refresh publication.
|
||||
|
||||
Fetches missing table info from publisher.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __set_params(self, params_to_update, check_mode=True):
|
||||
"""Update optional subscription parameters.
|
||||
|
||||
Args:
|
||||
params_to_update (list): Parameters with values to update.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __set_conn_params(self, connparams, check_mode=True):
|
||||
"""Update connection parameters.
|
||||
|
||||
Args:
|
||||
connparams (str): Connection string in libpq style.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __set_publications(self, publications, check_mode=True):
|
||||
"""Update publications.
|
||||
|
||||
Args:
|
||||
publications (list): Publications on the master to use.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def enable(self, enabled=True, check_mode=True):
|
||||
"""Enable or disable the subscription.
|
||||
|
||||
Kwargs:
|
||||
enabled (bool): Flag indicating whether the subscription needs to be enabled or disabled.
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just make SQL, add it to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
if enabled:
|
||||
query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
|
||||
else:
|
||||
query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name
|
||||
|
||||
return self.__exec_sql(query, check_mode=check_mode)
|
||||
|
||||
def __get_general_subscr_info(self):
|
||||
"""Get and return general subscription information.
|
||||
|
||||
Returns:
|
||||
Dict with subscription information if successful, False otherwise.
|
||||
"""
|
||||
query = ("SELECT d.datname, r.rolname, s.subenabled, "
|
||||
"s.subconninfo, s.subslotname, s.subsynccommit, "
|
||||
"s.subpublications FROM pg_catalog.pg_subscription s "
|
||||
"JOIN pg_catalog.pg_database d "
|
||||
"ON s.subdbid = d.oid "
|
||||
"JOIN pg_catalog.pg_roles AS r "
|
||||
"ON s.subowner = r.oid "
|
||||
"WHERE s.subname = %(name)s AND d.datname = %(db)s")
|
||||
|
||||
result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
|
||||
if result:
|
||||
return result[0]
|
||||
else:
|
||||
return False
|
||||
|
||||
def __exec_sql(self, query, check_mode=False):
|
||||
"""Execute SQL query.
|
||||
|
||||
Note: If we need just to get information from the database,
|
||||
we use ``exec_sql`` function directly.
|
||||
|
||||
Args:
|
||||
query (str): Query that needs to be executed.
|
||||
|
||||
Kwargs:
|
||||
check_mode (bool): If True, don't actually change anything,
|
||||
just add ``query`` to ``self.executed_queries`` and return True.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
if check_mode:
|
||||
self.executed_queries.append(query)
|
||||
return True
|
||||
else:
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
name=dict(type='str', required=True),
|
||||
db=dict(type='str', required=True, aliases=['login_db']),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
|
||||
publications=dict(type='list', elements='str'),
|
||||
connparams=dict(type='dict'),
|
||||
cascade=dict(type='bool', default=False),
|
||||
owner=dict(type='str'),
|
||||
subsparams=dict(type='dict'),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
# Parameters handling:
|
||||
db = module.params['db']
|
||||
name = module.params['name']
|
||||
state = module.params['state']
|
||||
publications = module.params['publications']
|
||||
cascade = module.params['cascade']
|
||||
owner = module.params['owner']
|
||||
subsparams = module.params['subsparams']
|
||||
connparams = module.params['connparams']
|
||||
session_role = module.params['session_role']
|
||||
trust_input = module.params['trust_input']
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
if not subsparams:
|
||||
subsparams_str = None
|
||||
else:
|
||||
subsparams_str = convert_subscr_params(subsparams)
|
||||
|
||||
if not connparams:
|
||||
connparams_str = None
|
||||
else:
|
||||
connparams_str = convert_conn_params(connparams)
|
||||
|
||||
check_input(module, name, publications, owner, session_role,
|
||||
connparams_str, subsparams_str)
|
||||
|
||||
if state == 'present' and cascade:
|
||||
module.warn('parameter "cascade" is ignored when state is not absent')
|
||||
|
||||
if state != 'present':
|
||||
if owner:
|
||||
module.warn("parameter 'owner' is ignored when state is not 'present'")
|
||||
if publications:
|
||||
module.warn("parameter 'publications' is ignored when state is not 'present'")
|
||||
if connparams:
|
||||
module.warn("parameter 'connparams' is ignored when state is not 'present'")
|
||||
if subsparams:
|
||||
module.warn("parameter 'subsparams' is ignored when state is not 'present'")
|
||||
|
||||
# Connect to DB and make cursor object:
|
||||
pg_conn_params = get_conn_params(module, module.params)
|
||||
# We check subscription state without DML queries execution, so set autocommit:
|
||||
db_connection = connect_to_db(module, pg_conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
# Check version:
|
||||
if cursor.connection.server_version < SUPPORTED_PG_VERSION:
|
||||
module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
|
||||
|
||||
# Set defaults:
|
||||
changed = False
|
||||
initial_state = {}
|
||||
final_state = {}
|
||||
|
||||
###################################
|
||||
# Create object and do rock'n'roll:
|
||||
subscription = PgSubscription(module, cursor, name, db)
|
||||
|
||||
if subscription.exists:
|
||||
initial_state = deepcopy(subscription.attrs)
|
||||
final_state = deepcopy(initial_state)
|
||||
|
||||
if state == 'present':
|
||||
if not subscription.exists:
|
||||
if subsparams:
|
||||
subsparams = convert_subscr_params(subsparams)
|
||||
|
||||
if connparams:
|
||||
connparams = convert_conn_params(connparams)
|
||||
|
||||
changed = subscription.create(connparams,
|
||||
publications,
|
||||
subsparams,
|
||||
check_mode=module.check_mode)
|
||||
|
||||
else:
|
||||
changed = subscription.update(connparams,
|
||||
publications,
|
||||
subsparams,
|
||||
check_mode=module.check_mode)
|
||||
|
||||
if owner and subscription.attrs['owner'] != owner:
|
||||
changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed
|
||||
|
||||
elif state == 'absent':
|
||||
changed = subscription.drop(cascade, check_mode=module.check_mode)
|
||||
|
||||
elif state == 'refresh':
|
||||
if not subscription.exists:
|
||||
module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)
|
||||
|
||||
# Always returns True:
|
||||
changed = subscription.refresh(check_mode=module.check_mode)
|
||||
|
||||
# Get final subscription info:
|
||||
final_state = subscription.get_info()
|
||||
|
||||
# Connection is not needed any more:
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Return ret values and exit:
|
||||
module.exit_json(changed=changed,
|
||||
name=name,
|
||||
exists=subscription.exists,
|
||||
queries=subscription.executed_queries,
|
||||
initial_state=initial_state,
|
||||
final_state=final_state)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
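# Illustrative sketch only: convert_conn_params() and convert_subscr_params() are
# imported from module_utils and are not part of this hunk. Assuming they turn the
# connparams/subsparams dicts into a libpq-style connection string and a list of
# "key = value" strings respectively, they behave roughly like this:
def _demo_convert_conn_params(params):
    # {'host': 'db1', 'dbname': 'acme'} -> 'dbname=acme host=db1'
    return ' '.join('%s=%s' % (k, v) for k, v in sorted(params.items()))


def _demo_convert_subscr_params(params):
    # {'enabled': False, 'synchronous_commit': 'off'} -> ['enabled = False', 'synchronous_commit = off']
    return ['%s = %s' % (k, v) for k, v in sorted(params.items())]

# The resulting strings end up inside statements such as
# "CREATE SUBSCRIPTION ... CONNECTION '<connparams>' PUBLICATION ... WITH (<subsparams>)".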
|
|
@@ -1,611 +0,0 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_table
|
||||
short_description: Create, drop, or modify a PostgreSQL table
|
||||
description:
|
||||
- Allows creating, dropping, renaming, or truncating a table, and changing some table attributes.
|
||||
options:
|
||||
table:
|
||||
description:
|
||||
- Table name.
|
||||
required: true
|
||||
aliases:
|
||||
- name
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
|
||||
I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
tablespace:
|
||||
description:
|
||||
- Set a tablespace for the table.
|
||||
required: false
|
||||
type: str
|
||||
owner:
|
||||
description:
|
||||
- Set a table owner.
|
||||
type: str
|
||||
unlogged:
|
||||
description:
|
||||
- Create an unlogged table.
|
||||
type: bool
|
||||
default: no
|
||||
like:
|
||||
description:
|
||||
- Create a table like another table (with similar DDL).
|
||||
Mutually exclusive with I(columns), I(rename), and I(truncate).
|
||||
type: str
|
||||
including:
|
||||
description:
|
||||
- Keywords that are used with the I(like) parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS, or ALL.
|
||||
Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
|
||||
type: str
|
||||
columns:
|
||||
description:
|
||||
- Columns that are needed.
|
||||
type: list
|
||||
elements: str
|
||||
rename:
|
||||
description:
|
||||
- New table name. Mutually exclusive with I(tablespace), I(owner),
|
||||
I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
|
||||
type: str
|
||||
truncate:
|
||||
description:
|
||||
- Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
|
||||
I(like), I(including), I(columns), I(rename), and I(storage_params).
|
||||
type: bool
|
||||
default: no
|
||||
storage_params:
|
||||
description:
|
||||
- Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
|
||||
Mutually exclusive with I(rename) and I(truncate).
|
||||
type: list
|
||||
elements: str
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect and where the table will be created.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting.
|
||||
The specified session_role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
cascade:
|
||||
description:
|
||||
- Automatically drop objects that depend on the table (such as views).
|
||||
Used with I(state=absent) only.
|
||||
type: bool
|
||||
default: no
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
notes:
|
||||
- If you do not pass the I(db) parameter, tables will be created in the database
|
||||
named postgres.
|
||||
- PostgreSQL allows creating a table without columns, so the I(columns) parameter is optional.
|
||||
- Unlogged tables are available from PostgreSQL server version 9.1.
|
||||
seealso:
|
||||
- module: community.general.postgresql_sequence
|
||||
- module: community.general.postgresql_idx
|
||||
- module: community.general.postgresql_info
|
||||
- module: community.general.postgresql_tablespace
|
||||
- module: community.general.postgresql_owner
|
||||
- module: community.general.postgresql_privs
|
||||
- module: community.general.postgresql_copy
|
||||
- name: CREATE TABLE reference
|
||||
description: Complete reference of the CREATE TABLE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createtable.html
|
||||
- name: ALTER TABLE reference
|
||||
description: Complete reference of the ALTER TABLE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-altertable.html
|
||||
- name: DROP TABLE reference
|
||||
description: Complete reference of the DROP TABLE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-droptable.html
|
||||
- name: PostgreSQL data types
|
||||
description: Complete reference of the PostgreSQL data types documentation.
|
||||
link: https://www.postgresql.org/docs/current/datatype.html
|
||||
author:
|
||||
- Andrei Klychkov (@Andersson007)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
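# Illustrative sketch of the SQL assembled from the options documented above; it
# mirrors the create()/create_like() methods further down. Table, column, and
# tablespace names are example values, not defaults.
def _demo_create_table_sql(name, columns=(), storage_params=(), tablespace='', like='', including=''):
    query = 'CREATE TABLE "%s"' % name
    if like:
        query += ' (LIKE "%s"' % like
        for keyword in (k.strip() for k in including.split(',') if k.strip()):
            query += ' INCLUDING %s' % keyword
        query += ')'
    else:
        query += ' (%s)' % ', '.join(columns)
    if storage_params:
        query += ' WITH (%s)' % ', '.join(storage_params)
    if tablespace:
        query += ' TABLESPACE "%s"' % tablespace
    return query

# _demo_create_table_sql('test_table', ['id bigserial primary key', 'num bigint'],
#                        ['fillfactor=10'], tablespace='ssd')
# -> 'CREATE TABLE "test_table" (id bigserial primary key, num bigint) WITH (fillfactor=10) TABLESPACE "ssd"'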
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
|
||||
community.general.postgresql_table:
|
||||
db: acme
|
||||
name: tbl2
|
||||
like: tbl1
|
||||
owner: testuser
|
||||
|
||||
- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
|
||||
community.general.postgresql_table:
|
||||
db: acme
|
||||
table: tbl2
|
||||
like: tbl1
|
||||
including: comments, indexes
|
||||
tablespace: ssd
|
||||
|
||||
- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
|
||||
community.general.postgresql_table:
|
||||
name: test_table
|
||||
columns:
|
||||
- id bigserial primary key
|
||||
- num bigint
|
||||
- stories text
|
||||
tablespace: ssd
|
||||
storage_params:
|
||||
- fillfactor=10
|
||||
- autovacuum_analyze_threshold=1
|
||||
|
||||
- name: Create an unlogged table in schema acme
|
||||
community.general.postgresql_table:
|
||||
name: acme.useless_data
|
||||
columns: waste_id int
|
||||
unlogged: true
|
||||
|
||||
- name: Rename table foo to bar
|
||||
community.general.postgresql_table:
|
||||
table: foo
|
||||
rename: bar
|
||||
|
||||
- name: Rename table foo from schema acme to bar
|
||||
community.general.postgresql_table:
|
||||
name: acme.foo
|
||||
rename: bar
|
||||
|
||||
- name: Set owner to someuser
|
||||
community.general.postgresql_table:
|
||||
name: foo
|
||||
owner: someuser
|
||||
|
||||
- name: Change tablespace of foo table to new_tablespace and set owner to new_user
|
||||
community.general.postgresql_table:
|
||||
name: foo
|
||||
tablespace: new_tablespace
|
||||
owner: new_user
|
||||
|
||||
- name: Truncate table foo
|
||||
community.general.postgresql_table:
|
||||
name: foo
|
||||
truncate: yes
|
||||
|
||||
- name: Drop table foo from schema acme
|
||||
community.general.postgresql_table:
|
||||
name: acme.foo
|
||||
state: absent
|
||||
|
||||
- name: Drop table bar cascade
|
||||
community.general.postgresql_table:
|
||||
name: bar
|
||||
state: absent
|
||||
cascade: yes
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
table:
|
||||
description: Name of a table.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'foo'
|
||||
state:
|
||||
description: Table state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'present'
|
||||
owner:
|
||||
description: Table owner.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'postgres'
|
||||
tablespace:
|
||||
description: Tablespace.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'ssd_tablespace'
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
|
||||
storage_params:
|
||||
description: Storage parameters.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
pg_quote_identifier,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
class Table(object):
|
||||
def __init__(self, name, module, cursor):
|
||||
self.name = name
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.info = {
|
||||
'owner': '',
|
||||
'tblspace': '',
|
||||
'storage_params': [],
|
||||
}
|
||||
self.exists = False
|
||||
self.__exists_in_db()
|
||||
self.executed_queries = []
|
||||
|
||||
def get_info(self):
|
||||
"""Getter to refresh and get table info"""
|
||||
self.__exists_in_db()
|
||||
|
||||
def __exists_in_db(self):
|
||||
"""Check table exists and refresh info"""
|
||||
if "." in self.name:
|
||||
schema = self.name.split('.')[-2]
|
||||
tblname = self.name.split('.')[-1]
|
||||
else:
|
||||
schema = 'public'
|
||||
tblname = self.name
|
||||
|
||||
query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
|
||||
"FROM pg_tables AS t "
|
||||
"INNER JOIN pg_class AS c ON c.relname = t.tablename "
|
||||
"INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
|
||||
"WHERE t.tablename = %(tblname)s "
|
||||
"AND n.nspname = %(schema)s")
|
||||
res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
|
||||
add_to_executed=False)
|
||||
if res:
|
||||
self.exists = True
|
||||
self.info = dict(
|
||||
owner=res[0][0],
|
||||
tblspace=res[0][1] if res[0][1] else '',
|
||||
storage_params=res[0][2] if res[0][2] else [],
|
||||
)
|
||||
|
||||
return True
|
||||
else:
|
||||
self.exists = False
|
||||
return False
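# Standalone sketch of the same existence check with plain psycopg2, outside of the
# module plumbing; handy for debugging the query interactively. The DSN is an
# assumed placeholder.
def _demo_table_exists(dsn, schema, tblname):
    import psycopg2

    query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
             "FROM pg_tables AS t "
             "INNER JOIN pg_class AS c ON c.relname = t.tablename "
             "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
             "WHERE t.tablename = %(tblname)s AND n.nspname = %(schema)s")
    conn = psycopg2.connect(dsn)
    try:
        with conn.cursor() as cur:
            cur.execute(query, {'tblname': tblname, 'schema': schema})
            return cur.fetchone() is not None
    finally:
        conn.close()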
|
||||
|
||||
def create(self, columns='', params='', tblspace='',
|
||||
unlogged=False, owner=''):
|
||||
"""
|
||||
Create table.
|
||||
If table exists, check passed args (params, tblspace, owner) and,
|
||||
if they're different from current, change them.
|
||||
Arguments:
|
||||
params - storage params (passed by "WITH (...)" in SQL),
|
||||
comma separated.
|
||||
tblspace - tablespace.
|
||||
owner - table owner.
|
||||
unlogged - create unlogged table.
|
||||
columns - column string (comma separated).
|
||||
"""
|
||||
name = pg_quote_identifier(self.name, 'table')
|
||||
|
||||
changed = False
|
||||
|
||||
if self.exists:
|
||||
if tblspace == 'pg_default' and self.info['tblspace'] is None:
|
||||
pass # Because they have the same meaning
|
||||
elif tblspace and self.info['tblspace'] != tblspace:
|
||||
self.set_tblspace(tblspace)
|
||||
changed = True
|
||||
|
||||
if owner and self.info['owner'] != owner:
|
||||
self.set_owner(owner)
|
||||
changed = True
|
||||
|
||||
if params:
|
||||
param_list = [p.strip(' ') for p in params.split(',')]
|
||||
|
||||
new_param = False
|
||||
for p in param_list:
|
||||
if p not in self.info['storage_params']:
|
||||
new_param = True
|
||||
|
||||
if new_param:
|
||||
self.set_stor_params(params)
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
return True
|
||||
return False
|
||||
|
||||
query = "CREATE"
|
||||
if unlogged:
|
||||
query += " UNLOGGED TABLE %s" % name
|
||||
else:
|
||||
query += " TABLE %s" % name
|
||||
|
||||
if columns:
|
||||
query += " (%s)" % columns
|
||||
else:
|
||||
query += " ()"
|
||||
|
||||
if params:
|
||||
query += " WITH (%s)" % params
|
||||
|
||||
if tblspace:
|
||||
query += ' TABLESPACE "%s"' % tblspace
|
||||
|
||||
if exec_sql(self, query, return_bool=True):
|
||||
changed = True
|
||||
|
||||
if owner:
|
||||
changed = self.set_owner(owner)
|
||||
|
||||
return changed
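# Small pure-Python illustration of the storage-parameter drift check performed in
# create() above: any requested parameter missing from the table's current reloptions
# triggers an ALTER TABLE ... SET (...). Values below are example data.
def _demo_storage_params_changed(requested_csv, current_params):
    requested = [p.strip(' ') for p in requested_csv.split(',')]
    return any(p not in current_params for p in requested)

# _demo_storage_params_changed('fillfactor=10, autovacuum_analyze_threshold=1',
#                              ['fillfactor=10'])  -> True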
|
||||
|
||||
def create_like(self, src_table, including='', tblspace='',
|
||||
unlogged=False, params='', owner=''):
|
||||
"""
|
||||
Create table like another table (with similar DDL).
|
||||
Arguments:
|
||||
src_table - source table.
|
||||
including - corresponds to optional INCLUDING expression
|
||||
in CREATE TABLE ... LIKE statement.
|
||||
params - storage params (passed by "WITH (...)" in SQL),
|
||||
comma separated.
|
||||
tblspace - tablespace.
|
||||
owner - table owner.
|
||||
unlogged - create unlogged table.
|
||||
"""
|
||||
changed = False
|
||||
|
||||
name = pg_quote_identifier(self.name, 'table')
|
||||
|
||||
query = "CREATE"
|
||||
if unlogged:
|
||||
query += " UNLOGGED TABLE %s" % name
|
||||
else:
|
||||
query += " TABLE %s" % name
|
||||
|
||||
query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
|
||||
|
||||
if including:
|
||||
including = including.split(',')
|
||||
for i in including:
|
||||
query += " INCLUDING %s" % i
|
||||
|
||||
query += ')'
|
||||
|
||||
if params:
|
||||
query += " WITH (%s)" % params
|
||||
|
||||
if tblspace:
|
||||
query += ' TABLESPACE "%s"' % tblspace
|
||||
|
||||
if exec_sql(self, query, return_bool=True):
|
||||
changed = True
|
||||
|
||||
if owner:
|
||||
changed = self.set_owner(owner)
|
||||
|
||||
return changed
|
||||
|
||||
def truncate(self):
|
||||
query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def rename(self, newname):
|
||||
query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
|
||||
pg_quote_identifier(newname, 'table'))
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def set_owner(self, username):
|
||||
query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username)
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def drop(self, cascade=False):
|
||||
if not self.exists:
|
||||
return False
|
||||
|
||||
query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
|
||||
if cascade:
|
||||
query += " CASCADE"
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def set_tblspace(self, tblspace):
|
||||
query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace)
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def set_stor_params(self, params):
|
||||
query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
table=dict(type='str', required=True, aliases=['name']),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
db=dict(type='str', default='', aliases=['login_db']),
|
||||
tablespace=dict(type='str'),
|
||||
owner=dict(type='str'),
|
||||
unlogged=dict(type='bool', default=False),
|
||||
like=dict(type='str'),
|
||||
including=dict(type='str'),
|
||||
rename=dict(type='str'),
|
||||
truncate=dict(type='bool', default=False),
|
||||
columns=dict(type='list', elements='str'),
|
||||
storage_params=dict(type='list', elements='str'),
|
||||
session_role=dict(type='str'),
|
||||
cascade=dict(type='bool', default=False),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
table = module.params['table']
|
||||
state = module.params['state']
|
||||
tablespace = module.params['tablespace']
|
||||
owner = module.params['owner']
|
||||
unlogged = module.params['unlogged']
|
||||
like = module.params['like']
|
||||
including = module.params['including']
|
||||
newname = module.params['rename']
|
||||
storage_params = module.params['storage_params']
|
||||
truncate = module.params['truncate']
|
||||
columns = module.params['columns']
|
||||
cascade = module.params['cascade']
|
||||
session_role = module.params['session_role']
|
||||
trust_input = module.params['trust_input']
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, table, tablespace, owner, like, including,
|
||||
newname, storage_params, columns, session_role)
|
||||
|
||||
if state == 'present' and cascade:
|
||||
module.warn("cascade=true is ignored when state=present")
|
||||
|
||||
# Check mutual exclusive parameters:
|
||||
if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
|
||||
module.fail_json(msg="%s: state=absent is mutually exclusive with: "
|
||||
"truncate, rename, columns, tablespace, "
|
||||
"including, like, storage_params, unlogged, owner" % table)
|
||||
|
||||
if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
|
||||
module.fail_json(msg="%s: truncate is mutually exclusive with: "
|
||||
"rename, columns, like, unlogged, including, "
|
||||
"storage_params, owner, tablespace" % table)
|
||||
|
||||
if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
|
||||
module.fail_json(msg="%s: rename is mutually exclusive with: "
|
||||
"columns, like, unlogged, including, "
|
||||
"storage_params, owner, tablespace" % table)
|
||||
|
||||
if like and columns:
|
||||
module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
|
||||
if including and not like:
|
||||
module.fail_json(msg="%s: including param needs like param specified" % table)
|
||||
|
||||
conn_params = get_conn_params(module, module.params)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=False)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
if storage_params:
|
||||
storage_params = ','.join(storage_params)
|
||||
|
||||
if columns:
|
||||
columns = ','.join(columns)
|
||||
|
||||
##############
|
||||
# Do main job:
|
||||
table_obj = Table(table, module, cursor)
|
||||
|
||||
# Set default returned values:
|
||||
changed = False
|
||||
kw = {}
|
||||
kw['table'] = table
|
||||
kw['state'] = ''
|
||||
if table_obj.exists:
|
||||
kw = dict(
|
||||
table=table,
|
||||
state='present',
|
||||
owner=table_obj.info['owner'],
|
||||
tablespace=table_obj.info['tblspace'],
|
||||
storage_params=table_obj.info['storage_params'],
|
||||
)
|
||||
|
||||
if state == 'absent':
|
||||
changed = table_obj.drop(cascade=cascade)
|
||||
|
||||
elif truncate:
|
||||
changed = table_obj.truncate()
|
||||
|
||||
elif newname:
|
||||
changed = table_obj.rename(newname)
|
||||
q = table_obj.executed_queries
|
||||
table_obj = Table(newname, module, cursor)
|
||||
table_obj.executed_queries = q
|
||||
|
||||
elif state == 'present' and not like:
|
||||
changed = table_obj.create(columns, storage_params,
|
||||
tablespace, unlogged, owner)
|
||||
|
||||
elif state == 'present' and like:
|
||||
changed = table_obj.create_like(like, including, tablespace,
|
||||
unlogged, storage_params)
|
||||
|
||||
if changed:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
# Refresh table info for RETURN.
|
||||
# Note, if table has been renamed, it gets info by newname:
|
||||
table_obj.get_info()
|
||||
db_connection.commit()
|
||||
if table_obj.exists:
|
||||
kw = dict(
|
||||
table=table,
|
||||
state='present',
|
||||
owner=table_obj.info['owner'],
|
||||
tablespace=table_obj.info['tblspace'],
|
||||
storage_params=table_obj.info['storage_params'],
|
||||
)
|
||||
else:
|
||||
# We just change the table state here
|
||||
# to keep other information about the dropped table:
|
||||
kw['state'] = 'absent'
|
||||
|
||||
kw['queries'] = table_obj.executed_queries
|
||||
kw['changed'] = changed
|
||||
db_connection.close()
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
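# Simplified sketch of what the identifier quoting used throughout this module
# achieves; the real pg_quote_identifier() from module_utils.database also validates
# the identifier depth, so treat this as an approximation for illustration only.
def _demo_quote_table_identifier(name):
    # 'acme.foo' -> '"acme"."foo"', 'foo' -> '"foo"'
    return '.'.join('"%s"' % part.replace('"', '""') for part in name.split('.'))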
|
|
@@ -1,541 +0,0 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
|
||||
# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
|
||||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_tablespace
|
||||
short_description: Add or remove PostgreSQL tablespaces from remote hosts
|
||||
description:
|
||||
- Adds or removes PostgreSQL tablespaces from remote hosts.
|
||||
options:
|
||||
tablespace:
|
||||
description:
|
||||
- Name of the tablespace to add or remove.
|
||||
required: true
|
||||
type: str
|
||||
aliases:
|
||||
- name
|
||||
location:
|
||||
description:
|
||||
- Path to the tablespace directory in the file system.
|
||||
- Ensure that the location exists and has the right privileges.
|
||||
type: path
|
||||
aliases:
|
||||
- path
|
||||
state:
|
||||
description:
|
||||
- Tablespace state.
|
||||
- I(state=present) implies the tablespace must be created if it doesn't exist.
|
||||
- I(state=absent) implies the tablespace must be removed if present.
|
||||
I(state=absent) is mutually exclusive with I(location), I(owner), and I(set).
|
||||
- See the Notes section for information about check mode restrictions.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
owner:
|
||||
description:
|
||||
- Name of the role to set as an owner of the tablespace.
|
||||
- If this option is not specified, the role that creates the tablespace becomes its owner.
|
||||
type: str
|
||||
set:
|
||||
description:
|
||||
- Dict of tablespace options to set. Supported from PostgreSQL 9.0.
|
||||
- For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
|
||||
- When C(reset) is passed as an option's value, the option is removed if it was previously set.
|
||||
type: dict
|
||||
rename_to:
|
||||
description:
|
||||
- New name of the tablespace.
|
||||
- The new name cannot begin with pg_, as such names are reserved for system tablespaces.
|
||||
type: str
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to and run queries against.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether values of parameters I(tablespace), I(location), I(owner),
|
||||
I(rename_to), I(session_role), I(settings_list) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via the parameters are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
|
||||
notes:
|
||||
- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
|
||||
support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
|
||||
cannot be run inside a transaction block.
|
||||
|
||||
seealso:
|
||||
- name: PostgreSQL tablespaces
|
||||
description: General information about PostgreSQL tablespaces.
|
||||
link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
|
||||
- name: CREATE TABLESPACE reference
|
||||
description: Complete reference of the CREATE TABLESPACE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-createtablespace.html
|
||||
- name: ALTER TABLESPACE reference
|
||||
description: Complete reference of the ALTER TABLESPACE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-altertablespace.html
|
||||
- name: DROP TABLESPACE reference
|
||||
description: Complete reference of the DROP TABLESPACE command documentation.
|
||||
link: https://www.postgresql.org/docs/current/sql-droptablespace.html
|
||||
|
||||
author:
|
||||
- Flavien Chantelot (@Dorn-)
|
||||
- Antoine Levy-Lambert (@antoinell)
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a new tablespace called acme and set bob as its owner
|
||||
community.general.postgresql_tablespace:
|
||||
name: acme
|
||||
owner: bob
|
||||
location: /data/foo
|
||||
|
||||
- name: Create a new tablespace called bar with tablespace options
|
||||
community.general.postgresql_tablespace:
|
||||
name: bar
|
||||
set:
|
||||
random_page_cost: 1
|
||||
seq_page_cost: 1
|
||||
|
||||
- name: Reset random_page_cost option
|
||||
community.general.postgresql_tablespace:
|
||||
name: bar
|
||||
set:
|
||||
random_page_cost: reset
|
||||
|
||||
- name: Rename the tablespace from bar to pcie_ssd
|
||||
community.general.postgresql_tablespace:
|
||||
name: bar
|
||||
rename_to: pcie_ssd
|
||||
|
||||
- name: Drop tablespace called bloat
|
||||
community.general.postgresql_tablespace:
|
||||
name: bloat
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
queries:
|
||||
description: List of queries that the module tried to execute.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
|
||||
tablespace:
|
||||
description: Tablespace name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'ssd'
|
||||
owner:
|
||||
description: Tablespace owner.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'Bob'
|
||||
options:
|
||||
description: Tablespace options.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
|
||||
location:
|
||||
description: Path to the tablespace in the file system.
|
||||
returned: always
|
||||
type: str
|
||||
sample: '/incredible/fast/ssd'
|
||||
newname:
|
||||
description: New tablespace name
|
||||
returned: if existent
|
||||
type: str
|
||||
sample: new_ssd
|
||||
state:
|
||||
description: Tablespace state at the end of execution.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'present'
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2 import __version__ as PSYCOPG2_VERSION
|
||||
from psycopg2.extras import DictCursor
|
||||
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
|
||||
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six import iteritems
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
pg_quote_identifier,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
class PgTablespace(object):
|
||||
|
||||
"""Class for working with PostgreSQL tablespaces.
|
||||
|
||||
Args:
|
||||
module (AnsibleModule) -- object of AnsibleModule class
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
name (str) -- name of the tablespace
|
||||
|
||||
Attrs:
|
||||
module (AnsibleModule) -- object of AnsibleModule class
|
||||
cursor (cursor) -- cursor object of psycopg2 library
|
||||
name (str) -- name of the tablespace
|
||||
exists (bool) -- flag the tablespace exists in the DB or not
|
||||
owner (str) -- tablespace owner
|
||||
location (str) -- path to the tablespace directory in the file system
|
||||
executed_queries (list) -- list of executed queries
|
||||
new_name (str) -- new name for the tablespace
|
||||
opt_not_supported (bool) -- flag indicating whether tablespace options are supported
|
||||
"""
|
||||
|
||||
def __init__(self, module, cursor, name):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.name = name
|
||||
self.exists = False
|
||||
self.owner = ''
|
||||
self.settings = {}
|
||||
self.location = ''
|
||||
self.executed_queries = []
|
||||
self.new_name = ''
|
||||
self.opt_not_supported = False
|
||||
# Collect info:
|
||||
self.get_info()
|
||||
|
||||
def get_info(self):
|
||||
"""Get tablespace information."""
|
||||
# Check that spcoptions exists:
|
||||
opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
|
||||
"WHERE table_name = 'pg_tablespace' "
|
||||
"AND column_name = 'spcoptions'", add_to_executed=False)
|
||||
|
||||
# For 9.1 version and earlier:
|
||||
location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
|
||||
"WHERE table_name = 'pg_tablespace' "
|
||||
"AND column_name = 'spclocation'", add_to_executed=False)
|
||||
if location:
|
||||
location = 'spclocation'
|
||||
else:
|
||||
location = 'pg_tablespace_location(t.oid)'
|
||||
|
||||
if not opt:
|
||||
self.opt_not_supported = True
|
||||
query = ("SELECT r.rolname, (SELECT Null), %s "
|
||||
"FROM pg_catalog.pg_tablespace AS t "
|
||||
"JOIN pg_catalog.pg_roles AS r "
|
||||
"ON t.spcowner = r.oid " % location)
|
||||
else:
|
||||
query = ("SELECT r.rolname, t.spcoptions, %s "
|
||||
"FROM pg_catalog.pg_tablespace AS t "
|
||||
"JOIN pg_catalog.pg_roles AS r "
|
||||
"ON t.spcowner = r.oid " % location)
|
||||
|
||||
res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
|
||||
query_params={'name': self.name}, add_to_executed=False)
|
||||
|
||||
if not res:
|
||||
self.exists = False
|
||||
return False
|
||||
|
||||
if res[0][0]:
|
||||
self.exists = True
|
||||
self.owner = res[0][0]
|
||||
|
||||
if res[0][1]:
|
||||
# Options exist:
|
||||
for i in res[0][1]:
|
||||
i = i.split('=')
|
||||
self.settings[i[0]] = i[1]
|
||||
|
||||
if res[0][2]:
|
||||
# Location exists:
|
||||
self.location = res[0][2]
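# Pure-Python sketch of how the spcoptions array fetched above becomes the settings
# dict; the input is example data in the same 'key=value' form the server returns.
def _demo_parse_spcoptions(spcoptions):
    # ['random_page_cost=1', 'seq_page_cost=1'] -> {'random_page_cost': '1', 'seq_page_cost': '1'}
    settings = {}
    for item in spcoptions or []:
        key, dummy, value = item.partition('=')
        settings[key] = value
    return settings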
|
||||
|
||||
def create(self, location):
|
||||
"""Create tablespace.
|
||||
|
||||
Return True if success, otherwise, return False.
|
||||
|
||||
args:
|
||||
location (str) -- tablespace directory path in the FS
|
||||
"""
|
||||
query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location))
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def drop(self):
|
||||
"""Drop tablespace.
|
||||
|
||||
Return True if success, otherwise, return False.
|
||||
"""
|
||||
return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True)
|
||||
|
||||
def set_owner(self, new_owner):
|
||||
"""Set tablespace owner.
|
||||
|
||||
Return True if success, otherwise, return False.
|
||||
|
||||
args:
|
||||
new_owner (str) -- name of a new owner for the tablespace
|
||||
"""
|
||||
if new_owner == self.owner:
|
||||
return False
|
||||
|
||||
query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner)
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def rename(self, newname):
|
||||
"""Rename tablespace.
|
||||
|
||||
Return True if success, otherwise, return False.
|
||||
|
||||
args:
|
||||
newname (str) -- new name for the tablespace
|
||||
"""
|
||||
query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname)
|
||||
self.new_name = newname
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def set_settings(self, new_settings):
|
||||
"""Set tablespace settings (options).
|
||||
|
||||
If some setting has been changed, set changed = True.
|
||||
After the whole settings list has been handled, return changed.
|
||||
|
||||
args:
|
||||
new_settings (dict) -- dictionary of new settings
|
||||
"""
|
||||
# settings must be a dict {'key': 'value'}
|
||||
if self.opt_not_supported:
|
||||
return False
|
||||
|
||||
changed = False
|
||||
|
||||
# Apply new settings:
|
||||
for i in new_settings:
|
||||
if new_settings[i] == 'reset':
|
||||
if i in self.settings:
|
||||
changed = self.__reset_setting(i)
|
||||
self.settings[i] = None
|
||||
|
||||
elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
|
||||
changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
|
||||
|
||||
return changed
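# Illustrative only: the ALTER TABLESPACE statements a settings dict such as
# {'random_page_cost': 1, 'seq_page_cost': 'reset'} translates into via
# __set_setting()/__reset_setting() below, assuming seq_page_cost was set before.
def _demo_tablespace_setting_sql(name, new_settings, current_settings):
    statements = []
    for key, value in new_settings.items():
        if value == 'reset':
            if key in current_settings:
                statements.append('ALTER TABLESPACE "%s" RESET (%s)' % (name, key))
        elif key not in current_settings or str(value) != current_settings[key]:
            statements.append('ALTER TABLESPACE "%s" SET (%s = \'%s\')' % (name, key, value))
    return statements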
|
||||
|
||||
def __reset_setting(self, setting):
|
||||
"""Reset tablespace setting.
|
||||
|
||||
Return True if success, otherwise, return False.
|
||||
|
||||
args:
|
||||
setting (str) -- name of the setting to reset
|
||||
"""
|
||||
query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting)
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
def __set_setting(self, setting):
|
||||
"""Set tablespace setting.
|
||||
|
||||
Return True if success, otherwise, return False.
|
||||
|
||||
args:
|
||||
setting (str) -- string in format "setting_name = 'setting_value'"
|
||||
"""
|
||||
query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting)
|
||||
return exec_sql(self, query, return_bool=True)
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
tablespace=dict(type='str', required=True, aliases=['name']),
|
||||
state=dict(type='str', default="present", choices=["absent", "present"]),
|
||||
location=dict(type='path', aliases=['path']),
|
||||
owner=dict(type='str'),
|
||||
set=dict(type='dict'),
|
||||
rename_to=dict(type='str'),
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
session_role=dict(type='str'),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
mutually_exclusive=(('positional_args', 'named_args'),),  # appears to be a leftover; neither option exists in this module's argument_spec
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
tablespace = module.params["tablespace"]
|
||||
state = module.params["state"]
|
||||
location = module.params["location"]
|
||||
owner = module.params["owner"]
|
||||
rename_to = module.params["rename_to"]
|
||||
settings = module.params["set"]
|
||||
session_role = module.params["session_role"]
|
||||
trust_input = module.params["trust_input"]
|
||||
|
||||
if state == 'absent' and (location or owner or rename_to or settings):
|
||||
module.fail_json(msg="state=absent is mutually exclusive location, "
|
||||
"owner, rename_to, and set")
|
||||
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
if not settings:
|
||||
settings_list = None
|
||||
else:
|
||||
settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)]
|
||||
|
||||
check_input(module, tablespace, location, owner,
|
||||
rename_to, session_role, settings_list)
|
||||
|
||||
conn_params = get_conn_params(module, module.params, warn_db_default=False)
|
||||
db_connection = connect_to_db(module, conn_params, autocommit=True)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
# Change autocommit to False if check_mode:
|
||||
if module.check_mode:
|
||||
if PSYCOPG2_VERSION >= '2.4.2':
|
||||
db_connection.set_session(autocommit=False)
|
||||
else:
|
||||
db_connection.set_isolation_level(READ_COMMITTED)
|
||||
|
||||
# Set defaults:
|
||||
autocommit = False
|
||||
changed = False
|
||||
|
||||
##############
|
||||
# Create PgTablespace object and do main job:
|
||||
tblspace = PgTablespace(module, cursor, tablespace)
|
||||
|
||||
# If tablespace exists with different location, exit:
|
||||
if tblspace.exists and location and location != tblspace.location:
|
||||
module.fail_json(msg="Tablespace '%s' exists with "
|
||||
"different location '%s'" % (tblspace.name, tblspace.location))
|
||||
|
||||
# Create new tablespace:
|
||||
if not tblspace.exists and state == 'present':
|
||||
if rename_to:
|
||||
module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
|
||||
|
||||
if not location:
|
||||
module.fail_json(msg="'location' parameter must be passed with "
|
||||
"state=present if the tablespace doesn't exist")
|
||||
|
||||
# Because CREATE TABLESPACE can not be run inside the transaction block:
|
||||
autocommit = True
|
||||
if PSYCOPG2_VERSION >= '2.4.2':
|
||||
db_connection.set_session(autocommit=True)
|
||||
else:
|
||||
db_connection.set_isolation_level(AUTOCOMMIT)
|
||||
|
||||
changed = tblspace.create(location)
|
||||
|
||||
# Drop non-existing tablespace:
|
||||
elif not tblspace.exists and state == 'absent':
|
||||
# Nothing to do:
|
||||
module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
|
||||
|
||||
# Drop existing tablespace:
|
||||
elif tblspace.exists and state == 'absent':
|
||||
# Because DROP TABLESPACE can not be run inside the transaction block:
|
||||
autocommit = True
|
||||
if PSYCOPG2_VERSION >= '2.4.2':
|
||||
db_connection.set_session(autocommit=True)
|
||||
else:
|
||||
db_connection.set_isolation_level(AUTOCOMMIT)
|
||||
|
||||
changed = tblspace.drop()
|
||||
|
||||
# Rename tablespace:
|
||||
elif tblspace.exists and rename_to:
|
||||
if tblspace.name != rename_to:
|
||||
changed = tblspace.rename(rename_to)
|
||||
|
||||
if state == 'present':
|
||||
# Refresh information:
|
||||
tblspace.get_info()
|
||||
|
||||
# Change owner and settings:
|
||||
if state == 'present' and tblspace.exists:
|
||||
if owner:
|
||||
changed = tblspace.set_owner(owner)
|
||||
|
||||
if settings:
|
||||
changed = tblspace.set_settings(settings)
|
||||
|
||||
tblspace.get_info()
|
||||
|
||||
# Rollback if it's possible and check_mode:
|
||||
if not autocommit:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Make return values:
|
||||
kw = dict(
|
||||
changed=changed,
|
||||
state='present',
|
||||
tablespace=tblspace.name,
|
||||
owner=tblspace.owner,
|
||||
queries=tblspace.executed_queries,
|
||||
options=tblspace.settings,
|
||||
location=tblspace.location,
|
||||
)
|
||||
|
||||
if state == 'present':
|
||||
kw['state'] = 'present'
|
||||
|
||||
if tblspace.new_name:
|
||||
kw['newname'] = tblspace.new_name
|
||||
|
||||
elif state == 'absent':
|
||||
kw['state'] = 'absent'
|
||||
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
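# Minimal standalone sketch (assumed DSN and path) of the autocommit switch used
# above: CREATE TABLESPACE and DROP TABLESPACE cannot run inside a transaction
# block, so the connection is put into autocommit mode before executing them.
def _demo_create_tablespace(dsn, name, location):
    import psycopg2

    conn = psycopg2.connect(dsn)
    try:
        conn.set_session(autocommit=True)  # set_isolation_level(AUTOCOMMIT) on psycopg2 < 2.4.2
        with conn.cursor() as cur:
            cur.execute('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (name, location))
    finally:
        conn.close()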
|
|
@@ -1,993 +0,0 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_user
|
||||
short_description: Create, alter, or remove a user (role) from a PostgreSQL server instance
|
||||
description:
|
||||
- Creates, alters, or removes a user (role) from a PostgreSQL server instance
|
||||
("cluster" in PostgreSQL terminology) and, optionally,
|
||||
grants the user access to an existing database or tables.
|
||||
- A user is a role with login privilege.
|
||||
- You can also use it to grant or revoke user's privileges in a particular database.
|
||||
- You cannot remove a user while it still has any privileges granted to it in any database.
|
||||
- Set I(fail_on_user) to C(no) to make the module ignore failures when trying to remove a user.
|
||||
In this case, the module reports if changes happened as usual and separately reports
|
||||
whether the user has been removed or not.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the user (role) to add or remove.
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- user
|
||||
password:
|
||||
description:
|
||||
- Set the user's password; before Ansible 1.4 this was required.
|
||||
- Password can be passed unhashed or hashed (MD5-hashed).
|
||||
- An unhashed password is automatically hashed when saved into the
|
||||
database if I(encrypted) is set, otherwise it is saved in
|
||||
plain text format.
|
||||
- When passing an MD5-hashed password, you must generate it with the format
|
||||
C('str["md5"] + md5[ password + username ]'), resulting in a total of
|
||||
35 characters. An easy way to do this is
|
||||
C(echo "md5`echo -n 'verysecretpasswordJOE' | md5sum | awk '{print $1}'`").
|
||||
- Note that if the provided password string is already in MD5-hashed
|
||||
format, then it is used as-is, regardless of I(encrypted) option.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of database to connect to and where user's permissions are granted.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
fail_on_user:
|
||||
description:
|
||||
- If C(yes), fails when the user (role) cannot be removed. Otherwise just log and continue.
|
||||
default: yes
|
||||
type: bool
|
||||
aliases:
|
||||
- fail_on_role
|
||||
priv:
|
||||
description:
|
||||
- "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
|
||||
you can define the user's privileges for the database ( allowed options - 'CREATE',
|
||||
'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
|
||||
for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
|
||||
'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
|
||||
C(table:SELECT) ). Mixed example of this string:
|
||||
C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
|
||||
type: str
|
||||
role_attr_flags:
|
||||
description:
|
||||
- "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
|
||||
- Note that '[NO]CREATEUSER' is deprecated.
|
||||
- To create a simple role for using it like a group, use C(NOLOGIN) flag.
|
||||
type: str
|
||||
choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
|
||||
'[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session role after connecting.
|
||||
- The specified session role must be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though the session role
|
||||
were the one that had logged in originally.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The user (role) state.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ absent, present ]
|
||||
encrypted:
|
||||
description:
|
||||
- Whether the password is stored hashed in the database.
|
||||
- You can specify an unhashed password, and PostgreSQL ensures
|
||||
the stored password is hashed when I(encrypted=yes) is set.
|
||||
If you specify a hashed password, the module uses it as-is,
|
||||
regardless of the setting of I(encrypted).
|
||||
- "Note: Postgresql 10 and newer does not support unhashed passwords."
|
||||
- Previous to Ansible 2.6, this was C(no) by default.
|
||||
default: yes
|
||||
type: bool
|
||||
expires:
|
||||
description:
|
||||
- The date at which the user's password is to expire.
|
||||
- If set to C('infinity'), user's password never expires.
|
||||
- Note that this value must be a valid SQL date and time type.
|
||||
type: str
|
||||
no_password_changes:
|
||||
description:
|
||||
- If C(yes), does not inspect the database for password changes.
|
||||
Useful when C(pg_authid) is not accessible (such as in AWS RDS).
|
||||
Otherwise, makes password changes as necessary.
|
||||
default: no
|
||||
type: bool
|
||||
conn_limit:
|
||||
description:
|
||||
- Specifies the user (role) connection limit.
|
||||
type: int
|
||||
ssl_mode:
|
||||
description:
|
||||
- Determines how an SSL session is negotiated with the server.
|
||||
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
|
||||
- Default of C(prefer) matches libpq default.
|
||||
type: str
|
||||
default: prefer
|
||||
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
|
||||
ca_cert:
|
||||
description:
|
||||
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
|
||||
- If the file exists, verifies that the server's certificate is signed by one of these authorities.
|
||||
type: str
|
||||
aliases: [ ssl_rootcert ]
|
||||
groups:
|
||||
description:
|
||||
- The list of groups (roles) that you want to grant to the user.
|
||||
type: list
|
||||
elements: str
|
||||
comment:
|
||||
description:
|
||||
- Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement).
|
||||
type: str
|
||||
version_added: '0.2.0'
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), checks whether values of options I(name), I(password), I(privs), I(expires),
|
||||
I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections through the options are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
notes:
|
||||
- The module creates a user (role) with login privilege by default.
|
||||
Use C(NOLOGIN) I(role_attr_flags) to change this behaviour.
|
||||
- If you specify C(PUBLIC) as the user (role), then the privilege changes apply to all users (roles).
|
||||
You may not specify password or role_attr_flags when the C(PUBLIC) user is specified.
|
||||
- SCRAM-SHA-256-hashed passwords (SASL Authentication) require PostgreSQL version 10 or newer.
|
||||
On the previous versions the whole hashed string is used as a password.
|
||||
- 'When working with SCRAM-SHA-256-hashed passwords, be sure to use the I(environment:) variable
|
||||
C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).'
|
||||
- Supports ``check_mode``.
|
||||
seealso:
|
||||
- module: community.general.postgresql_privs
|
||||
- module: community.general.postgresql_membership
|
||||
- module: community.general.postgresql_owner
|
||||
- name: PostgreSQL database roles
|
||||
description: Complete reference of the PostgreSQL database roles documentation.
|
||||
link: https://www.postgresql.org/docs/current/user-manag.html
|
||||
- name: PostgreSQL SASL Authentication
|
||||
description: Complete reference of the PostgreSQL SASL Authentication.
|
||||
link: https://www.postgresql.org/docs/current/sasl-authentication.html
|
||||
author:
|
||||
- Ansible Core Team
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
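# Illustrative decomposition of the I(priv) string format documented above, for
# example "CONNECT/CREATE/table1:SELECT/table2:INSERT"; the module's own parsing
# additionally validates every keyword against VALID_PRIVS.
def _demo_parse_priv_string(priv):
    db_privs, table_privs = [], {}
    for chunk in priv.split('/'):
        if ':' in chunk:
            table, privs = chunk.split(':', 1)
            table_privs.setdefault(table, []).extend(privs.split(','))
        else:
            db_privs.extend(chunk.split(','))
    return {'database': db_privs, 'table': table_privs}

# _demo_parse_priv_string('CONNECT/CREATE/table1:SELECT/table2:INSERT')
# -> {'database': ['CONNECT', 'CREATE'], 'table': {'table1': ['SELECT'], 'table2': ['INSERT']}}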
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Connect to acme database, create django user, and grant access to database and products table
|
||||
community.general.postgresql_user:
|
||||
db: acme
|
||||
name: django
|
||||
password: ceec4eif7ya
|
||||
priv: "CONNECT/products:ALL"
|
||||
expires: "Jan 31 2020"
|
||||
|
||||
- name: Add a comment on django user
|
||||
community.general.postgresql_user:
|
||||
db: acme
|
||||
name: django
|
||||
comment: This is a test user
|
||||
|
||||
# Connect to default database, create rails user, set its password (MD5-hashed),
|
||||
# and grant privilege to create other databases and demote rails from super user status if user exists
|
||||
- name: Create rails user, set MD5-hashed password, grant privs
|
||||
community.general.postgresql_user:
|
||||
name: rails
|
||||
password: md59543f1d82624df2b31672ec0f7050460
|
||||
role_attr_flags: CREATEDB,NOSUPERUSER
|
||||
|
||||
- name: Connect to acme database and remove test user privileges from there
|
||||
community.general.postgresql_user:
|
||||
db: acme
|
||||
name: test
|
||||
priv: "ALL/products:ALL"
|
||||
state: absent
|
||||
fail_on_user: no
|
||||
|
||||
- name: Connect to test database, remove test user from cluster
|
||||
community.general.postgresql_user:
|
||||
db: test
|
||||
name: test
|
||||
priv: ALL
|
||||
state: absent
|
||||
|
||||
- name: Connect to acme database and set user's password with no expire date
|
||||
community.general.postgresql_user:
|
||||
db: acme
|
||||
name: django
|
||||
password: mysupersecretword
|
||||
priv: "CONNECT/products:ALL"
|
||||
expires: infinity
|
||||
|
||||
# Example privileges string format
|
||||
# INSERT,UPDATE/table:SELECT/anothertable:ALL
|
||||
|
||||
- name: Connect to test database and remove an existing user's password
|
||||
community.general.postgresql_user:
|
||||
db: test
|
||||
user: test
|
||||
password: ""
|
||||
|
||||
- name: Create user test and grant group user_ro and user_rw to it
|
||||
community.general.postgresql_user:
|
||||
name: test
|
||||
groups:
|
||||
- user_ro
|
||||
- user_rw
|
||||
|
||||
# Create user with a cleartext password if it does not exist or update its password.
|
||||
# The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10)
|
||||
- name: Create appclient user with SCRAM-hashed password
|
||||
community.general.postgresql_user:
|
||||
name: appclient
|
||||
password: "secret123"
|
||||
environment:
|
||||
PGOPTIONS: "-c password_encryption=scram-sha-256"
|
||||
'''
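# Python equivalent of the MD5 password format described in the I(password) option
# and used in the rails example above: "md5" followed by md5(password + username),
# 35 characters in total. Values below are example data only.
def _demo_postgres_md5_password(password, username):
    from hashlib import md5
    return 'md5' + md5((password + username).encode('utf-8')).hexdigest()

# _demo_postgres_md5_password('verysecretpassword', 'JOE') returns a 35-character
# string that can be passed directly as the I(password) value.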
|
||||
|
||||
RETURN = r'''
|
||||
queries:
|
||||
description: List of executed queries.
|
||||
returned: always
|
||||
type: list
|
||||
sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
|
||||
'''
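# Worked sketch of the SCRAM-SHA-256 comparison referenced in the notes above and
# implemented further down in user_should_we_change_password(). PostgreSQL stores
# 'SCRAM-SHA-256$<iterations>:<salt>$<StoredKey>:<ServerKey>'; per RFC 5802,
# SaltedPassword = PBKDF2-HMAC-SHA-256(password, salt, iterations) and
# ServerKey = HMAC(SaltedPassword, "Server Key"). SASLprep normalization of the
# password is omitted here for brevity; inputs are example placeholders.
def _demo_scram_server_key(password, salt_b64, iterations):
    import hmac
    from base64 import b64decode
    from hashlib import pbkdf2_hmac, sha256

    salted_password = pbkdf2_hmac('sha256', password.encode('utf-8'),
                                  b64decode(salt_b64), iterations)
    return hmac.new(salted_password, b'Server Key', sha256).digest()

# Base64-encoding the result and comparing it with the <ServerKey> part of
# rolpassword tells whether a cleartext password matches the stored verifier.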
|
||||
|
||||
import itertools
|
||||
import re
|
||||
import traceback
|
||||
from hashlib import md5, sha256
|
||||
import hmac
|
||||
from base64 import b64decode
|
||||
|
||||
try:
|
||||
import psycopg2
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
pg_quote_identifier,
|
||||
SQLParseError,
|
||||
check_input,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
get_conn_params,
|
||||
PgMembership,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils._text import to_bytes, to_native, to_text
|
||||
from ansible.module_utils.six import iteritems
|
||||
import ansible_collections.community.general.plugins.module_utils.saslprep as saslprep
|
||||
|
||||
try:
|
||||
# pbkdf2_hmac is missing on Python 2.6; we can safely assume
|
||||
# that a PostgreSQL 10 capable instance has at least Python 2.7 installed
|
||||
from hashlib import pbkdf2_hmac
|
||||
pbkdf2_found = True
|
||||
except ImportError:
|
||||
pbkdf2_found = False
|
||||
|
||||
|
||||
FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
|
||||
FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
|
||||
|
||||
SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$'
|
||||
|
||||
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
|
||||
database=frozenset(
|
||||
('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
|
||||
)
|
||||
|
||||
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
|
||||
PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
|
||||
CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
|
||||
REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
|
||||
|
||||
executed_queries = []
|
||||
|
||||
|
||||
class InvalidFlagsError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidPrivsError(Exception):
|
||||
pass
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
|
||||
def user_exists(cursor, user):
|
||||
# The PUBLIC user is a special case that is always there
|
||||
if user == 'PUBLIC':
|
||||
return True
|
||||
query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
|
||||
cursor.execute(query, {'user': user})
|
||||
return cursor.rowcount > 0
|
||||
|
||||
|
||||
def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
|
||||
"""Create a new database user (role)."""
|
||||
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
|
||||
# literal
|
||||
query_password_data = dict(password=password, expires=expires)
|
||||
query = ['CREATE USER "%(user)s"' %
|
||||
{"user": user}]
|
||||
if password is not None and password != '':
|
||||
query.append("WITH %(crypt)s" % {"crypt": encrypted})
|
||||
query.append("PASSWORD %(password)s")
|
||||
if expires is not None:
|
||||
query.append("VALID UNTIL %(expires)s")
|
||||
if conn_limit is not None:
|
||||
query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
|
||||
query.append(role_attr_flags)
|
||||
query = ' '.join(query)
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query, query_password_data)
|
||||
return True
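# Illustrative example (ours, not part of the original module): for a user
# 'alice' created with a password, an expiry date and role_attr_flags 'LOGIN',
# the statement assembled above looks like
#   CREATE USER "alice" WITH ENCRYPTED PASSWORD %(password)s VALID UNTIL %(expires)s LOGIN
# where the password and expiry are supplied separately as query parameters.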
|
||||
|
||||
|
||||
def user_should_we_change_password(current_role_attrs, user, password, encrypted):
|
||||
"""Check if we should change the user's password.
|
||||
|
||||
Compare the proposed password with the existing one, comparing
|
||||
hashes if encrypted. If we cannot access the current password, assume it needs changing.
|
||||
"""
|
||||
|
||||
if current_role_attrs is None:
|
||||
# on some databases, e.g. AWS RDS instances, there is no access to
|
||||
# the pg_authid relation to check the pre-existing password, so we
|
||||
# just assume the password is different
|
||||
return True
|
||||
|
||||
# Do we actually need to do anything?
|
||||
pwchanging = False
|
||||
if password is not None:
|
||||
# Empty password means that the role shouldn't have a password, which
|
||||
# means we need to check if the current password is None.
|
||||
if password == '':
|
||||
if current_role_attrs['rolpassword'] is not None:
|
||||
pwchanging = True
|
||||
|
||||
# SCRAM hashes are represented as a special object, containing hash data:
|
||||
# `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>`
|
||||
# for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html
|
||||
elif current_role_attrs['rolpassword'] is not None \
|
||||
and pbkdf2_found \
|
||||
and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']):
|
||||
|
||||
r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword'])
|
||||
try:
|
||||
# extract SCRAM params from rolpassword
|
||||
it = int(r.group(1))
|
||||
salt = b64decode(r.group(2))
|
||||
server_key = b64decode(r.group(4))
|
||||
# we'll never need `storedKey`; it is only used by the server to verify the client's proof in SCRAM
|
||||
# storedKey = b64decode(r.group(3))
|
||||
|
||||
# from RFC5802 https://tools.ietf.org/html/rfc5802#section-3
|
||||
# SaltedPassword := Hi(Normalize(password), salt, i)
|
||||
# ServerKey := HMAC(SaltedPassword, "Server Key")
|
||||
normalized_password = saslprep.saslprep(to_text(password))
|
||||
salted_password = pbkdf2_hmac('sha256', to_bytes(normalized_password), salt, it)
|
||||
|
||||
server_key_verifier = hmac.new(salted_password, digestmod=sha256)
|
||||
server_key_verifier.update(b'Server Key')
|
||||
|
||||
if server_key_verifier.digest() != server_key:
|
||||
pwchanging = True
|
||||
except Exception:
|
||||
# We assume the password is not SCRAM-encrypted
|
||||
# or we cannot check it properly, e.g. due to missing dependencies
|
||||
pwchanging = True
|
||||
|
||||
# 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
|
||||
# 3: The size of the 'md5' prefix
|
||||
# When the provided password looks like an MD5 hash, the value of
|
||||
# 'encrypted' is ignored.
|
||||
elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
|
||||
if password != current_role_attrs['rolpassword']:
|
||||
pwchanging = True
|
||||
elif encrypted == 'ENCRYPTED':
|
||||
hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
|
||||
if hashed_password != current_role_attrs['rolpassword']:
|
||||
pwchanging = True
|
||||
|
||||
return pwchanging
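# --- Illustrative sketch (ours, not part of the original module) ---
# The SCRAM-SHA-256 comparison performed above can be read as the following
# standalone helper. It reuses the module-level imports (re, hmac, b64decode,
# sha256, pbkdf2_hmac, saslprep, to_bytes, to_text) and SCRAM_SHA256_REGEX;
# the function name is our own, not an API of this module.
def _scram_sha256_verifier_matches(plaintext, rolpassword):
    """Return True if plaintext hashes to the stored SCRAM-SHA-256 verifier."""
    match = re.match(SCRAM_SHA256_REGEX, rolpassword)
    if not match or not pbkdf2_found:
        return False
    iterations = int(match.group(1))
    salt = b64decode(match.group(2))
    server_key = b64decode(match.group(4))
    # RFC 5802: SaltedPassword := Hi(Normalize(password), salt, i)
    #           ServerKey      := HMAC(SaltedPassword, "Server Key")
    normalized = saslprep.saslprep(to_text(plaintext))
    salted = pbkdf2_hmac('sha256', to_bytes(normalized), salt, iterations)
    candidate = hmac.new(salted, b'Server Key', sha256).digest()
    return hmac.compare_digest(candidate, server_key)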
|
||||
|
||||
|
||||
def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
|
||||
"""Change user password and/or attributes. Return True if changed, False otherwise."""
|
||||
changed = False
|
||||
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
|
||||
# literal
|
||||
if user == 'PUBLIC':
|
||||
if password is not None:
|
||||
module.fail_json(msg="cannot change the password for PUBLIC user")
|
||||
elif role_attr_flags != '':
|
||||
module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
|
||||
else:
|
||||
return False
|
||||
|
||||
# Handle passwords.
|
||||
if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
|
||||
# Select password and all flag-like columns in order to verify changes.
|
||||
try:
|
||||
select = "SELECT * FROM pg_authid where rolname=%(user)s"
|
||||
cursor.execute(select, {"user": user})
|
||||
# Grab current role attributes.
|
||||
current_role_attrs = cursor.fetchone()
|
||||
except psycopg2.ProgrammingError:
|
||||
current_role_attrs = None
|
||||
db_connection.rollback()
|
||||
|
||||
pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
|
||||
|
||||
if current_role_attrs is None:
|
||||
try:
|
||||
# AWS RDS instances do not allow users to access pg_authid,
|
||||
# so try to get current_role_attrs from the pg_roles table
|
||||
select = "SELECT * FROM pg_roles where rolname=%(user)s"
|
||||
cursor.execute(select, {"user": user})
|
||||
# Grab current role attributes from pg_roles
|
||||
current_role_attrs = cursor.fetchone()
|
||||
except psycopg2.ProgrammingError as e:
|
||||
db_connection.rollback()
|
||||
module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
|
||||
|
||||
role_attr_flags_changing = False
|
||||
if role_attr_flags:
|
||||
role_attr_flags_dict = {}
|
||||
for r in role_attr_flags.split(' '):
|
||||
if r.startswith('NO'):
|
||||
role_attr_flags_dict[r.replace('NO', '', 1)] = False
|
||||
else:
|
||||
role_attr_flags_dict[r] = True
|
||||
|
||||
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
|
||||
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
|
||||
role_attr_flags_changing = True
|
||||
|
||||
if expires is not None:
|
||||
cursor.execute("SELECT %s::timestamptz;", (expires,))
|
||||
expires_with_tz = cursor.fetchone()[0]
|
||||
expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
|
||||
else:
|
||||
expires_changing = False
|
||||
|
||||
conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
|
||||
|
||||
if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
|
||||
return False
|
||||
|
||||
alter = ['ALTER USER "%(user)s"' % {"user": user}]
|
||||
if pwchanging:
|
||||
if password != '':
|
||||
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
|
||||
alter.append("PASSWORD %(password)s")
|
||||
else:
|
||||
alter.append("WITH PASSWORD NULL")
|
||||
alter.append(role_attr_flags)
|
||||
elif role_attr_flags:
|
||||
alter.append('WITH %s' % role_attr_flags)
|
||||
if expires is not None:
|
||||
alter.append("VALID UNTIL %(expires)s")
|
||||
if conn_limit is not None:
|
||||
alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
|
||||
|
||||
query_password_data = dict(password=password, expires=expires)
|
||||
try:
|
||||
cursor.execute(' '.join(alter), query_password_data)
|
||||
changed = True
|
||||
except psycopg2.InternalError as e:
|
||||
if e.pgcode == '25006':
|
||||
# Handle errors due to read-only transactions indicated by pgcode 25006
|
||||
# ERROR: cannot execute ALTER ROLE in a read-only transaction
|
||||
changed = False
|
||||
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
|
||||
return changed
|
||||
else:
|
||||
raise psycopg2.InternalError(e)
|
||||
except psycopg2.NotSupportedError as e:
|
||||
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
|
||||
|
||||
elif no_password_changes and role_attr_flags != '':
|
||||
# Grab role information from pg_roles instead of pg_authid
|
||||
select = "SELECT * FROM pg_roles where rolname=%(user)s"
|
||||
cursor.execute(select, {"user": user})
|
||||
# Grab current role attributes.
|
||||
current_role_attrs = cursor.fetchone()
|
||||
|
||||
role_attr_flags_changing = False
|
||||
|
||||
if role_attr_flags:
|
||||
role_attr_flags_dict = {}
|
||||
for r in role_attr_flags.split(' '):
|
||||
if r.startswith('NO'):
|
||||
role_attr_flags_dict[r.replace('NO', '', 1)] = False
|
||||
else:
|
||||
role_attr_flags_dict[r] = True
|
||||
|
||||
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
|
||||
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
|
||||
role_attr_flags_changing = True
|
||||
|
||||
if not role_attr_flags_changing:
|
||||
return False
|
||||
|
||||
alter = ['ALTER USER "%(user)s"' %
|
||||
{"user": user}]
|
||||
if role_attr_flags:
|
||||
alter.append('WITH %s' % role_attr_flags)
|
||||
|
||||
try:
|
||||
cursor.execute(' '.join(alter))
|
||||
except psycopg2.InternalError as e:
|
||||
if e.pgcode == '25006':
|
||||
# Handle errors due to read-only transactions indicated by pgcode 25006
|
||||
# ERROR: cannot execute ALTER ROLE in a read-only transaction
|
||||
changed = False
|
||||
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
|
||||
return changed
|
||||
else:
|
||||
raise psycopg2.InternalError(e)
|
||||
|
||||
# Grab new role attributes.
|
||||
cursor.execute(select, {"user": user})
|
||||
new_role_attrs = cursor.fetchone()
|
||||
|
||||
# Detect any differences between current_ and new_role_attrs.
|
||||
changed = current_role_attrs != new_role_attrs
|
||||
|
||||
return changed
|
||||
|
||||
|
||||
def user_delete(cursor, user):
|
||||
"""Try to remove a user. Returns True if successful otherwise False"""
|
||||
cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
|
||||
try:
|
||||
query = 'DROP USER "%s"' % user
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
except Exception:
|
||||
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
|
||||
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
|
||||
return False
|
||||
|
||||
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
|
||||
return True
|
||||
|
||||
|
||||
def has_table_privileges(cursor, user, table, privs):
|
||||
"""
|
||||
Return the difference between the privileges that a user already has and
|
||||
the privileges that they desire to have.
|
||||
|
||||
:returns: tuple of:
|
||||
* privileges that they have and were requested
|
||||
* privileges they currently hold but were not requested
|
||||
* privileges requested that they do not hold
|
||||
"""
|
||||
cur_privs = get_table_privileges(cursor, user, table)
|
||||
have_currently = cur_privs.intersection(privs)
|
||||
other_current = cur_privs.difference(privs)
|
||||
desired = privs.difference(cur_privs)
|
||||
return (have_currently, other_current, desired)
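# Illustrative example (ours, not from the original code): if the user holds
# {'SELECT', 'INSERT'} on the table and {'SELECT', 'UPDATE'} is requested,
# has_table_privileges() returns
#   ({'SELECT'}, {'INSERT'}, {'UPDATE'})
# i.e. (held and requested, held but not requested, requested but missing).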
|
||||
|
||||
|
||||
def get_table_privileges(cursor, user, table):
|
||||
if '.' in table:
|
||||
schema, table = table.split('.', 1)
|
||||
else:
|
||||
schema = 'public'
|
||||
query = ("SELECT privilege_type FROM information_schema.role_table_grants "
|
||||
"WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
|
||||
cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
|
||||
return frozenset([x[0] for x in cursor.fetchall()])
|
||||
|
||||
|
||||
def grant_table_privileges(cursor, user, table, privs):
|
||||
# Note: priv escaped by parse_privs
|
||||
privs = ', '.join(privs)
|
||||
query = 'GRANT %s ON TABLE %s TO "%s"' % (
|
||||
privs, pg_quote_identifier(table, 'table'), user)
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
|
||||
|
||||
def revoke_table_privileges(cursor, user, table, privs):
|
||||
# Note: priv escaped by parse_privs
|
||||
privs = ', '.join(privs)
|
||||
query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
|
||||
privs, pg_quote_identifier(table, 'table'), user)
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
|
||||
|
||||
def get_database_privileges(cursor, user, db):
|
||||
priv_map = {
|
||||
'C': 'CREATE',
|
||||
'T': 'TEMPORARY',
|
||||
'c': 'CONNECT',
|
||||
}
|
||||
query = 'SELECT datacl FROM pg_database WHERE datname = %s'
|
||||
cursor.execute(query, (db,))
|
||||
datacl = cursor.fetchone()[0]
|
||||
if datacl is None:
|
||||
return set()
|
||||
r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
|
||||
if r is None:
|
||||
return set()
|
||||
o = set()
|
||||
for v in r.group(1):
|
||||
o.add(priv_map[v])
|
||||
return normalize_privileges(o, 'database')
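# Illustrative note (ours, not from the original code): pg_database.datacl
# stores aclitem entries of the form grantee=privileges/grantor, so an entry
# such as django=CTc/postgres means the role "django" was granted CREATE (C),
# TEMPORARY (T) and CONNECT (c) by "postgres"; for that entry this function
# returns {'CREATE', 'TEMPORARY', 'CONNECT'}.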
|
||||
|
||||
|
||||
def has_database_privileges(cursor, user, db, privs):
|
||||
"""
|
||||
Return the difference between the privileges that a user already has and
|
||||
the privileges that they desire to have.
|
||||
|
||||
:returns: tuple of:
|
||||
* privileges that they have and were requested
|
||||
* privileges they currently hold but were not requested
|
||||
* privileges requested that they do not hold
|
||||
"""
|
||||
cur_privs = get_database_privileges(cursor, user, db)
|
||||
have_currently = cur_privs.intersection(privs)
|
||||
other_current = cur_privs.difference(privs)
|
||||
desired = privs.difference(cur_privs)
|
||||
return (have_currently, other_current, desired)
|
||||
|
||||
|
||||
def grant_database_privileges(cursor, user, db, privs):
|
||||
# Note: priv escaped by parse_privs
|
||||
privs = ', '.join(privs)
|
||||
if user == "PUBLIC":
|
||||
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
|
||||
privs, pg_quote_identifier(db, 'database'))
|
||||
else:
|
||||
query = 'GRANT %s ON DATABASE %s TO "%s"' % (
|
||||
privs, pg_quote_identifier(db, 'database'), user)
|
||||
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
|
||||
|
||||
def revoke_database_privileges(cursor, user, db, privs):
|
||||
# Note: priv escaped by parse_privs
|
||||
privs = ', '.join(privs)
|
||||
if user == "PUBLIC":
|
||||
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
|
||||
privs, pg_quote_identifier(db, 'database'))
|
||||
else:
|
||||
query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
|
||||
privs, pg_quote_identifier(db, 'database'), user)
|
||||
|
||||
executed_queries.append(query)
|
||||
cursor.execute(query)
|
||||
|
||||
|
||||
def revoke_privileges(cursor, user, privs):
|
||||
if privs is None:
|
||||
return False
|
||||
|
||||
revoke_funcs = dict(table=revoke_table_privileges,
|
||||
database=revoke_database_privileges)
|
||||
check_funcs = dict(table=has_table_privileges,
|
||||
database=has_database_privileges)
|
||||
|
||||
changed = False
|
||||
for type_ in privs:
|
||||
for name, privileges in iteritems(privs[type_]):
|
||||
# Check that any of the privileges requested to be removed are
|
||||
# currently granted to the user
|
||||
differences = check_funcs[type_](cursor, user, name, privileges)
|
||||
if differences[0]:
|
||||
revoke_funcs[type_](cursor, user, name, privileges)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
|
||||
def grant_privileges(cursor, user, privs):
|
||||
if privs is None:
|
||||
return False
|
||||
|
||||
grant_funcs = dict(table=grant_table_privileges,
|
||||
database=grant_database_privileges)
|
||||
check_funcs = dict(table=has_table_privileges,
|
||||
database=has_database_privileges)
|
||||
|
||||
changed = False
|
||||
for type_ in privs:
|
||||
for name, privileges in iteritems(privs[type_]):
|
||||
# Check that any of the privileges requested for the user are
|
||||
# currently missing
|
||||
differences = check_funcs[type_](cursor, user, name, privileges)
|
||||
if differences[2]:
|
||||
grant_funcs[type_](cursor, user, name, privileges)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
|
||||
def parse_role_attrs(cursor, role_attr_flags):
|
||||
"""
|
||||
Parse role attributes string for user creation.
|
||||
Format:
|
||||
|
||||
attributes[,attributes,...]
|
||||
|
||||
Where:
|
||||
|
||||
attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
|
||||
[ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
|
||||
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
|
||||
"[NO]BYPASSRLS" ]
|
||||
|
||||
Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
|
||||
Note: "[NO]CREATEUSER" role attribute is deprecated.
|
||||
|
||||
"""
|
||||
flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
|
||||
|
||||
valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
|
||||
valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
|
||||
|
||||
if not flags.issubset(valid_flags):
|
||||
raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
|
||||
' '.join(flags.difference(valid_flags)))
|
||||
|
||||
return ' '.join(flags)
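# Illustrative example (ours): parse_role_attrs(cursor, 'createdb,NOLOGIN')
# returns the flags upper-cased and space-joined, e.g. 'CREATEDB NOLOGIN'
# (the order is not guaranteed because a frozenset is used), while an unknown
# flag raises InvalidFlagsError.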
|
||||
|
||||
|
||||
def normalize_privileges(privs, type_):
|
||||
new_privs = set(privs)
|
||||
if 'ALL' in new_privs:
|
||||
new_privs.update(VALID_PRIVS[type_])
|
||||
new_privs.remove('ALL')
|
||||
if 'TEMP' in new_privs:
|
||||
new_privs.add('TEMPORARY')
|
||||
new_privs.remove('TEMP')
|
||||
|
||||
return new_privs
|
||||
|
||||
|
||||
def parse_privs(privs, db):
|
||||
"""
|
||||
Parse privilege string to determine permissions for database db.
|
||||
Format:
|
||||
|
||||
privileges[/privileges/...]
|
||||
|
||||
Where:
|
||||
|
||||
privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
|
||||
TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
|
||||
"""
|
||||
if privs is None:
|
||||
return privs
|
||||
|
||||
o_privs = {
|
||||
'database': {},
|
||||
'table': {}
|
||||
}
|
||||
for token in privs.split('/'):
|
||||
if ':' not in token:
|
||||
type_ = 'database'
|
||||
name = db
|
||||
priv_set = frozenset(x.strip().upper()
|
||||
for x in token.split(',') if x.strip())
|
||||
else:
|
||||
type_ = 'table'
|
||||
name, privileges = token.split(':', 1)
|
||||
priv_set = frozenset(x.strip().upper()
|
||||
for x in privileges.split(',') if x.strip())
|
||||
|
||||
if not priv_set.issubset(VALID_PRIVS[type_]):
|
||||
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
|
||||
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
|
||||
|
||||
priv_set = normalize_privileges(priv_set, type_)
|
||||
o_privs[type_][name] = priv_set
|
||||
|
||||
return o_privs
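# Illustrative example (ours), matching the "CONNECT/products:ALL" style shown
# in the EXAMPLES block: parse_privs('CONNECT/products:SELECT,INSERT', 'acme')
# returns
#   {'database': {'acme': {'CONNECT'}},
#    'table': {'products': {'SELECT', 'INSERT'}}}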
|
||||
|
||||
|
||||
def get_valid_flags_by_version(cursor):
|
||||
"""
|
||||
Some role attributes were only introduced in certain versions. We want to
|
||||
compile a list of valid flags against the current Postgres version.
|
||||
"""
|
||||
current_version = cursor.connection.server_version
|
||||
|
||||
return [
|
||||
flag
|
||||
for flag, version_introduced in FLAGS_BY_VERSION.items()
|
||||
if current_version >= version_introduced
|
||||
]
|
||||
|
||||
|
||||
def get_comment(cursor, user):
|
||||
"""Get user's comment."""
|
||||
query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
|
||||
"FROM pg_catalog.pg_roles r "
|
||||
"WHERE r.rolname = %(user)s")
|
||||
cursor.execute(query, {'user': user})
|
||||
return cursor.fetchone()[0]
|
||||
|
||||
|
||||
def add_comment(cursor, user, comment):
|
||||
"""Add comment on user."""
|
||||
if comment != get_comment(cursor, user):
|
||||
query = 'COMMENT ON ROLE "%s" IS ' % user
|
||||
cursor.execute(query + '%(comment)s', {'comment': comment})
|
||||
executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
user=dict(type='str', required=True, aliases=['name']),
|
||||
password=dict(type='str', default=None, no_log=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
priv=dict(type='str', default=None),
|
||||
db=dict(type='str', default='', aliases=['login_db']),
|
||||
fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']),
|
||||
role_attr_flags=dict(type='str', default=''),
|
||||
encrypted=dict(type='bool', default=True),
|
||||
no_password_changes=dict(type='bool', default=False, no_log=False),
|
||||
expires=dict(type='str', default=None),
|
||||
conn_limit=dict(type='int', default=None),
|
||||
session_role=dict(type='str'),
|
||||
groups=dict(type='list', elements='str'),
|
||||
comment=dict(type='str', default=None),
|
||||
trust_input=dict(type='bool', default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
user = module.params["user"]
|
||||
password = module.params["password"]
|
||||
state = module.params["state"]
|
||||
fail_on_user = module.params["fail_on_user"]
|
||||
if module.params['db'] == '' and module.params["priv"] is not None:
|
||||
module.fail_json(msg="privileges require a database to be specified")
|
||||
privs = parse_privs(module.params["priv"], module.params["db"])
|
||||
no_password_changes = module.params["no_password_changes"]
|
||||
if module.params["encrypted"]:
|
||||
encrypted = "ENCRYPTED"
|
||||
else:
|
||||
encrypted = "UNENCRYPTED"
|
||||
expires = module.params["expires"]
|
||||
conn_limit = module.params["conn_limit"]
|
||||
role_attr_flags = module.params["role_attr_flags"]
|
||||
groups = module.params["groups"]
|
||||
if groups:
|
||||
groups = [e.strip() for e in groups]
|
||||
comment = module.params["comment"]
|
||||
session_role = module.params['session_role']
|
||||
|
||||
trust_input = module.params['trust_input']
|
||||
if not trust_input:
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, user, password, privs, expires,
|
||||
role_attr_flags, groups, comment, session_role)
|
||||
|
||||
conn_params = get_conn_params(module, module.params, warn_db_default=False)
|
||||
db_connection = connect_to_db(module, conn_params)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
try:
|
||||
role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
|
||||
except InvalidFlagsError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
kw = dict(user=user)
|
||||
changed = False
|
||||
user_removed = False
|
||||
|
||||
if state == "present":
|
||||
if user_exists(cursor, user):
|
||||
try:
|
||||
changed = user_alter(db_connection, module, user, password,
|
||||
role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
else:
|
||||
try:
|
||||
changed = user_add(cursor, user, password,
|
||||
role_attr_flags, encrypted, expires, conn_limit)
|
||||
except psycopg2.ProgrammingError as e:
|
||||
module.fail_json(msg="Unable to add user with given requirement "
|
||||
"due to : %s" % to_native(e),
|
||||
exception=traceback.format_exc())
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
try:
|
||||
changed = grant_privileges(cursor, user, privs) or changed
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
if groups:
|
||||
target_roles = []
|
||||
target_roles.append(user)
|
||||
pg_membership = PgMembership(module, cursor, groups, target_roles)
|
||||
changed = pg_membership.grant() or changed
|
||||
executed_queries.extend(pg_membership.executed_queries)
|
||||
|
||||
if comment is not None:
|
||||
try:
|
||||
changed = add_comment(cursor, user, comment) or changed
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
else:
|
||||
if user_exists(cursor, user):
|
||||
if module.check_mode:
|
||||
changed = True
|
||||
kw['user_removed'] = True
|
||||
else:
|
||||
try:
|
||||
changed = revoke_privileges(cursor, user, privs)
|
||||
user_removed = user_delete(cursor, user)
|
||||
except SQLParseError as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
changed = changed or user_removed
|
||||
if fail_on_user and not user_removed:
|
||||
msg = "Unable to remove user"
|
||||
module.fail_json(msg=msg)
|
||||
kw['user_removed'] = user_removed
|
||||
|
||||
if changed:
|
||||
if module.check_mode:
|
||||
db_connection.rollback()
|
||||
else:
|
||||
db_connection.commit()
|
||||
|
||||
kw['changed'] = changed
|
||||
kw['queries'] = executed_queries
|
||||
module.exit_json(**kw)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,335 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: postgresql_user_obj_stat_info
|
||||
short_description: Gather statistics about PostgreSQL user objects
|
||||
description:
|
||||
- Gathers statistics about PostgreSQL user objects.
|
||||
version_added: '0.2.0'
|
||||
options:
|
||||
filter:
|
||||
description:
|
||||
- Limit the collected information with a comma-separated string or a YAML list.
|
||||
- Allowable values are C(functions), C(indexes), C(tables).
|
||||
- By default, collects all subsets.
|
||||
- Unsupported values are ignored.
|
||||
type: list
|
||||
elements: str
|
||||
schema:
|
||||
description:
|
||||
- Restrict the output to a certain schema.
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of the database to connect to.
|
||||
type: str
|
||||
aliases:
|
||||
- login_db
|
||||
session_role:
|
||||
description:
|
||||
- Switch to session_role after connecting. The specified session_role must
|
||||
be a role that the current login_user is a member of.
|
||||
- Permissions checking for SQL commands is carried out as though
|
||||
the session_role were the one that had logged in originally.
|
||||
type: str
|
||||
trust_input:
|
||||
description:
|
||||
- If C(no), check whether the value of I(session_role) is potentially dangerous.
|
||||
- It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: '0.2.0'
|
||||
|
||||
notes:
|
||||
- The returned C(size) and C(total_size) values are presented in bytes.
|
||||
- For tracking function statistics, the PostgreSQL C(track_functions) parameter must be enabled.
|
||||
See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
|
||||
seealso:
|
||||
- module: community.general.postgresql_info
|
||||
- module: community.general.postgresql_ping
|
||||
- name: PostgreSQL statistics collector reference
|
||||
description: Complete reference of the PostgreSQL statistics collector documentation.
|
||||
link: https://www.postgresql.org/docs/current/monitoring-stats.html
|
||||
author:
|
||||
- Andrew Klychkov (@Andersson007)
|
||||
- Thomas O'Donnell (@andytom)
|
||||
extends_documentation_fragment:
|
||||
- community.general.postgres
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Collect information about all supported user objects of the acme database
|
||||
community.general.postgresql_user_obj_stat_info:
|
||||
db: acme
|
||||
|
||||
- name: Collect information about all supported user objects in the custom schema of the acme database
|
||||
community.general.postgresql_user_obj_stat_info:
|
||||
db: acme
|
||||
schema: custom
|
||||
|
||||
- name: Collect information about user tables and indexes in the acme database
|
||||
community.general.postgresql_user_obj_stat_info:
|
||||
db: acme
|
||||
filter: tables, indexes
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
indexes:
|
||||
description: User index statistics.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
|
||||
tables:
|
||||
description: User table statistics.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
|
||||
functions:
|
||||
description: User function statistics.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
|
||||
'''
|
||||
|
||||
try:
|
||||
from psycopg2.extras import DictCursor
|
||||
except ImportError:
|
||||
# psycopg2 is checked by connect_to_db()
|
||||
# from ansible.module_utils.postgres
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.database import (
|
||||
check_input,
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.postgres import (
|
||||
connect_to_db,
|
||||
exec_sql,
|
||||
get_conn_params,
|
||||
postgres_common_argument_spec,
|
||||
)
|
||||
from ansible.module_utils.six import iteritems
|
||||
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
#
|
||||
|
||||
|
||||
class PgUserObjStatInfo():
|
||||
"""Class to collect information about PostgreSQL user objects.
|
||||
|
||||
Args:
|
||||
module (AnsibleModule): Object of AnsibleModule class.
|
||||
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
|
||||
|
||||
Attributes:
|
||||
module (AnsibleModule): Object of AnsibleModule class.
|
||||
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
|
||||
executed_queries (list): List of executed queries.
|
||||
info (dict): Statistics dictionary.
|
||||
obj_func_mapping (dict): Mapping of object types to corresponding functions.
|
||||
schema (str): Name of a schema to restrict stat collecting.
|
||||
"""
|
||||
|
||||
def __init__(self, module, cursor):
|
||||
self.module = module
|
||||
self.cursor = cursor
|
||||
self.info = {
|
||||
'functions': {},
|
||||
'indexes': {},
|
||||
'tables': {},
|
||||
}
|
||||
self.obj_func_mapping = {
|
||||
'functions': self.get_func_stat,
|
||||
'indexes': self.get_idx_stat,
|
||||
'tables': self.get_tbl_stat,
|
||||
}
|
||||
self.schema = None
|
||||
|
||||
def collect(self, filter_=None, schema=None):
|
||||
"""Collect statistics information of user objects.
|
||||
|
||||
Kwargs:
|
||||
filter_ (list): List of subsets which need to be collected.
|
||||
schema (str): Restrict stat collecting to a certain schema.
|
||||
|
||||
Returns:
|
||||
``self.info``.
|
||||
"""
|
||||
if schema:
|
||||
self.set_schema(schema)
|
||||
|
||||
if filter_:
|
||||
for obj_type in filter_:
|
||||
obj_type = obj_type.strip()
|
||||
obj_func = self.obj_func_mapping.get(obj_type)
|
||||
|
||||
if obj_func is not None:
|
||||
obj_func()
|
||||
else:
|
||||
self.module.warn("Unknown filter option '%s'" % obj_type)
|
||||
|
||||
else:
|
||||
for obj_func in self.obj_func_mapping.values():
|
||||
obj_func()
|
||||
|
||||
return self.info
|
||||
|
||||
def get_func_stat(self):
|
||||
"""Get function statistics and fill out self.info dictionary."""
|
||||
query = "SELECT * FROM pg_stat_user_functions"
|
||||
if self.schema:
|
||||
query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
|
||||
|
||||
result = exec_sql(self, query, query_params=(self.schema,),
|
||||
add_to_executed=False)
|
||||
|
||||
if not result:
|
||||
return
|
||||
|
||||
self.__fill_out_info(result,
|
||||
info_key='functions',
|
||||
schema_key='schemaname',
|
||||
name_key='funcname')
|
||||
|
||||
def get_idx_stat(self):
|
||||
"""Get index statistics and fill out self.info dictionary."""
|
||||
query = "SELECT * FROM pg_stat_user_indexes"
|
||||
if self.schema:
|
||||
query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
|
||||
|
||||
result = exec_sql(self, query, query_params=(self.schema,),
|
||||
add_to_executed=False)
|
||||
|
||||
if not result:
|
||||
return
|
||||
|
||||
self.__fill_out_info(result,
|
||||
info_key='indexes',
|
||||
schema_key='schemaname',
|
||||
name_key='indexrelname')
|
||||
|
||||
def get_tbl_stat(self):
|
||||
"""Get table statistics and fill out self.info dictionary."""
|
||||
query = "SELECT * FROM pg_stat_user_tables"
|
||||
if self.schema:
|
||||
query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
|
||||
|
||||
result = exec_sql(self, query, query_params=(self.schema,),
|
||||
add_to_executed=False)
|
||||
|
||||
if not result:
|
||||
return
|
||||
|
||||
self.__fill_out_info(result,
|
||||
info_key='tables',
|
||||
schema_key='schemaname',
|
||||
name_key='relname')
|
||||
|
||||
def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
|
||||
# Convert result to list of dicts to handle it easier:
|
||||
result = [dict(row) for row in result]
|
||||
|
||||
for elem in result:
|
||||
# Add the schema name as a key if not already present:
|
||||
if not self.info[info_key].get(elem[schema_key]):
|
||||
self.info[info_key][elem[schema_key]] = {}
|
||||
|
||||
# Add object name key as a subkey
|
||||
# (they must be unique within a schema, so no additional checks are needed):
|
||||
self.info[info_key][elem[schema_key]][elem[name_key]] = {}
|
||||
|
||||
# Add the remaining attributes of the object:
|
||||
for key, val in iteritems(elem):
|
||||
if key not in (schema_key, name_key):
|
||||
self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
|
||||
|
||||
if info_key in ('tables', 'indexes'):
|
||||
schemaname = elem[schema_key]
|
||||
if self.schema:
|
||||
schemaname = self.schema
|
||||
|
||||
relname = '%s.%s' % (schemaname, elem[name_key])
|
||||
|
||||
result = exec_sql(self, "SELECT pg_relation_size (%s)",
|
||||
query_params=(relname,),
|
||||
add_to_executed=False)
|
||||
|
||||
self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
|
||||
|
||||
if info_key == 'tables':
|
||||
result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
|
||||
query_params=(relname,),
|
||||
add_to_executed=False)
|
||||
|
||||
self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
|
||||
|
||||
def set_schema(self, schema):
|
||||
"""If schema exists, sets self.schema, otherwise fails."""
|
||||
query = ("SELECT 1 FROM information_schema.schemata "
|
||||
"WHERE schema_name = %s")
|
||||
result = exec_sql(self, query, query_params=(schema,),
|
||||
add_to_executed=False)
|
||||
|
||||
if result and result[0][0]:
|
||||
self.schema = schema
|
||||
else:
|
||||
self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
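# Illustrative usage (ours, not part of the original module):
#   info = PgUserObjStatInfo(module, cursor).collect(filter_=['tables'], schema='public')
# returns a dict shaped like
#   {'functions': {}, 'indexes': {}, 'tables': {'public': {'mytable': {...}}}}
# with 'size' and 'total_size' filled in for tables as done in __fill_out_info().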
|
||||
|
||||
|
||||
# ===========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
||||
def main():
|
||||
argument_spec = postgres_common_argument_spec()
|
||||
argument_spec.update(
|
||||
db=dict(type='str', aliases=['login_db']),
|
||||
filter=dict(type='list', elements='str'),
|
||||
session_role=dict(type='str'),
|
||||
schema=dict(type='str'),
|
||||
trust_input=dict(type="bool", default=True),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
filter_ = module.params["filter"]
|
||||
schema = module.params["schema"]
|
||||
|
||||
if not module.params["trust_input"]:
|
||||
check_input(module, module.params['session_role'])
|
||||
|
||||
# Connect to DB and make cursor object:
|
||||
pg_conn_params = get_conn_params(module, module.params)
|
||||
# We don't need to commit anything, so set it to False:
|
||||
db_connection = connect_to_db(module, pg_conn_params, autocommit=False)
|
||||
cursor = db_connection.cursor(cursor_factory=DictCursor)
|
||||
|
||||
############################
|
||||
# Create object and do work:
|
||||
pg_obj_info = PgUserObjStatInfo(module, cursor)
|
||||
|
||||
info_dict = pg_obj_info.collect(filter_, schema)
|
||||
|
||||
# Clean up:
|
||||
cursor.close()
|
||||
db_connection.close()
|
||||
|
||||
# Return information:
|
||||
module.exit_json(**info_dict)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_copy.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_db.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_ext.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_idx.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_info.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_lang.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_membership.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_owner.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_pg_hba.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_ping.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_privs.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_publication.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_query.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_schema.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_sequence.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_set.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_slot.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_subscription.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_table.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_tablespace.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_user.py
|
|
@ -1 +0,0 @@
|
|||
./database/postgresql/postgresql_user_obj_stat_info.py
|
|
@ -37,12 +37,7 @@
|
|||
# Name setup database
|
||||
#
|
||||
- name: Create a user to run the tests with
|
||||
postgresql_user:
|
||||
name: "{{ my_user }}"
|
||||
password: "{{ my_pass }}"
|
||||
encrypted: 'yes'
|
||||
role_attr_flags: "SUPERUSER"
|
||||
db: postgres
|
||||
shell: echo "CREATE USER {{ my_user }} SUPERUSER PASSWORD '{{ my_pass }}'" | psql postgres
|
||||
become_user: "{{ pg_user }}"
|
||||
become: True
|
||||
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
destructive
|
||||
shippable/posix/group4
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/macos
|
||||
disabled # tests already running in community.postgresql
|
|
@ -1,2 +0,0 @@
|
|||
dependencies:
|
||||
- setup_postgresql_db
|
|
@ -1,8 +0,0 @@
|
|||
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
# Initial CI tests of postgresql_copy module
|
||||
- import_tasks: postgresql_copy_initial.yml
|
||||
when: postgres_version_resp.stdout is version('9.4', '>=')
|
|
@ -1,278 +0,0 @@
|
|||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# The file for testing postgresql_copy module.
|
||||
|
||||
- vars:
|
||||
test_table: acme
|
||||
data_file_txt: /tmp/data.txt
|
||||
data_file_csv: /tmp/data.csv
|
||||
task_parameters: &task_parameters
|
||||
become_user: '{{ pg_user }}'
|
||||
become: yes
|
||||
register: result
|
||||
pg_parameters: &pg_parameters
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
|
||||
block:
|
||||
# Test preparation:
|
||||
- name: postgresql_copy - create test table
|
||||
<<: *task_parameters
|
||||
postgresql_table:
|
||||
<<: *pg_parameters
|
||||
name: '{{ test_table }}'
|
||||
columns:
|
||||
- id int
|
||||
- name text
|
||||
|
||||
# Insert the data:
|
||||
- name: postgresql_copy - insert rows into test table
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "INSERT INTO {{ test_table }} (id, name) VALUES (1, 'first')"
|
||||
|
||||
- name: postgresql_copy - ensure that test data files don't exist
|
||||
<<: *task_parameters
|
||||
file:
|
||||
path: '{{ item }}'
|
||||
state: absent
|
||||
with_items:
|
||||
- '{{ data_file_csv }}'
|
||||
- '{{ data_file_txt }}'
|
||||
|
||||
# ##############
|
||||
# Do main tests:
|
||||
|
||||
# check_mode - if it's OK, must always return changed=True:
|
||||
- name: postgresql_copy - check_mode, copy test table content to data_file_txt
|
||||
check_mode: yes
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
copy_to: '{{ data_file_txt }}'
|
||||
src: '{{ test_table }}'
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
# check that nothing changed after the previous step:
|
||||
- name: postgresql_copy - check that data_file_txt doesn't exist
|
||||
<<: *task_parameters
|
||||
ignore_errors: yes
|
||||
shell: head -n 1 '{{ data_file_txt }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.failed == true
|
||||
- result.rc == 1
|
||||
|
||||
# check_mode - if it's OK, must always return changed=True:
|
||||
- name: postgresql_copy - check_mode, copy test table content from data_file_txt
|
||||
check_mode: yes
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
copy_from: '{{ data_file_txt }}'
|
||||
dst: '{{ test_table }}'
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
# check that nothing changed after the previous step:
|
||||
- name: postgresql_copy - check that test table continues to have one row
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: 'SELECT * FROM {{ test_table }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
# check_mode - must fail because the source table doesn't exist:
|
||||
- name: postgresql_copy - check_mode, copy non-existent table to data_file_txt
|
||||
check_mode: yes
|
||||
ignore_errors: yes
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
copy_to: '{{ data_file_txt }}'
|
||||
src: non_existent_table
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.failed == true
|
||||
- result.queries is not defined
|
||||
|
||||
- name: postgresql_copy - check trust_input
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
copy_to: '{{ data_file_txt }}'
|
||||
src: '{{ test_table }}'
|
||||
session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
|
||||
trust_input: no
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
- result.msg is search('is potentially dangerous')
|
||||
|
||||
- name: postgresql_copy - copy test table data to data_file_txt
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
copy_to: '{{ data_file_txt }}'
|
||||
src: '{{ test_table }}'
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["COPY \"{{ test_table }}\" TO '{{ data_file_txt }}'"]
|
||||
- result.src == '{{ test_table }}'
|
||||
- result.dst == '{{ data_file_txt }}'
|
||||
|
||||
# check the prev test
|
||||
- name: postgresql_copy - check data_file_txt exists and is not empty
|
||||
<<: *task_parameters
|
||||
shell: 'head -n 1 {{ data_file_txt }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.stdout == '1\tfirst'
|
||||
|
||||
# test different options and columns
|
||||
- name: postgresql_copy - copy test table data to data_file_csv with options and columns
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
copy_to: '{{ data_file_csv }}'
|
||||
src: '{{ test_table }}'
|
||||
columns:
|
||||
- id
|
||||
- name
|
||||
options:
|
||||
format: csv
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["COPY \"{{ test_table }}\" (id,name) TO '{{ data_file_csv }}' (format csv)"]
|
||||
- result.src == '{{ test_table }}'
|
||||
- result.dst == '{{ data_file_csv }}'
|
||||
|
||||
# check the prev test
|
||||
- name: postgresql_copy - check data_file_csv exists and is not empty
|
||||
<<: *task_parameters
|
||||
shell: 'head -n 1 {{ data_file_csv }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.stdout == '1,first'
|
||||
|
||||
- name: postgresql_copy - copy from data_file_csv to test table
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
copy_from: '{{ data_file_csv }}'
|
||||
dst: '{{ test_table }}'
|
||||
columns:
|
||||
- id
|
||||
- name
|
||||
options:
|
||||
format: csv
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["COPY \"{{ test_table }}\" (id,name) FROM '{{ data_file_csv }}' (format csv)"]
|
||||
- result.dst == '{{ test_table }}'
|
||||
- result.src == '{{ data_file_csv }}'
|
||||
|
||||
- name: postgresql_copy - check that there are two rows in test table after the prev step
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT * FROM {{ test_table }} WHERE id = '1' AND name = 'first'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 2
|
||||
|
||||
- name: postgresql_copy - test program option, copy to program
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
src: '{{ test_table }}'
|
||||
copy_to: '/bin/true'
|
||||
program: yes
|
||||
columns: id, name
|
||||
options:
|
||||
delimiter: '|'
|
||||
trust_input: no
|
||||
when: ansible_distribution != 'FreeBSD'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["COPY \"{{ test_table }}\" (id, name) TO PROGRAM '/bin/true' (delimiter '|')"]
|
||||
- result.src == '{{ test_table }}'
|
||||
- result.dst == '/bin/true'
|
||||
when: ansible_distribution != 'FreeBSD'
|
||||
|
||||
- name: postgresql_copy - test program option, copy from program
|
||||
<<: *task_parameters
|
||||
postgresql_copy:
|
||||
<<: *pg_parameters
|
||||
dst: '{{ test_table }}'
|
||||
copy_from: 'echo 1,first'
|
||||
program: yes
|
||||
columns: id, name
|
||||
options:
|
||||
delimiter: ','
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["COPY \"{{ test_table }}\" (id, name) FROM PROGRAM 'echo 1,first' (delimiter ',')"]
|
||||
- result.dst == '{{ test_table }}'
|
||||
- result.src == 'echo 1,first'
|
||||
when: ansible_distribution != 'FreeBSD'
|
||||
|
||||
- name: postgresql_copy - check that there are three rows in test table after the prev step
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT * FROM {{ test_table }} WHERE id = '1' AND name = 'first'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 3
|
||||
|
||||
# clean up
|
||||
- name: postgresql_copy - remove test table
|
||||
<<: *task_parameters
|
||||
postgresql_table:
|
||||
<<: *pg_parameters
|
||||
name: '{{ test_table }}'
|
||||
state: absent
|
||||
|
||||
- name: postgresql_copy - remove test data files
|
||||
<<: *task_parameters
|
||||
file:
|
||||
path: '{{ item }}'
|
||||
state: absent
|
||||
with_items:
|
||||
- '{{ data_file_csv }}'
|
||||
- '{{ data_file_txt }}'
|
|
@ -1,7 +0,0 @@
|
|||
destructive
|
||||
shippable/posix/group4
|
||||
postgresql_db
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/macos
|
||||
disabled # tests already running in community.postgresql
|
|
@ -1,11 +0,0 @@
|
|||
db_name: 'ansible_db'
|
||||
db_user1: 'ansible.db.user1'
|
||||
db_user2: 'ansible.db.user2'
|
||||
tmp_dir: '/tmp'
|
||||
db_session_role1: 'session_role1'
|
||||
db_session_role2: 'session_role2'
|
||||
|
||||
# To test trust_input parameter and
|
||||
# the possibility to create a database with dots in its name
|
||||
db_name_with_dot: 'db.name'
|
||||
suspicious_db_name: '{{ db_name_with_dot }}"; --'
|
|
@ -1,2 +0,0 @@
|
|||
dependencies:
|
||||
- setup_postgresql_db
|
|
@ -1,36 +0,0 @@
|
|||
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
- import_tasks: postgresql_db_session_role.yml
|
||||
|
||||
# Initial tests of postgresql_db module:
|
||||
- import_tasks: postgresql_db_initial.yml
|
||||
|
||||
# General tests:
|
||||
- import_tasks: postgresql_db_general.yml
|
||||
|
||||
# Dump/restore tests per format:
|
||||
- include_tasks: state_dump_restore.yml
|
||||
vars:
|
||||
test_fixture: user
|
||||
file: '{{ loop_item }}'
|
||||
loop:
|
||||
- dbdata.sql
|
||||
- dbdata.sql.gz
|
||||
- dbdata.sql.bz2
|
||||
- dbdata.sql.xz
|
||||
- dbdata.tar
|
||||
- dbdata.tar.gz
|
||||
- dbdata.tar.bz2
|
||||
- dbdata.tar.xz
|
||||
- dbdata.pgc
|
||||
loop_control:
|
||||
loop_var: loop_item
|
||||
|
||||
# Dump/restore tests per other logins:
|
||||
- import_tasks: state_dump_restore.yml
|
||||
vars:
|
||||
file: dbdata.tar
|
||||
test_fixture: admin
|
|
@ -1,152 +0,0 @@
|
|||
- become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
vars:
|
||||
db_tablespace: bar
|
||||
tblspc_location: /ssd
|
||||
db_name: acme
|
||||
block_parameters:
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
task_parameters:
|
||||
register: result
|
||||
pg_parameters:
|
||||
login_user: '{{ pg_user }}'
|
||||
block:
|
||||
- name: postgresql_db - drop dir for test tablespace
|
||||
become: true
|
||||
become_user: root
|
||||
file:
|
||||
path: '{{ tblspc_location }}'
|
||||
state: absent
|
||||
ignore_errors: true
|
||||
- name: postgresql_db - disable selinux
|
||||
become: true
|
||||
become_user: root
|
||||
shell: setenforce 0
|
||||
ignore_errors: true
|
||||
- name: postgresql_db - create dir for test tablespace
|
||||
become: true
|
||||
become_user: root
|
||||
file:
|
||||
path: '{{ tblspc_location }}'
|
||||
state: directory
|
||||
owner: '{{ pg_user }}'
|
||||
group: '{{ pg_user }}'
|
||||
mode: '0700'
|
||||
- name: postgresql_db_ - create a new tablespace
|
||||
postgresql_tablespace:
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
name: '{{ db_tablespace }}'
|
||||
location: '{{ tblspc_location }}'
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Create DB with tablespace option in check mode
|
||||
check_mode: true
|
||||
postgresql_db:
|
||||
login_user: '{{ pg_user }}'
|
||||
maintenance_db: postgres
|
||||
name: '{{ db_name }}'
|
||||
tablespace: '{{ db_tablespace }}'
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 0 because actually nothing changed
|
||||
postgresql_query:
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
|
||||
|
||||
'
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Create DB with tablespace option
|
||||
postgresql_db:
|
||||
login_user: '{{ pg_user }}'
|
||||
maintenance_db: postgres
|
||||
name: '{{ db_name }}'
|
||||
tablespace: '{{ db_tablespace }}'
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.executed_commands == ['CREATE DATABASE "{{ db_name }}" TABLESPACE "{{ db_tablespace }}"']
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1
|
||||
postgresql_query:
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
|
||||
|
||||
'
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - The same DB with tablespace option again
|
||||
postgresql_db:
|
||||
login_user: '{{ pg_user }}'
|
||||
maintenance_db: postgres
|
||||
name: '{{ db_name }}'
|
||||
tablespace: '{{ db_tablespace }}'
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Change tablespace in check_mode
|
||||
check_mode: true
|
||||
postgresql_db:
|
||||
login_user: '{{ pg_user }}'
|
||||
maintenance_db: postgres
|
||||
name: '{{ db_name }}'
|
||||
tablespace: pg_default
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1 because actually nothing changed
|
||||
postgresql_query:
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
|
||||
|
||||
'
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Change tablespace in actual mode
|
||||
postgresql_db:
|
||||
login_user: '{{ pg_user }}'
|
||||
maintenance_db: postgres
|
||||
name: '{{ db_name }}'
|
||||
tablespace: pg_default
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1
|
||||
postgresql_query:
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''pg_default''
|
||||
|
||||
'
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Drop test DB
|
||||
postgresql_db:
|
||||
login_user: '{{ pg_user }}'
|
||||
maintenance_db: postgres
|
||||
name: '{{ db_name }}'
|
||||
state: absent
|
||||
- register: result
|
||||
name: postgresql_db_tablespace - Remove tablespace
|
||||
postgresql_tablespace:
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
name: '{{ db_tablespace }}'
|
||||
state: absent
|
|
@ -1,366 +0,0 @@
|
|||
#
|
||||
# Create and destroy db
|
||||
#
|
||||
- name: Create DB
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
state: present
|
||||
name: "{{ db_name }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- name: assert that module reports the db was created
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.db == "{{ db_name }}"
|
||||
- result.executed_commands == ['CREATE DATABASE "{{ db_name }}"']
|
||||
|
||||
- name: Check that database created
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.stdout_lines[-1] == '(1 row)'"
|
||||
|
||||
- name: Run create on an already created db
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
state: present
|
||||
name: "{{ db_name }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- name: assert that module reports the db was unchanged
|
||||
assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
- name: Destroy DB
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
state: absent
|
||||
name: "{{ db_name }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- name: assert that module reports the db was changed
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.executed_commands == ['DROP DATABASE "{{ db_name }}"']
|
||||
|
||||
- name: Check that database was destroyed
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.stdout_lines[-1] == '(0 rows)'"
|
||||
|
||||
- name: Destroy DB
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
state: absent
|
||||
name: "{{ db_name }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- name: assert that removing an already removed db makes no change
|
||||
assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
|
||||
# This corner case works to add but not to drop. This is sufficiently crazy
|
||||
# that I'm not going to attempt to fix it unless someone lets me know that they
|
||||
# need the functionality
|
||||
#
|
||||
# - postgresql_db:
|
||||
# state: 'present'
|
||||
# name: '"silly.""name"'
|
||||
# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql
|
||||
# register: result
|
||||
#
|
||||
# - assert:
|
||||
# that: "result.stdout_lines[-1] == '(1 row)'"
|
||||
# - postgresql_db:
|
||||
# state: absent
|
||||
# name: '"silly.""name"'
|
||||
# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql
|
||||
# register: result
|
||||
#
|
||||
# - assert:
|
||||
# that: "result.stdout_lines[-1] == '(0 rows)'"
|
||||
|
||||
#
|
||||
# Test conn_limit, encoding, collate, ctype, template options
|
||||
#
|
||||
- name: Create a DB with conn_limit, encoding, collate, ctype, and template options
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: '{{ db_name }}'
|
||||
state: 'present'
|
||||
conn_limit: '100'
|
||||
encoding: 'LATIN1'
|
||||
lc_collate: 'pt_BR{{ locale_latin_suffix }}'
|
||||
lc_ctype: 'es_ES{{ locale_latin_suffix }}'
|
||||
template: 'template0'
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING 'LATIN1' LC_COLLATE 'pt_BR{{ locale_latin_suffix }}' LC_CTYPE 'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"] or result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING E'LATIN1' LC_COLLATE E'pt_BR{{ locale_latin_suffix }}' LC_CTYPE E'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"]
|
||||
|
||||
- name: Check that the DB has all of our options
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
shell: echo "select datname, datconnlimit, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.stdout_lines[-1] == '(1 row)'"
|
||||
- "'LATIN1' in result.stdout_lines[-2]"
|
||||
- "'pt_BR' in result.stdout_lines[-2]"
|
||||
- "'es_ES' in result.stdout_lines[-2]"
|
||||
- "'UTF8' not in result.stdout_lines[-2]"
|
||||
- "'en_US' not in result.stdout_lines[-2]"
|
||||
- "'100' in result.stdout_lines[-2]"
|
||||
|
||||
- name: Check that running db creation with options a second time does nothing
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: '{{ db_name }}'
|
||||
state: 'present'
|
||||
conn_limit: '100'
|
||||
encoding: 'LATIN1'
|
||||
lc_collate: 'pt_BR{{ locale_latin_suffix }}'
|
||||
lc_ctype: 'es_ES{{ locale_latin_suffix }}'
|
||||
template: 'template0'
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
|
||||
- name: Check that attempting to change encoding returns an error
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: '{{ db_name }}'
|
||||
state: 'present'
|
||||
encoding: 'UTF8'
|
||||
lc_collate: 'pt_BR{{ locale_utf8_suffix }}'
|
||||
lc_ctype: 'es_ES{{ locale_utf8_suffix }}'
|
||||
template: 'template0'
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
|
||||
- name: Check that changing the conn_limit actually works
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: '{{ db_name }}'
|
||||
state: 'present'
|
||||
conn_limit: '200'
|
||||
encoding: 'LATIN1'
|
||||
lc_collate: 'pt_BR{{ locale_latin_suffix }}'
|
||||
lc_ctype: 'es_ES{{ locale_latin_suffix }}'
|
||||
template: 'template0'
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.executed_commands == ['ALTER DATABASE "{{ db_name }}" CONNECTION LIMIT 200']
|
||||
|
||||
- name: Check that conn_limit has actually been set / updated to 200
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
shell: echo "SELECT datconnlimit AS conn_limit FROM pg_database WHERE datname = '{{ db_name }}';" | psql -d postgres
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.stdout_lines[-1] == '(1 row)'"
|
||||
- "'200' == '{{ result.stdout_lines[-2] | trim }}'"
|
||||
|
||||
- name: Cleanup test DB
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: '{{ db_name }}'
|
||||
state: 'absent'
|
||||
login_user: "{{ pg_user }}"
|
||||
|
||||
- shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.stdout_lines[-1] == '(0 rows)'"
|
||||
|
||||
#
|
||||
# Test db ownership
|
||||
#
|
||||
- name: Create an unprivileged user to own a DB
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_user:
|
||||
name: "{{ item }}"
|
||||
encrypted: 'yes'
|
||||
password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
loop:
|
||||
- "{{ db_user1 }}"
|
||||
- "{{ db_user2 }}"
|
||||
|
||||
- name: Create db with user ownership
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: "{{ db_name }}"
|
||||
state: "present"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.executed_commands == ['CREATE DATABASE "{{ db_name }}" OWNER "{{ db_user1 }}"']
|
||||
|
||||
- name: Check that the user owns the newly created DB
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
query: >
|
||||
SELECT 1 FROM pg_catalog.pg_database
|
||||
WHERE datname = '{{ db_name }}'
|
||||
AND pg_catalog.pg_get_userbyid(datdba) = '{{ db_user1 }}'
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: Change the owner on an existing db, username with dots
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: "{{ db_name }}"
|
||||
state: "present"
|
||||
owner: "{{ db_user2 }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.executed_commands == ['ALTER DATABASE "{{ db_name }}" OWNER TO "{{ db_user2 }}"']
|
||||
|
||||
- name: Check the previous step
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_query:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
query: >
|
||||
SELECT 1 FROM pg_catalog.pg_database
|
||||
WHERE datname = '{{ db_name }}'
|
||||
AND pg_catalog.pg_get_userbyid(datdba) = '{{ db_user2 }}'
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: Change the owner on an existing db
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: "{{ db_name }}"
|
||||
state: "present"
|
||||
owner: "{{ pg_user }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
register: result
|
||||
|
||||
- name: assert that ansible says it changed the db
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: Check that the user owns the newly created DB
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql -d postgres
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.stdout_lines[-1] == '(1 row)'"
|
||||
- "'{{ pg_user }}' == '{{ result.stdout_lines[-2] | trim }}'"
|
||||
|
||||
- name: Cleanup db
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_db:
|
||||
name: "{{ db_name }}"
|
||||
state: "absent"
|
||||
login_user: "{{ pg_user }}"
|
||||
|
||||
- name: Check that database was destroyed
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.stdout_lines[-1] == '(0 rows)'"
|
||||
|
||||
- name: Cleanup test user
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_user:
|
||||
name: "{{ db_user1 }}"
|
||||
state: 'absent'
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
|
||||
- name: Check that they were removed
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.stdout_lines[-1] == '(0 rows)'"
|
|
@@ -1,80 +0,0 @@
- name: Check that becoming a non-existing user throws an error
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_db:
    state: present
    name: must_fail
    login_user: "{{ pg_user }}"
    session_role: "{{ db_session_role1 }}"
  register: result
  ignore_errors: yes

- assert:
    that:
    - result is failed

- name: Create a high privileged user
  become: yes
  become_user: "{{ pg_user }}"
  postgresql_user:
    name: "{{ db_session_role1 }}"
    state: "present"
    password: "password"
    role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
    login_user: "{{ pg_user }}"
    db: postgres

- name: Create a low privileged user using the newly created user
  become: yes
  become_user: "{{ pg_user }}"
  postgresql_user:
    name: "{{ db_session_role2 }}"
    state: "present"
    password: "password"
    role_attr_flags: "LOGIN"
    login_user: "{{ pg_user }}"
    session_role: "{{ db_session_role1 }}"
    db: postgres

- name: Create DB as session_role
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_db:
    state: present
    name: "{{ db_session_role1 }}"
    login_user: "{{ pg_user }}"
    session_role: "{{ db_session_role1 }}"
  register: result

- name: Check that database created and is owned by correct user
  become_user: "{{ pg_user }}"
  become: yes
  shell: echo "select rolname from pg_database join pg_roles on datdba = pg_roles.oid where datname = '{{ db_session_role1 }}';" | psql -AtXq postgres
  register: result

- assert:
    that:
    - "result.stdout_lines[-1] == '{{ db_session_role1 }}'"

- name: Fail when creating database as low privileged user
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_db:
    state: present
    name: "{{ db_session_role2 }}"
    login_user: "{{ pg_user }}"
    session_role: "{{ db_session_role2 }}"
  register: result
  ignore_errors: yes

- assert:
    that:
    - result is failed

- name: Drop test db
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_db:
    state: absent
    name: "{{ db_session_role1 }}"
    login_user: "{{ pg_user }}"
@@ -1,235 +0,0 @@
|
|||
# test code for state dump and restore for postgresql_db module
|
||||
# copied from mysql_db/tasks/state_dump_import.yml
|
||||
# (c) 2014, Wayne Rosario <wrosario@ansible.com>
|
||||
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# ============================================================
|
||||
|
||||
- name: Create a test user
|
||||
become: yes
|
||||
become_user: "{{ pg_user }}"
|
||||
postgresql_user:
|
||||
name: "{{ db_user1 }}"
|
||||
state: "present"
|
||||
encrypted: 'yes'
|
||||
password: "password"
|
||||
role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
|
||||
- set_fact: db_file_name="{{tmp_dir}}/{{file}}"
|
||||
|
||||
- set_fact:
|
||||
admin_str: "psql -U {{ pg_user }}"
|
||||
|
||||
- set_fact:
|
||||
user_str: "env PGPASSWORD=password psql -h localhost -U {{ db_user1 }} {{ db_name }}"
|
||||
when: test_fixture == "user"
|
||||
# "-n public" is required to work around pg_restore issues with plpgsql
|
||||
|
||||
- set_fact:
|
||||
user_str: "psql -U {{ pg_user }} {{ db_name }}"
|
||||
when: test_fixture == "admin"
|
||||
|
||||
|
||||
|
||||
- set_fact:
|
||||
sql_create: "create table employee(id int, name varchar(100));"
|
||||
sql_insert: "insert into employee values (47,'Joe Smith');"
|
||||
sql_select: "select * from employee;"
|
||||
|
||||
- name: state dump/restore - create database
|
||||
postgresql_db:
|
||||
state: present
|
||||
name: "{{ db_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
|
||||
- name: state dump/restore - create table employee
|
||||
command: '{{ user_str }} -c "{{ sql_create }}"'
|
||||
|
||||
- name: state dump/restore - insert data into table employee
|
||||
command: '{{ user_str }} -c "{{ sql_insert }}"'
|
||||
|
||||
- name: state dump/restore - file name should not exist
|
||||
file: name={{ db_file_name }} state=absent
|
||||
|
||||
- name: test state=dump to backup the database (expect changed=true)
|
||||
postgresql_db:
|
||||
name: "{{ db_name }}"
|
||||
target: "{{ db_file_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
|
||||
target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
|
||||
login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
|
||||
login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
|
||||
state: dump
|
||||
dump_extra_args: --exclude-table=fake
|
||||
register: result
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
|
||||
- name: assert output message backup the database
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.executed_commands[0] is search("--exclude-table=fake")
|
||||
|
||||
- name: assert database was backed up successfully
|
||||
command: file {{ db_file_name }}
|
||||
register: result
|
||||
|
||||
- name: state dump/restore - remove database for restore
|
||||
postgresql_db:
|
||||
name: "{{ db_name }}"
|
||||
target: "{{ db_file_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
|
||||
target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
|
||||
login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
|
||||
login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
|
||||
state: absent
|
||||
|
||||
- name: state dump/restore - re-create database
|
||||
postgresql_db:
|
||||
state: present
|
||||
name: "{{ db_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
|
||||
- name: test state=restore to restore the database (expect changed=true)
|
||||
postgresql_db:
|
||||
name: "{{ db_name }}"
|
||||
target: "{{ db_file_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
|
||||
target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
|
||||
login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
|
||||
login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
|
||||
state: restore
|
||||
register: result
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
|
||||
- name: assert output message restore the database
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: select data from table employee
|
||||
command: '{{ user_str }} -c "{{ sql_select }}"'
|
||||
register: result
|
||||
|
||||
- name: assert data in database is from the restore database
|
||||
assert:
|
||||
that:
|
||||
- "'47' in result.stdout"
|
||||
- "'Joe Smith' in result.stdout"
|
||||
|
||||
############################
|
||||
# 1. Test trust_input parameter
|
||||
# 2. Test db name containing dots
|
||||
|
||||
- name: state dump/restore - create database, trust_input no
|
||||
become: yes
|
||||
become_user: "{{ pg_user }}"
|
||||
postgresql_db:
|
||||
state: present
|
||||
name: "{{ suspicious_db_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
trust_input: no
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
- result.msg == 'Passed input \'{{ suspicious_db_name }}\' is potentially dangerous'
|
||||
|
||||
- name: state dump/restore - create database, trust_input yes explicitly
|
||||
become: yes
|
||||
become_user: "{{ pg_user }}"
|
||||
postgresql_db:
|
||||
state: present
|
||||
name: "{{ suspicious_db_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
trust_input: yes
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: test state=restore to restore the database (expect changed=true)
|
||||
become: yes
|
||||
become_user: "{{ pg_user }}"
|
||||
postgresql_db:
|
||||
name: "{{ db_name_with_dot }}"
|
||||
target: "{{ db_file_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
|
||||
target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
|
||||
login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
|
||||
login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
|
||||
state: restore
|
||||
register: result
|
||||
|
||||
- name: assert output message restore the database
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: state dump/restore - remove databases
|
||||
become: yes
|
||||
become_user: "{{ pg_user }}"
|
||||
postgresql_db:
|
||||
state: absent
|
||||
name: "{{ db_name_with_dot }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: "{{ pg_user }}"
|
||||
trust_input: yes
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
# Clean up
|
||||
- name: state dump/restore - remove database name
|
||||
postgresql_db:
|
||||
name: "{{ db_name }}"
|
||||
target: "{{ db_file_name }}"
|
||||
owner: "{{ db_user1 }}"
|
||||
login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
|
||||
target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
|
||||
login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
|
||||
login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
|
||||
state: absent
|
||||
|
||||
- name: remove file name
|
||||
file: name={{ db_file_name }} state=absent
|
||||
|
||||
- name: Remove the test user
|
||||
become: yes
|
||||
become_user: "{{ pg_user }}"
|
||||
postgresql_user:
|
||||
name: "{{ db_user1 }}"
|
||||
state: "absent"
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
|
@@ -1,7 +0,0 @@
destructive
shippable/posix/group4
skip/aix
skip/osx
skip/macos
skip/freebsd
disabled # tests already running in community.postgresql

@@ -1,2 +0,0 @@
db_session_role1: 'session_role1'
db_session_role2: 'session_role2'

@@ -1,3 +0,0 @@
dependencies:
- setup_pkg_mgr
- setup_postgresql_db
@@ -1,26 +0,0 @@
####################################################################
# WARNING: These are designed specifically for Ansible tests       #
# and should not be used as examples of how to write Ansible roles #
####################################################################

- import_tasks: postgresql_ext_session_role.yml

# Initial CI tests of postgresql_ext module.
# pg_extension system view is available from PG 9.1.
# The tests are restricted to Fedora because other distributions produce errors
# related to attempts to change the environment during postgis installation or
# a missing postgis package in repositories.
# Anyway, these tests depend entirely on the Postgres version,
# not on specific distributions.
- import_tasks: postgresql_ext_initial.yml
  when:
  - postgres_version_resp.stdout is version('9.1', '>=')
  - ansible_distribution == 'Fedora'

# CI tests of "version" option.
# It uses a mock extension, see test/integration/targets/setup_postgresql_db/.
# TODO: change postgresql_ext_initial.yml to use the mock extension too.
- import_tasks: postgresql_ext_version_opt.yml
  when:
  - ansible_distribution == 'Ubuntu'
  - postgres_version_resp.stdout is version('9.1', '>=')
@@ -1,208 +0,0 @@
|
|||
---
|
||||
- name: postgresql_ext - install postgis on Linux
|
||||
package: name=postgis state=present
|
||||
when: ansible_os_family != "Windows"
|
||||
|
||||
- name: postgresql_ext - create schema schema1
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_schema:
|
||||
database: postgres
|
||||
name: schema1
|
||||
state: present
|
||||
|
||||
- name: postgresql_ext - drop extension if exists
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
query: DROP EXTENSION IF EXISTS postgis
|
||||
ignore_errors: true
|
||||
|
||||
- name: postgresql_ext - create extension postgis in check_mode
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_ext:
|
||||
login_db: postgres
|
||||
login_port: 5432
|
||||
name: postgis
|
||||
check_mode: true
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == []
|
||||
|
||||
- name: postgresql_ext - check that extension doesn't exist after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
query: SELECT extname FROM pg_extension WHERE extname='postgis'
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
- name: postgresql_ext - create extension postgis
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_ext:
|
||||
login_db: postgres
|
||||
login_port: 5432
|
||||
name: postgis
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['CREATE EXTENSION "postgis"']
|
||||
|
||||
- name: postgresql_ext - check that extension exists after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
query: SELECT extname FROM pg_extension WHERE extname='postgis'
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext - drop extension postgis
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_ext:
|
||||
db: postgres
|
||||
name: postgis
|
||||
state: absent
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['DROP EXTENSION "postgis"']
|
||||
|
||||
- name: postgresql_ext - check that extension doesn't exist after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
query: SELECT extname FROM pg_extension WHERE extname='postgis'
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
- name: postgresql_ext - create extension postgis
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_ext:
|
||||
db: postgres
|
||||
name: postgis
|
||||
schema: schema1
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['CREATE EXTENSION "postgis" WITH SCHEMA "schema1"']
|
||||
|
||||
- name: postgresql_ext - check that extension exists after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
query: "SELECT extname FROM pg_extension AS e LEFT JOIN pg_catalog.pg_namespace AS n \nON n.oid = e.extnamespace WHERE e.extname='postgis' AND n.nspname='schema1'\n"
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext - drop extension postgis cascade
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_ext:
|
||||
db: postgres
|
||||
name: postgis
|
||||
state: absent
|
||||
cascade: true
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['DROP EXTENSION "postgis" CASCADE']
|
||||
|
||||
- name: postgresql_ext - check that extension doesn't exist after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
query: SELECT extname FROM pg_extension WHERE extname='postgis'
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
- name: postgresql_ext - create extension postgis cascade
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_ext:
|
||||
db: postgres
|
||||
name: postgis
|
||||
cascade: true
|
||||
ignore_errors: true
|
||||
register: result
|
||||
when: postgres_version_resp.stdout is version('9.6', '<=')
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['CREATE EXTENSION "postgis" CASCADE']
|
||||
when: postgres_version_resp.stdout is version('9.6', '<=')
|
||||
|
||||
- name: postgresql_ext - check that extension exists after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
query: SELECT extname FROM pg_extension WHERE extname='postgis'
|
||||
ignore_errors: true
|
||||
register: result
|
||||
when: postgres_version_resp.stdout is version('9.6', '<=')
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
when: postgres_version_resp.stdout is version('9.6', '<=')
|
||||
|
||||
- name: postgresql_ext - check that using a dangerous name fails
|
||||
postgresql_ext:
|
||||
db: postgres
|
||||
name: postgis
|
||||
session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
|
||||
trust_input: no
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
- result.msg is search('is potentially dangerous')
|
|
@@ -1,114 +0,0 @@
- name: Create a high privileged user
  become: yes
  become_user: "{{ pg_user }}"
  postgresql_user:
    name: "{{ db_session_role1 }}"
    state: "present"
    password: "password"
    role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
    login_user: "{{ pg_user }}"
    db: postgres

- name: Create DB as session_role
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_db:
    state: present
    name: "{{ db_session_role1 }}"
    login_user: "{{ pg_user }}"
    session_role: "{{ db_session_role1 }}"
  register: result

- name: Check that pg_extension exists (PostgreSQL >= 9.1)
  become_user: "{{ pg_user }}"
  become: yes
  shell: echo "select count(*) from pg_class where relname='pg_extension' and relkind='r'" | psql -AtXq postgres
  register: pg_extension

- name: Remove plpgsql from testdb using postgresql_ext
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_ext:
    name: plpgsql
    db: "{{ db_session_role1 }}"
    login_user: "{{ pg_user }}"
    state: absent
  when:
    "pg_extension.stdout_lines[-1] == '1'"

- name: Fail when trying to create an extension as a mere mortal user
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_ext:
    name: plpgsql
    db: "{{ db_session_role1 }}"
    login_user: "{{ pg_user }}"
    session_role: "{{ db_session_role2 }}"
  ignore_errors: yes
  register: result
  when:
    "pg_extension.stdout_lines[-1] == '1'"

- assert:
    that:
    - result is failed
  when:
    "pg_extension.stdout_lines[-1] == '1'"

- name: Install extension as session_role
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_ext:
    name: plpgsql
    db: "{{ db_session_role1 }}"
    login_user: "{{ pg_user }}"
    session_role: "{{ db_session_role1 }}"
    trust_input: no
  when:
    "pg_extension.stdout_lines[-1] == '1'"

- name: Check that extension is created and is owned by session_role
  become_user: "{{ pg_user }}"
  become: yes
  shell: echo "select rolname from pg_extension join pg_roles on extowner=pg_roles.oid where extname='plpgsql';" | psql -AtXq "{{ db_session_role1 }}"
  register: result
  when:
    "pg_extension.stdout_lines[-1] == '1'"

- assert:
    that:
    - "result.stdout_lines[-1] == '{{ db_session_role1 }}'"
  when:
    "pg_extension.stdout_lines[-1] == '1'"

- name: Remove plpgsql from testdb using postgresql_ext
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_ext:
    name: plpgsql
    db: "{{ db_session_role1 }}"
    login_user: "{{ pg_user }}"
    state: absent
    trust_input: no
  when:
    "pg_extension.stdout_lines[-1] == '1'"

- name: Drop test db
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_db:
    state: absent
    name: "{{ db_session_role1 }}"
    login_user: "{{ pg_user }}"

- name: Drop test users
  become: yes
  become_user: "{{ pg_user }}"
  postgresql_user:
    name: "{{ item }}"
    state: absent
    login_user: "{{ pg_user }}"
    db: postgres
  with_items:
  - "{{ db_session_role1 }}"
  - "{{ db_session_role2 }}"
@@ -1,364 +0,0 @@
|
|||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# Tests for postgresql_ext version option
|
||||
|
||||
- vars:
|
||||
test_ext: dummy
|
||||
test_schema: schema1
|
||||
task_parameters: &task_parameters
|
||||
become_user: '{{ pg_user }}'
|
||||
become: yes
|
||||
register: result
|
||||
pg_parameters: &pg_parameters
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
|
||||
block:
|
||||
# Preparation:
|
||||
- name: postgresql_ext_version - create schema schema1
|
||||
<<: *task_parameters
|
||||
postgresql_schema:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_schema }}"
|
||||
|
||||
# Do tests:
|
||||
- name: postgresql_ext_version - create extension of specific version, check mode
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: '1.0'
|
||||
trust_input: no
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: postgresql_ext_version - check that nothing was actually changed
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
- name: postgresql_ext_version - create extension of specific version
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: '1.0'
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["CREATE EXTENSION \"{{ test_ext }}\" WITH SCHEMA \"{{ test_schema }}\" VERSION '1.0'"]
|
||||
|
||||
- name: postgresql_ext_version - check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - try to create extension of the same version again in check_mode
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: '1.0'
|
||||
trust_input: no
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
- name: postgresql_ext_version - check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - try to create extension of the same version again in actual mode
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: '1.0'
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
- name: postgresql_ext_version - check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - update the extension to the next version in check_mode
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: '2.0'
|
||||
trust_input: no
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: postgresql_ext_version - check, the version must be 1.0
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - update the extension to the next version
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: '2.0'
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '2.0'"]
|
||||
|
||||
- name: postgresql_ext_version - check, the version must be 2.0
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - check that version won't be changed if version won't be passed
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
- name: postgresql_ext_version - check, the version must be 2.0
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - update the extension to the latest version
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: latest
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '3.0'"]
|
||||
|
||||
- name: postgresql_ext_version - check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '3.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - try to update the extension to the latest version again
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: latest
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
- name: postgresql_ext_version - try to downgrade the extension version, must fail
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
schema: "{{ test_schema }}"
|
||||
version: '1.0'
|
||||
trust_input: no
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.failed == true
|
||||
|
||||
- name: postgresql_ext_version - drop the extension in check_mode
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
state: absent
|
||||
trust_input: no
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: postgresql_ext_version - check that extension exists
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '3.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - drop the extension in actual mode
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
state: absent
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: postgresql_ext_version - check that extension doesn't exist after the prev step
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
- name: postgresql_ext_version - try to drop the non-existent extension again
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
state: absent
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
- name: postgresql_ext_version - create the extension without passing version
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["CREATE EXTENSION \"{{ test_ext }}\""]
|
||||
|
||||
- name: postgresql_ext_version - check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '3.0'"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_ext_version - try to install non-existent version
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: non_existent
|
||||
trust_input: no
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.failed == true
|
||||
- result.msg == "Extension non_existent is not installed"
|
||||
|
||||
######################################################################
|
||||
# https://github.com/ansible-collections/community.general/issues/1095
|
||||
- name: Install postgis
|
||||
package:
|
||||
name: postgis
|
||||
|
||||
- name: Create postgis extension
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: postgis
|
||||
version: latest
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
# Cleanup:
|
||||
- name: postgresql_ext_version - drop the extension
|
||||
<<: *task_parameters
|
||||
postgresql_ext:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_ext }}"
|
||||
state: absent
|
||||
trust_input: no
|
||||
|
||||
- name: postgresql_ext_version - drop the schema
|
||||
<<: *task_parameters
|
||||
postgresql_schema:
|
||||
<<: *pg_parameters
|
||||
name: "{{ test_schema }}"
|
||||
state: absent
|
|
@@ -1,6 +0,0 @@
destructive
shippable/posix/group4
skip/aix
skip/osx
skip/macos
disabled # tests already running in community.postgresql

@@ -1,2 +0,0 @@
dependencies:
- setup_postgresql_db

@@ -1,7 +0,0 @@
####################################################################
# WARNING: These are designed specifically for Ansible tests       #
# and should not be used as examples of how to write Ansible roles #
####################################################################

# Initial CI tests of postgresql_idx module
- import_tasks: postgresql_idx_initial.yml
@@ -1,377 +0,0 @@
|
|||
- name: postgresql_idx - create test table called test_table
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);"
|
||||
ignore_errors: true
|
||||
|
||||
- name: postgresql_idx - drop test tablespace called ssd if exists
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLESPACE IF EXISTS ssd;"
|
||||
ignore_errors: true
|
||||
|
||||
- name: postgresql_idx - drop dir for test tablespace
|
||||
become: true
|
||||
file:
|
||||
path: /mnt/ssd
|
||||
state: absent
|
||||
ignore_errors: true
|
||||
|
||||
- name: postgresql_idx - create dir for test tablespace
|
||||
become: true
|
||||
file:
|
||||
path: /mnt/ssd
|
||||
state: directory
|
||||
owner: '{{ pg_user }}'
|
||||
mode: '0755'
|
||||
ignore_errors: true
|
||||
|
||||
- name: postgresql_idx - create test tablespace called ssd
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLESPACE ssd LOCATION '/mnt/ssd';"
|
||||
ignore_errors: true
|
||||
register: tablespace
|
||||
|
||||
- name: postgresql_idx - create test schema
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE SCHEMA foo;"
|
||||
ignore_errors: true
|
||||
|
||||
- name: postgresql_idx - create table in non-default schema
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE foo.foo_table (id int, story text);"
|
||||
ignore_errors: true
|
||||
|
||||
- name: postgresql_idx - create btree index in check_mode
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
table: test_table
|
||||
columns: id, story
|
||||
idxname: Test0_idx
|
||||
check_mode: true
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.tblname == ''
|
||||
- result.name == 'Test0_idx'
|
||||
- result.state == 'absent'
|
||||
- result.valid != ''
|
||||
- result.tblspace == ''
|
||||
- result.storage_params == []
|
||||
- result.schema == ''
|
||||
- result.query == ''
|
||||
|
||||
- name: postgresql_idx - check nothing changed after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
query: SELECT 1 FROM pg_indexes WHERE indexname = 'Test0_idx'
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
- name: postgresql_idx - create btree index concurrently
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
table: test_table
|
||||
columns: id, story
|
||||
idxname: Test0_idx
|
||||
trust_input: no
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.tblname == 'test_table'
|
||||
- result.name == 'Test0_idx'
|
||||
- result.state == 'present'
|
||||
- result.valid != ''
|
||||
- result.tblspace == ''
|
||||
- result.storage_params == []
|
||||
- result.schema == 'public'
|
||||
- result.query == 'CREATE INDEX CONCURRENTLY "Test0_idx" ON "public"."test_table" USING BTREE (id, story)'
|
||||
|
||||
- name: postgresql_idx - check the index exists after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
query: SELECT 1 FROM pg_indexes WHERE indexname = 'Test0_idx'
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: postgresql_idx - try to create existing index again
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
table: test_table
|
||||
columns: id, story
|
||||
idxname: Test0_idx
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.tblname == 'test_table'
|
||||
- result.name == 'Test0_idx'
|
||||
- result.state == 'present'
|
||||
- result.valid != ''
|
||||
- result.tblspace == ''
|
||||
- result.storage_params == []
|
||||
- result.schema == 'public'
|
||||
- result.query == ''
|
||||
|
||||
- name: postgresql_idx - create btree index - non-default schema, tablespace, storage parameter
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
schema: foo
|
||||
table: foo_table
|
||||
columns:
|
||||
- id
|
||||
- story
|
||||
idxname: foo_test_idx
|
||||
tablespace: ssd
|
||||
storage_params: fillfactor=90
|
||||
trust_input: no
|
||||
register: result
|
||||
ignore_errors: true
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.tblname == 'foo_table'
|
||||
- result.name == 'foo_test_idx'
|
||||
- result.state == 'present'
|
||||
- result.valid != ''
|
||||
- result.tblspace == 'ssd'
|
||||
- result.storage_params == [ "fillfactor=90" ]
|
||||
- result.schema == 'foo'
|
||||
- result.query == 'CREATE INDEX CONCURRENTLY "foo_test_idx" ON "foo"."foo_table" USING BTREE (id,story) WITH (fillfactor=90) TABLESPACE "ssd"'
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- name: postgresql_idx - create brin index not concurrently
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
schema: public
|
||||
table: test_table
|
||||
state: present
|
||||
type: brin
|
||||
columns: id
|
||||
idxname: test_brin_idx
|
||||
concurrent: false
|
||||
trust_input: no
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.tblname == 'test_table'
|
||||
- result.name == 'test_brin_idx'
|
||||
- result.state == 'present'
|
||||
- result.valid != ''
|
||||
- result.tblspace == ''
|
||||
- result.storage_params == []
|
||||
- result.schema == 'public'
|
||||
- result.query == 'CREATE INDEX "test_brin_idx" ON "public"."test_table" USING brin (id)'
|
||||
when: postgres_version_resp.stdout is version('9.5', '>=')
|
||||
|
||||
- name: postgresql_idx - create index with condition
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
table: test_table
|
||||
columns: id
|
||||
idxname: test1_idx
|
||||
cond: id > 1 AND id != 10
|
||||
trust_input: no
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.tblname == 'test_table'
|
||||
- result.name == 'test1_idx'
|
||||
- result.state == 'present'
|
||||
- result.valid != ''
|
||||
- result.tblspace == ''
|
||||
- result.storage_params == []
|
||||
- result.schema == 'public'
|
||||
- result.query == 'CREATE INDEX CONCURRENTLY "test1_idx" ON "public"."test_table" USING BTREE (id) WHERE id > 1 AND id != 10'
|
||||
|
||||
- name: postgresql_idx - create unique index
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
table: test_table
|
||||
columns: story
|
||||
idxname: test_unique0_idx
|
||||
unique: true
|
||||
trust_input: no
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.tblname == 'test_table'
|
||||
- result.name == 'test_unique0_idx'
|
||||
- result.state == 'present'
|
||||
- result.valid != ''
|
||||
- result.tblspace == ''
|
||||
- result.storage_params == []
|
||||
- result.schema == 'public'
|
||||
- result.query == 'CREATE UNIQUE INDEX CONCURRENTLY "test_unique0_idx" ON "public"."test_table" USING BTREE (story)'
|
||||
|
||||
- name: postgresql_idx - avoid unique index with type different of btree
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
table: test_table
|
||||
columns: story
|
||||
idxname: test_unique0_idx
|
||||
unique: true
|
||||
concurrent: false
|
||||
type: brin
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.msg == 'Only btree currently supports unique indexes'
|
||||
|
||||
- name: postgresql_idx - drop index from specific schema cascade in check_mode
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
schema: foo
|
||||
name: foo_test_idx
|
||||
cascade: true
|
||||
state: absent
|
||||
concurrent: false
|
||||
trust_input: yes
|
||||
check_mode: true
|
||||
register: result
|
||||
ignore_errors: true
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.name == 'foo_test_idx'
|
||||
- result.state == 'present'
|
||||
- result.schema == 'foo'
|
||||
- result.query == ''
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- name: postgresql_idx - check the index exists after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
query: SELECT 1 FROM pg_indexes WHERE indexname = 'foo_test_idx' AND schemaname = 'foo'
|
||||
register: result
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- name: postgresql_idx - drop index from specific schema cascade
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
schema: foo
|
||||
name: foo_test_idx
|
||||
cascade: true
|
||||
state: absent
|
||||
concurrent: false
|
||||
register: result
|
||||
ignore_errors: true
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.name == 'foo_test_idx'
|
||||
- result.state == 'absent'
|
||||
- result.schema == 'foo'
|
||||
- result.query == 'DROP INDEX "foo"."foo_test_idx" CASCADE'
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- name: postgresql_idx - check the index doesn't exist after the previous step
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
query: SELECT 1 FROM pg_indexes WHERE indexname = 'foo_test_idx' and schemaname = 'foo'
|
||||
register: result
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
when: tablespace.rc == 0
|
||||
|
||||
- name: postgresql_idx - try to drop not existing index
|
||||
become_user: '{{ pg_user }}'
|
||||
become: true
|
||||
postgresql_idx:
|
||||
db: postgres
|
||||
login_user: '{{ pg_user }}'
|
||||
schema: foo
|
||||
name: foo_test_idx
|
||||
state: absent
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.query == ''
|
|
@@ -1,8 +0,0 @@
destructive
shippable/posix/group1
skip/aix
skip/osx
skip/macos
skip/freebsd
skip/rhel
disabled # tests already running in community.postgresql

@@ -1,15 +0,0 @@
---
pg_user: postgres
db_default: postgres
master_port: 5433
replica_port: 5434

test_table1: acme1
test_pub: first_publication
test_pub2: second_publication
replication_role: logical_replication
replication_pass: alsdjfKJKDf1#
test_db: acme_db
test_subscription: test
test_subscription2: test2
conn_timeout: 100

@@ -1,2 +0,0 @@
dependencies:
- setup_postgresql_replication

@@ -1,12 +0,0 @@
####################################################################
# WARNING: These are designed specifically for Ansible tests       #
# and should not be used as examples of how to write Ansible roles #
####################################################################

# For testing getting publication and subscription info
- import_tasks: setup_publication.yml
  when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'

# Initial CI tests of postgresql_info module
- import_tasks: postgresql_info_initial.yml
  when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
@@ -1,177 +0,0 @@
|
|||
# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
- vars:
|
||||
task_parameters: &task_parameters
|
||||
become_user: '{{ pg_user }}'
|
||||
become: yes
|
||||
register: result
|
||||
pg_parameters: &pg_parameters
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: '{{ db_default }}'
|
||||
|
||||
block:
|
||||
|
||||
- name: Create test subscription
|
||||
<<: *task_parameters
|
||||
postgresql_subscription:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ replica_port }}'
|
||||
name: '{{ test_subscription }}'
|
||||
login_db: '{{ test_db }}'
|
||||
state: present
|
||||
publications: '{{ test_pub }}'
|
||||
connparams:
|
||||
host: 127.0.0.1
|
||||
port: '{{ master_port }}'
|
||||
user: '{{ replication_role }}'
|
||||
password: '{{ replication_pass }}'
|
||||
dbname: '{{ test_db }}'
|
||||
|
||||
- name: Create second test subscription
|
||||
<<: *task_parameters
|
||||
postgresql_subscription:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ replica_port }}'
|
||||
name: '{{ test_subscription2 }}'
|
||||
login_db: '{{ test_db }}'
|
||||
state: present
|
||||
publications: '{{ test_pub2 }}'
|
||||
connparams:
|
||||
host: 127.0.0.1
|
||||
port: '{{ master_port }}'
|
||||
user: '{{ replication_role }}'
|
||||
password: '{{ replication_pass }}'
|
||||
dbname: '{{ test_db }}'
|
||||
|
||||
- name: postgresql_info - create role to check session_role
|
||||
<<: *task_parameters
|
||||
postgresql_user:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ replica_port }}'
|
||||
login_user: "{{ pg_user }}"
|
||||
name: session_superuser
|
||||
role_attr_flags: SUPERUSER
|
||||
|
||||
- name: postgresql_info - test return values and session_role param
|
||||
<<: *task_parameters
|
||||
postgresql_info:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ replica_port }}'
|
||||
session_role: session_superuser
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.version != {}
|
||||
- result.in_recovery == false
|
||||
- result.databases.{{ db_default }}.collate
|
||||
- result.databases.{{ db_default }}.languages
|
||||
- result.databases.{{ db_default }}.namespaces
|
||||
- result.databases.{{ db_default }}.extensions
|
||||
- result.databases.{{ test_db }}.subscriptions.{{ test_subscription }}
|
||||
- result.databases.{{ test_db }}.subscriptions.{{ test_subscription2 }}
|
||||
- result.settings
|
||||
- result.tablespaces
|
||||
- result.roles
|
||||
|
||||
- name: postgresql_info - check filter param passed by list
|
||||
<<: *task_parameters
|
||||
postgresql_info:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ replica_port }}'
|
||||
filter:
|
||||
- ver*
|
||||
- rol*
|
||||
- in_recov*
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.version != {}
|
||||
- result.roles
|
||||
- result.in_recovery == false
|
||||
- result.databases == {}
|
||||
- result.repl_slots == {}
|
||||
- result.replications == {}
|
||||
- result.settings == {}
|
||||
- result.tablespaces == {}
|
||||
|
||||
- name: postgresql_info - check filter param passed by string
|
||||
<<: *task_parameters
|
||||
postgresql_info:
|
||||
<<: *pg_parameters
|
||||
filter: ver*,role*
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.version != {}
|
||||
- result.roles
|
||||
- result.databases == {}
|
||||
- result.repl_slots == {}
|
||||
- result.replications == {}
|
||||
- result.settings == {}
|
||||
- result.tablespaces == {}
|
||||
|
||||
- name: postgresql_info - check filter param passed by a single string value
|
||||
<<: *task_parameters
|
||||
postgresql_info:
|
||||
<<: *pg_parameters
|
||||
filter: ver*
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.version
|
||||
- result.roles == {}
|
||||
|
||||
- name: postgresql_info - check excluding filter param passed by list
|
||||
<<: *task_parameters
|
||||
postgresql_info:
|
||||
<<: *pg_parameters
|
||||
filter:
|
||||
- "!ver*"
|
||||
- "!rol*"
|
||||
- "!in_rec*"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.version == {}
|
||||
- result.in_recovery == None
|
||||
- result.roles == {}
|
||||
- result.databases
|
||||
|
||||
- name: postgresql_info - test return publication info
|
||||
<<: *task_parameters
|
||||
postgresql_info:
|
||||
<<: *pg_parameters
|
||||
login_db: '{{ test_db }}'
|
||||
login_port: '{{ master_port }}'
|
||||
trust_input: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.version != {}
|
||||
- result.in_recovery == false
|
||||
- result.databases.{{ db_default }}.collate
|
||||
- result.databases.{{ db_default }}.languages
|
||||
- result.databases.{{ db_default }}.namespaces
|
||||
- result.databases.{{ db_default }}.extensions
|
||||
- result.databases.{{ test_db }}.publications.{{ test_pub }}.ownername == '{{ pg_user }}'
|
||||
- result.databases.{{ test_db }}.publications.{{ test_pub2 }}.puballtables == true
|
||||
- result.settings
|
||||
- result.tablespaces
|
||||
- result.roles
|
||||
|
||||
- name: postgresql_info - test trust_input parameter
|
||||
<<: *task_parameters
|
||||
postgresql_info:
|
||||
<<: *pg_parameters
|
||||
login_db: '{{ test_db }}'
|
||||
login_port: '{{ master_port }}'
|
||||
trust_input: no
|
||||
session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
- result.msg is search('is potentially dangerous')
|
|
@ -1,61 +0,0 @@
|
|||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# Preparation for further tests of postgresql_subscription module.
|
||||
|
||||
- vars:
|
||||
task_parameters: &task_parameters
|
||||
become_user: '{{ pg_user }}'
|
||||
become: yes
|
||||
register: result
|
||||
pg_parameters: &pg_parameters
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: '{{ test_db }}'
|
||||
|
||||
block:
|
||||
- name: Create test db
|
||||
<<: *task_parameters
|
||||
postgresql_db:
|
||||
login_user: '{{ pg_user }}'
|
||||
login_port: '{{ master_port }}'
|
||||
maintenance_db: '{{ db_default }}'
|
||||
name: '{{ test_db }}'
|
||||
|
||||
- name: Create test role
|
||||
<<: *task_parameters
|
||||
postgresql_user:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ master_port }}'
|
||||
name: '{{ replication_role }}'
|
||||
password: '{{ replication_pass }}'
|
||||
role_attr_flags: LOGIN,REPLICATION
|
||||
|
||||
- name: Create test table
|
||||
<<: *task_parameters
|
||||
postgresql_table:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ master_port }}'
|
||||
name: '{{ test_table1 }}'
|
||||
columns:
|
||||
- id int
|
||||
|
||||
- name: Master - dump schema
|
||||
<<: *task_parameters
|
||||
shell: pg_dumpall -p '{{ master_port }}' -s > /tmp/schema.sql
|
||||
|
||||
- name: Replica - restore schema
|
||||
<<: *task_parameters
|
||||
shell: psql -p '{{ replica_port }}' -f /tmp/schema.sql
|
||||
|
||||
- name: Create publication
|
||||
<<: *task_parameters
|
||||
postgresql_publication:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ master_port }}'
|
||||
name: '{{ test_pub }}'
|
||||
|
||||
- name: Create second publication
|
||||
<<: *task_parameters
|
||||
postgresql_publication:
|
||||
<<: *pg_parameters
|
||||
login_port: '{{ master_port }}'
|
||||
name: '{{ test_pub2 }}'
|
|
@ -1,6 +0,0 @@
|
|||
destructive
|
||||
shippable/posix/group4
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/macos
|
||||
disabled # tests already running in community.postgresql
|
|
@ -1,2 +0,0 @@
|
|||
dependencies:
|
||||
- setup_postgresql_db
|
|
@ -1,25 +0,0 @@
|
|||
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
- name: Include distribution-specific variables
|
||||
include_vars: "{{ lookup('first_found', params) }}"
|
||||
vars:
|
||||
params:
|
||||
files:
|
||||
- "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
|
||||
- default.yml
|
||||
paths:
|
||||
- vars
|
||||
|
||||
# Only run on CentOS 7: there is a stack trace on CentOS 8 because the module
|
||||
# is looking for the incorrect version of plpython.
|
||||
# https://gist.github.com/samdoran/8fc1b4ae834d3e66d1895d087419b8d8
|
||||
- name: Initial CI tests of postgresql_lang module
|
||||
when:
|
||||
- ansible_facts.distribution == 'CentOS'
|
||||
- ansible_facts.distribution_major_version is version ('7', '==')
|
||||
block:
|
||||
- include_tasks: postgresql_lang_initial.yml
|
||||
- include_tasks: postgresql_lang_add_owner_param.yml
|
|
@ -1,199 +0,0 @@
|
|||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
- vars:
|
||||
test_user1: alice
|
||||
test_user2: bob
|
||||
test_lang: plperl
|
||||
non_existent_role: fake_role
|
||||
task_parameters: &task_parameters
|
||||
become_user: '{{ pg_user }}'
|
||||
become: yes
|
||||
register: result
|
||||
pg_parameters: &pg_parameters
|
||||
login_user: '{{ pg_user }}'
|
||||
login_db: postgres
|
||||
|
||||
block:
|
||||
- name: Create roles for tests
|
||||
<<: *task_parameters
|
||||
postgresql_user:
|
||||
<<: *pg_parameters
|
||||
name: '{{ item }}'
|
||||
loop:
|
||||
- '{{ test_user1 }}'
|
||||
- '{{ test_user2 }}'
|
||||
|
||||
- name: Create lang with owner in check_mode
|
||||
<<: *task_parameters
|
||||
postgresql_lang:
|
||||
<<: *pg_parameters
|
||||
name: '{{ test_lang }}'
|
||||
owner: '{{ test_user1 }}'
|
||||
trust_input: no
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == []
|
||||
|
||||
- name: Check that nothing was actually changed
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: >
|
||||
SELECT r.rolname FROM pg_language l
|
||||
JOIN pg_roles r ON l.lanowner = r.oid
|
||||
WHERE l.lanname = '{{ test_lang }}'
|
||||
AND r.rolname = '{{ test_user1 }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
- name: Create lang with owner
|
||||
<<: *task_parameters
|
||||
postgresql_lang:
|
||||
<<: *pg_parameters
|
||||
name: '{{ test_lang }}'
|
||||
owner: '{{ test_user1 }}'
|
||||
trust_input: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['CREATE LANGUAGE "{{ test_lang }}"', 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user1 }}"']
|
||||
|
||||
- name: Check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: >
|
||||
SELECT r.rolname FROM pg_language l
|
||||
JOIN pg_roles r ON l.lanowner = r.oid
|
||||
WHERE l.lanname = '{{ test_lang }}'
|
||||
AND r.rolname = '{{ test_user1 }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: Change lang owner in check_mode
|
||||
<<: *task_parameters
|
||||
postgresql_lang:
|
||||
<<: *pg_parameters
|
||||
name: '{{ test_lang }}'
|
||||
owner: '{{ test_user2 }}'
|
||||
trust_input: yes
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"']
|
||||
|
||||
- name: Check that nothing was actually changed
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: >
|
||||
SELECT r.rolname FROM pg_language l
|
||||
JOIN pg_roles r ON l.lanowner = r.oid
|
||||
WHERE l.lanname = '{{ test_lang }}'
|
||||
AND r.rolname = '{{ test_user2 }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
- name: Change lang owner
|
||||
<<: *task_parameters
|
||||
postgresql_lang:
|
||||
<<: *pg_parameters
|
||||
name: '{{ test_lang }}'
|
||||
owner: '{{ test_user2 }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
# TODO: the first elem of the returned list below
|
||||
# looks like a bug, not related with the option owner, needs to be checked
|
||||
- result.queries == ["UPDATE pg_language SET lanpltrusted = false WHERE lanname = '{{ test_lang }}'", 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"']
|
||||
|
||||
- name: Check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: >
|
||||
SELECT r.rolname FROM pg_language l
|
||||
JOIN pg_roles r ON l.lanowner = r.oid
|
||||
WHERE l.lanname = '{{ test_lang }}'
|
||||
AND r.rolname = '{{ test_user2 }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: Try to change lang owner again to the same role
|
||||
<<: *task_parameters
|
||||
postgresql_lang:
|
||||
<<: *pg_parameters
|
||||
name: '{{ test_lang }}'
|
||||
owner: '{{ test_user2 }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.queries == []
|
||||
|
||||
- name: Check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: >
|
||||
SELECT r.rolname FROM pg_language l
|
||||
JOIN pg_roles r ON l.lanowner = r.oid
|
||||
WHERE l.lanname = '{{ test_lang }}'
|
||||
AND r.rolname = '{{ test_user2 }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
- name: Drop test lang with owner, must ignore
|
||||
<<: *task_parameters
|
||||
postgresql_lang:
|
||||
<<: *pg_parameters
|
||||
name: '{{ test_lang }}'
|
||||
state: absent
|
||||
owner: '{{ non_existent_role }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["DROP LANGUAGE \"{{ test_lang }}\""]
|
||||
|
||||
- name: Check
|
||||
<<: *task_parameters
|
||||
postgresql_query:
|
||||
<<: *pg_parameters
|
||||
query: >
|
||||
SELECT r.rolname FROM pg_language l
|
||||
JOIN pg_roles r ON l.lanowner = r.oid
|
||||
WHERE l.lanname = '{{ test_lang }}'
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
# Clean up
|
||||
- name: Drop test roles
|
||||
<<: *task_parameters
|
||||
postgresql_user:
|
||||
<<: *pg_parameters
|
||||
name: '{{ item }}'
|
||||
state: absent
|
||||
loop:
|
||||
- '{{ test_user1 }}'
|
||||
- '{{ test_user2 }}'
|
|
@ -1,231 +0,0 @@
|
|||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
# Preparation for tests:
|
||||
- name: Install PostgreSQL support packages
|
||||
become: yes
|
||||
action: "{{ ansible_facts.pkg_mgr }}"
|
||||
args:
|
||||
name: "{{ postgresql_lang_packages }}"
|
||||
state: present
|
||||
|
||||
###############
|
||||
# Do main tests
|
||||
#
|
||||
|
||||
# Create language in check_mode:
|
||||
- name: postgresql_lang - create plperl in check_mode
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_lang:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
name: plperl
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == []
|
||||
|
||||
- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
# Create language:
|
||||
- name: postgresql_lang - create plperl
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_lang:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
name: plperl
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['CREATE LANGUAGE "plperl"']
|
||||
|
||||
- name: postgresql_lang - check that lang exists after previous step
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
# Drop language in check_mode:
|
||||
- name: postgresql_lang - drop plperl in check_mode
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_lang:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
name: plperl
|
||||
state: absent
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == []
|
||||
|
||||
- name: postgresql_lang - check that lang exists after previous step, rowcount must be 1
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
# Drop language:
|
||||
- name: postgresql_lang - drop plperl
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_lang:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
name: plperl
|
||||
state: absent
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['DROP LANGUAGE "plperl"']
|
||||
|
||||
- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
||||
|
||||
# Check fail_on_drop yes
|
||||
- name: postgresql_lang - drop c language to check fail_on_drop yes
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_lang:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
name: c
|
||||
state: absent
|
||||
fail_on_drop: yes
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.failed == true
|
||||
|
||||
# Check fail_on_drop no
|
||||
- name: postgresql_lang - drop c language to check fail_on_drop no
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_lang:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
name: c
|
||||
state: absent
|
||||
fail_on_drop: no
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.failed == false
|
||||
|
||||
# Create trusted language:
|
||||
- name: postgresql_lang - create plpythonu
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_lang:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
name: plpythonu
|
||||
trust: yes
|
||||
force_trust: yes
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['CREATE TRUSTED LANGUAGE "plpythonu"', "UPDATE pg_language SET lanpltrusted = true WHERE lanname = 'plpythonu'"]
|
||||
|
||||
- name: postgresql_lang - check that lang exists and it's trusted after previous step
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu' AND lanpltrusted = 't'"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 1
|
||||
|
||||
# Drop language cascade, tests of aliases:
|
||||
- name: postgresql_lang - drop plpythonu cascade
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_lang:
|
||||
login_db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
login_port: 5432
|
||||
lang: plpythonu
|
||||
state: absent
|
||||
cascade: yes
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ['DROP LANGUAGE "plpythonu" CASCADE']
|
||||
|
||||
- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_query:
|
||||
db: postgres
|
||||
login_user: "{{ pg_user }}"
|
||||
query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu'"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.rowcount == 0
|
|
@ -1,3 +0,0 @@
|
|||
postgresql_lang_packages:
|
||||
- postgresql-plperl
|
||||
- postgresql-plpython
|
|
@ -1,3 +0,0 @@
|
|||
postgresql_lang_packages:
|
||||
- postgresql-plperl
|
||||
- postgresql-plpython3
|
|
@ -1,6 +0,0 @@
|
|||
destructive
|
||||
shippable/posix/group4
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/macos
|
||||
disabled # tests already running in community.postgresql
|
|
@ -1,6 +0,0 @@
|
|||
test_group1: group1
|
||||
test_group2: group2
|
||||
test_group3: group.with.dots
|
||||
test_user1: user1
|
||||
test_user2: user.with.dots
|
||||
dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
|
|
@ -1,2 +0,0 @@
|
|||
dependencies:
|
||||
- setup_postgresql_db
|
|
@ -1,7 +0,0 @@
|
|||
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
# Initial CI tests of postgresql_membership module
|
||||
- import_tasks: postgresql_membership_initial.yml
|
|
@ -1,390 +0,0 @@
|
|||
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
####################
|
||||
# Prepare for tests:
|
||||
|
||||
# Create test roles:
|
||||
- name: postgresql_membership - create test roles
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_user:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
name: "{{ item }}"
|
||||
ignore_errors: yes
|
||||
with_items:
|
||||
- "{{ test_group1 }}"
|
||||
- "{{ test_group2 }}"
|
||||
- "{{ test_group3 }}"
|
||||
- "{{ test_user1 }}"
|
||||
- "{{ test_user2 }}"
|
||||
|
||||
################
|
||||
# Do main tests:
|
||||
|
||||
### Test check_mode
|
||||
# Grant test_group1 to test_user1 in check_mode:
|
||||
- name: postgresql_membership - grant test_group1 to test_user1 in check_mode
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: "{{ test_group1 }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: present
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.groups == ["{{ test_group1 }}"]
|
||||
- result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
|
||||
- result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
|
||||
- result.state == "present"
|
||||
- result.target_roles == ["{{ test_user1 }}"]
|
||||
|
||||
# Try to revoke test_group1 from test_user1 to check that
|
||||
# nothing actually changed in check_mode at the previous step:
|
||||
- name: postgresql_membership - try to revoke test_group1 from test_user1 to verify check_mode made no changes
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: "{{ test_group1 }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: absent
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.groups == ["{{ test_group1 }}"]
|
||||
- result.queries == []
|
||||
- result.revoked.{{ test_group1 }} == []
|
||||
- result.state == "absent"
|
||||
- result.target_roles == ["{{ test_user1 }}"]
|
||||
### End of test check_mode
|
||||
|
||||
# Grant test_group1 to test_user1:
|
||||
- name: postgresql_membership - grant test_group1 to test_user1
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: "{{ test_group1 }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: present
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.groups == ["{{ test_group1 }}"]
|
||||
- result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
|
||||
- result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
|
||||
- result.state == "present"
|
||||
- result.target_roles == ["{{ test_user1 }}"]
|
||||
|
||||
# Grant test_group1 to test_user1 again to check that nothing changes:
|
||||
- name: postgresql_membership - grant test_group1 to test_user1 again
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: "{{ test_group1 }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: present
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.groups == ["{{ test_group1 }}"]
|
||||
- result.queries == []
|
||||
- result.granted.{{ test_group1 }} == []
|
||||
- result.state == "present"
|
||||
- result.target_roles == ["{{ test_user1 }}"]
|
||||
|
||||
# Revoke test_group1 from test_user1:
|
||||
- name: postgresql_membership - revoke test_group1 from test_user1
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: "{{ test_group1 }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: absent
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.groups == ["{{ test_group1 }}"]
|
||||
- result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\""]
|
||||
- result.revoked.{{ test_group1 }} == ["{{ test_user1 }}"]
|
||||
- result.state == "absent"
|
||||
- result.target_roles == ["{{ test_user1 }}"]
|
||||
|
||||
# Revoke test_group1 from test_user1 again to check that nothing changes:
|
||||
- name: postgresql_membership - revoke test_group1 from test_user1 again
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: "{{ test_group1 }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: absent
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.groups == ["{{ test_group1 }}"]
|
||||
- result.queries == []
|
||||
- result.revoked.{{ test_group1 }} == []
|
||||
- result.state == "absent"
|
||||
- result.target_roles == ["{{ test_user1 }}"]
|
||||
|
||||
# Grant test_group1 and test_group2 to test_user1 and test_user2:
|
||||
- name: postgresql_membership - grant two groups to two users
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group:
|
||||
- "{{ test_group1 }}"
|
||||
- "{{ test_group2 }}"
|
||||
user:
|
||||
- "{{ test_user1 }}"
|
||||
- "{{ test_user2 }}"
|
||||
state: present
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
|
||||
- result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group1 }}\" TO \"{{ test_user2 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user2 }}\""]
|
||||
- result.granted.{{ test_group1 }} == ["{{ test_user1 }}", "{{ test_user2 }}"]
|
||||
- result.granted.{{ test_group2 }} == ["{{ test_user1 }}", "{{ test_user2 }}"]
|
||||
- result.state == "present"
|
||||
- result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
|
||||
|
||||
# Grant test_group1 and test_group2 to test_user1 and test_user2 again to check that nothing changes:
|
||||
- name: postgresql_membership - grant two groups to two users again
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group:
|
||||
- "{{ test_group1 }}"
|
||||
- "{{ test_group2 }}"
|
||||
user:
|
||||
- "{{ test_user1 }}"
|
||||
- "{{ test_user2 }}"
|
||||
state: present
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
|
||||
- result.queries == []
|
||||
- result.granted.{{ test_group1 }} == []
|
||||
- result.granted.{{ test_group2 }} == []
|
||||
- result.state == "present"
|
||||
- result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
|
||||
|
||||
# Revoke only test_group1 from test_user1:
|
||||
- name: postgresql_membership - revoke one group from one user
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: "{{ test_group1 }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: absent
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.groups == ["{{ test_group1 }}"]
|
||||
- result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\""]
|
||||
- result.revoked.{{ test_group1 }} == ["{{ test_user1 }}"]
|
||||
- result.state == "absent"
|
||||
- result.target_roles == ["{{ test_user1 }}"]
|
||||
|
||||
# Try to grant test_group1 and test_group2 to test_user1 and test_user2 again
|
||||
# to check that nothing changes with test_user2:
|
||||
- name: postgresql_membership - grant two groups to two users again
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group:
|
||||
- "{{ test_group1 }}"
|
||||
- "{{ test_group2 }}"
|
||||
user:
|
||||
- "{{ test_user1 }}"
|
||||
- "{{ test_user2 }}"
|
||||
state: present
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
|
||||
- result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
|
||||
- result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
|
||||
- result.granted.{{ test_group2 }} == []
|
||||
- result.state == "present"
|
||||
- result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
|
||||
|
||||
#####################
|
||||
# Check fail_on_role:
|
||||
|
||||
# Try to grant a non-existent group to a non-existent role with fail_on_role=yes:
|
||||
- name: postgresql_membership - grant non-existent group to non-existent role, fail_on_role=yes
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: fake_group
|
||||
user: fake_user
|
||||
state: present
|
||||
fail_on_role: yes
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
# Try to grant a non-existent group to a non-existent role with fail_on_role=no:
|
||||
- name: postgresql_membership - grant non-existent group to non-existent role, fail_on_role=no
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: fake_group
|
||||
user: fake_user
|
||||
state: present
|
||||
fail_on_role: no
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.granted == {}
|
||||
- result.groups == []
|
||||
- result.target_roles == []
|
||||
- result.state == 'present'
|
||||
|
||||
# Try to revoke a non-existent group from a non-existent role with fail_on_role=no:
|
||||
- name: postgresql_membership - revoke non-existent group from non-existent role, fail_on_role=no
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: fake_group
|
||||
user: fake_user
|
||||
state: absent
|
||||
fail_on_role: no
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.revoked == {}
|
||||
- result.groups == []
|
||||
- result.target_roles == []
|
||||
- result.state == 'absent'
|
||||
|
||||
# Grant test_group3 with a name containing dots to test_user1.
|
||||
- name: postgresql_membership - grant test_group3 with dots to test_user1
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group: "{{ test_group3 }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.queries == ["GRANT \"{{ test_group3 }}\" TO \"{{ test_user1 }}\""]
|
||||
|
||||
#############################
|
||||
# Check trust_input parameter
|
||||
|
||||
- name: postgresql_membership - try to use dangerous input, don't trust
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group:
|
||||
- "{{ test_group3}}"
|
||||
- "{{ dangerous_name }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: present
|
||||
trust_input: no
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
- result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
|
||||
|
||||
- name: postgresql_membership - try to use dangerous input, trust explicitly
|
||||
become_user: "{{ pg_user }}"
|
||||
become: yes
|
||||
postgresql_membership:
|
||||
login_user: "{{ pg_user }}"
|
||||
db: postgres
|
||||
group:
|
||||
- "{{ test_group3}}"
|
||||
- "{{ dangerous_name }}"
|
||||
user: "{{ test_user1 }}"
|
||||
state: present
|
||||
trust_input: yes
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
- result.msg == 'Role {{ dangerous_name }} does not exist'
|
|
@ -1,6 +0,0 @@
|
|||
destructive
|
||||
shippable/posix/group4
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/macos
|
||||
disabled # tests already running in community.postgresql
|
|
@ -1,3 +0,0 @@
|
|||
test_tablespace_path: "/ssd"
|
||||
|
||||
dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
|
|
@ -1,2 +0,0 @@
|
|||
dependencies:
|
||||
- setup_postgresql_db
|
|
@ -1,9 +0,0 @@
|
|||
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
# Initial CI tests of postgresql_owner module
|
||||
- import_tasks: postgresql_owner_initial.yml
|
||||
when:
|
||||
- postgres_version_resp.stdout is version('9.4', '>=')
|
|
@ -1,6 +0,0 @@
|
|||
destructive
|
||||
shippable/posix/group4
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/macos
|
||||
disabled # tests already running in community.postgresql
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
pg_hba_test_ips:
|
||||
- contype: local
|
||||
users: 'all,postgres,test'
|
||||
- source: '0000:ffff::'
|
||||
netmask: 'ffff:fff0::'
|
||||
- source: '192.168.0.0/24'
|
||||
netmask: ''
|
||||
databases: 'all,replication'
|
||||
- source: '192.168.1.0/24'
|
||||
netmask: ''
|
||||
databases: 'all'
|
||||
method: reject
|
||||
- source: '127.0.0.1/32'
|
||||
netmask: ''
|
||||
- source: '::1/128'
|
||||
netmask: ''
|
||||
- source: '0000:ff00::'
|
||||
netmask: 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00'
|
||||
method: scram-sha-256
|
||||
- source: '172.16.0.0'
|
||||
netmask: '255.255.0.0'
|
||||
method: trust
|
|
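The pg_hba_test_ips entries above describe pg_hba rules by connection type, source address or netmask, databases, users, and auth method. A minimal sketch of how one such entry is typically applied, assuming the postgresql_pg_hba module from the migrated community.postgresql collection and an illustrative dest path:

# Hedged example: the dest path and module FQCN are assumptions for illustration only.
- name: Allow md5 authentication for all databases from 192.168.0.0/24
  community.postgresql.postgresql_pg_hba:
    dest: /etc/postgresql/13/main/pg_hba.conf
    contype: host
    databases: all
    users: all
    source: 192.168.0.0/24
    method: md5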
@ -1,2 +0,0 @@
|
|||
dependencies:
|
||||
- setup_postgresql_db
|