
Module postgresql_idx: added ci tests, new params, returned values, code refactoring (#52230)

* postgresql_idx: ci tests, refactoring, return values

* postgresql_idx: ci tests, new params, return values

* postgresql_idx: ci tests, fix

* postgresql_idx: ci tests, fix

* postgresql_idx: ci tests, fix

* postgresql_idx: ci tests, fix

* postgresql_idx: ci tests, fix

* postgresql_idx: ci tests, fix

* postgresql_idx: ci tests, fix

* postgresql_idx: ci tests, fix

* postgresql_idx: ci tests, fix

* New module postgresql_table - fix tests

* New module postgresql_table - fix tests

* New module postgresql_table - fix tests

* New module postgresql_table - fix state choices order
Andrey Klychkov 2019-02-15 17:38:56 +03:00 committed by Dag Wieers
parent b0606213dc
commit 8e0f95951d
3 changed files with 614 additions and 208 deletions


@@ -1,148 +1,211 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrey Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_idx
short_description: Creates or drops indexes from a PostgreSQL database.
short_description: Create or drop indexes from a PostgreSQL database
description:
- Create or drop indexes from a remote PostgreSQL database.
- Creates or drops indexes from a remote PostgreSQL database
U(https://www.postgresql.org/docs/current/sql-createindex.html).
version_added: "2.8"
options:
idxname:
description:
- Name of the index to create or drop.
- Name of the index to create or drop.
type: str
required: true
aliases:
- name
db:
description:
- Name of database where the index will be created/dropped.
- Name of database where the index will be created/dropped.
type: str
port:
description:
- Database port to connect to.
- Database port to connect to.
type: int
default: 5432
login_user:
description:
- User (role) used to authenticate with PostgreSQL.
- User (role) used to authenticate with PostgreSQL.
type: str
default: postgres
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
schema:
description:
- Name of a database schema.
login_password:
description:
- Password used to authenticate with PostgreSQL.
- Password used to authenticate with PostgreSQL.
type: str
login_host:
description:
- Host running PostgreSQL.
- Host running PostgreSQL.
type: str
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
- Path to a Unix domain socket for local connections.
type: str
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection
will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
more information on the modes.
- Default of C(prefer) matches libpq default.
- Determines whether or with what priority a secure SSL TCP/IP connection
will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ssl_rootcert:
description:
- Specifies the name of a file containing SSL certificate authority (CA)
certificate(s). If the file exists, the server's certificate will be
verified to be signed by one of these authorities.
- Specifies the name of a file containing SSL certificate authority (CA)
certificate(s). If the file exists, the server's certificate will be
verified to be signed by one of these authorities.
type: str
state:
description:
- Index state.
- Index state.
type: str
default: present
choices: ["present", "absent"]
choices: [ absent, present ]
table:
description:
- Table to create an index on.
- Table to create an index on.
- Mutually exclusive with I(state=absent).
type: str
required: true
columns:
description:
- List of index columns.
type: str
- List of index columns.
- Mutually exclusive with I(state=absent).
type: list
cond:
description:
- Index conditions.
- Index conditions.
- Mutually exclusive with I(state=absent).
type: str
idxtype:
description:
- Index type (like btree, gist, gin, etc.).
- Index type (like btree, gist, gin, etc.).
- Mutually exclusive with I(state=absent).
type: str
aliases:
- type
concurrent:
description:
- Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
- Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
- Mutually exclusive with check mode and I(cascade=yes).
type: bool
default: yes
tablespace:
description:
- Set a tablespace for the index.
- Mutually exclusive with I(state=absent).
required: false
type: str
storage_params:
description:
- Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
- Mutually exclusive with I(state=absent).
type: list
cascade:
description:
- Automatically drop objects that depend on the index,
and in turn all objects that depend on those objects U(https://www.postgresql.org/docs/current/sql-dropindex.html).
- Used only with I(state=absent).
- Mutually exclusive with I(concurrent=yes).
type: bool
default: no
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- I(concurrent=yes) cannot be used in check mode because
"CREATE INDEX CONCURRENTLY" cannot run inside a transaction block.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Andrew Klychkov (@Andersson007)"
author:
- Andrew Klychkov (@Andersson007)
'''
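As a quick illustration of the Ubuntu note above, the prerequisites can be installed with a plain package task before calling the module; this is only a sketch for apt-based hosts, using the package names listed in the note:

- name: Install PostgreSQL and psycopg2 prerequisites (Debian/Ubuntu sketch)
  become: yes
  apt:
    name:
      - postgresql
      - libpq-dev
      - python-psycopg2
    state: present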
EXAMPLES = '''
# Create btree index test_idx concurrently covering columns id and name of table products
- postgresql_idx:
EXAMPLES = r'''
# To create or drop an index in check mode, use concurrent=no and --check
- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
postgresql_idx:
db: acme
table: products
columns: id,name
idxname: test_idx
# Create gist index test_gist_idx concurrently on column geo_data of table map
- postgresql_idx:
- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
postgresql_idx:
db: acme
table: products
columns:
- id
- name
idxname: test_idx
tablespace: ssd
storage_params:
- fillfactor=90
- name: Create gist index test_gist_idx concurrently on column geo_data of table map
postgresql_idx:
db: somedb
table: map
idxtype: gist
columns: geo_data
idxname: test_gist_idx
# Create gin index gin0_idx not concurrently on column comment of table test
# (Note: pg_trgm extension must be installed for gin_trgm_ops)
- postgresql_idx:
# Note: for the example below, the pg_trgm extension must be installed for gin_trgm_ops
- name: Create gin index gin0_idx not concurrently on column comment of table test
postgresql_idx:
idxname: gin0_idx
table: test
columns: comment gin_trgm_ops
concurrent: no
idxtype: gin
# Drop btree test_idx concurrently
- postgresql_idx:
- name: Drop btree test_idx concurrently
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
# Create btree index test_idx concurrently on columns id,comment where column id > 1
- postgresql_idx:
- name: Drop test_idx cascade
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
cascade: yes
concurrent: no
- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
postgresql_idx:
db: mydb
table: test
columns: id,comment
@@ -150,24 +213,56 @@ EXAMPLES = '''
cond: id > 1
'''
RETURN = ''' # '''
RETURN = r'''
name:
description: Index name.
returned: always
type: str
sample: 'foo_idx'
state:
description: Index state.
returned: always
type: str
sample: 'present'
schema:
description: Schema where index exists.
returned: always
type: str
sample: 'public'
tablespace:
description: Tablespace where index exists.
returned: always
type: str
sample: 'ssd'
query:
description: Query that the module tried to execute.
returned: always
type: str
sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
storage_params:
description: Index storage parameters.
returned: always
type: list
sample: [ "fillfactor=90" ]
valid:
description: Index validity.
returned: always
type: bool
sample: true
'''
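To show how the return values documented above might be consumed, here is a minimal playbook sketch; the database, table, and index names are hypothetical:

- name: Create index and register the documented return values
  postgresql_idx:
    db: acme
    table: products
    columns: id
    idxname: products_id_idx
  register: idx_result

- name: Show the executed query and index validity
  debug:
    msg: "{{ idx_result.query }} (valid: {{ idx_result.valid }})"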
import traceback
PSYCOPG2_IMP_ERR = None
try:
import psycopg2
import psycopg2.extras
HAS_PSYCOPG2 = True
except ImportError:
PSYCOPG2_IMP_ERR = traceback.format_exc()
postgresqldb_found = False
else:
postgresqldb_found = True
HAS_PSYCOPG2 = False
import ansible.module_utils.postgres as pgutils
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
@@ -179,93 +274,152 @@ VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
# PostgreSQL module specific support methods.
#
class Index(object):
def __init__(self, module, cursor, schema, name):
self.name = name
if schema:
self.schema = schema
else:
self.schema = 'public'
self.module = module
self.cursor = cursor
self.info = {
'name': self.name,
'state': 'absent',
'schema': '',
'tblname': '',
'tblspace': '',
'valid': True,
'storage_params': [],
}
self.exists = False
self.__exists_in_db()
self.executed_query = ''
def index_exists(cursor, idxname):
query = "SELECT indexname FROM pg_indexes "\
"WHERE indexname = '%s'" % idxname
cursor.execute(query)
exists = cursor.fetchone()
if exists is not None:
return True
return False
def get_info(self):
"""
Getter to refresh and return index info
"""
self.__exists_in_db()
return self.info
def __exists_in_db(self):
"""
Check index and collect info
"""
query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
"pi.indisvalid, c.reloptions "
"FROM pg_catalog.pg_indexes AS i "
"JOIN pg_catalog.pg_class AS c "
"ON i.indexname = c.relname "
"JOIN pg_catalog.pg_index AS pi "
"ON c.oid = pi.indexrelid "
"WHERE i.indexname = '%s'" % self.name)
def index_valid(cursor, idxname, module):
query = "SELECT i.indisvalid FROM pg_catalog.pg_index AS i "\
"WHERE i.indexrelid = (SELECT oid "\
"FROM pg_class WHERE relname = '%s')" % idxname
cursor.execute(query)
valid = cursor.fetchone()
if valid is None:
module.fail_json(msg="Validity check: returns "
"no information about %s" % idxname)
return valid
res = self.__exec_sql(query)
if res:
self.exists = True
self.info = dict(
name=self.name,
state='present',
schema=res[0][0],
tblname=res[0][1],
tblspace=res[0][2] if res[0][2] else '',
valid=res[0][3],
storage_params=res[0][4] if res[0][4] else [],
)
return True
def index_create(cursor, module, idxname, tblname, idxtype,
columns, cond, concurrent=True):
"""Create new index"""
changed = False
if idxtype is None:
idxtype = "BTREE"
mode = 'CONCURRENTLY'
if not concurrent:
mode = ''
if cond is None:
condition = ''
else:
condition = 'WHERE %s' % cond
for column in columns.split(','):
column.strip()
query = "CREATE INDEX %s %s ON %s USING %s (%s)%s" % (
mode, idxname, tblname, idxtype, columns, condition)
try:
if index_exists(cursor, idxname):
else:
self.exists = False
return False
cursor.execute(query)
# In any case, even if the created index is not valid,
# the database schema has been changed:
changed = True
except psycopg2.InternalError as e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
def create(self, tblname, idxtype, columns, cond, tblspace, storage_params, concurrent=True):
"""
Create PostgreSQL index.
"""
# To change an existing index we should write
# a standalone 'postgresql_alter_table' module.
if self.exists:
return False
changed = False
if idxtype is None:
idxtype = "BTREE"
query = 'CREATE INDEX'
if concurrent:
query += ' CONCURRENTLY'
query += ' %s' % self.name
if self.schema:
query += ' ON %s.%s ' % (self.schema, tblname)
else:
raise psycopg2.InternalError(e)
return changed
query += 'public.%s ' % tblname
query += 'USING %s (%s)' % (idxtype, columns)
def index_drop(cursor, module, idxname, concurrent=True):
"""Drop index"""
changed = False
if not index_exists(cursor, idxname):
return changed
if storage_params:
query += ' WITH (%s)' % storage_params
mode = 'CONCURRENTLY'
if not concurrent:
mode = ''
if tblspace:
query += ' TABLESPACE %s' % tblspace
query = 'DROP INDEX %s %s' % (mode, idxname)
try:
cursor.execute(query)
changed = True
except psycopg2.InternalError as e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
if cond:
query += ' WHERE %s' % cond
self.executed_query = query
if self.__exec_sql(query, ddl=True):
return True
return False
def drop(self, schema, cascade=False, concurrent=True):
"""
Drop PostgreSQL index.
"""
changed = False
if not self.exists:
return False
query = 'DROP INDEX'
if concurrent:
query += ' CONCURRENTLY'
if not schema:
query += ' public.%s' % self.name
else:
raise psycopg2.InternalError(e)
return changed
query += ' %s.%s' % (schema, self.name)
if cascade:
query += ' CASCADE'
self.executed_query = query
if self.__exec_sql(query, ddl=True):
return True
return False
def __exec_sql(self, query, ddl=False):
try:
self.cursor.execute(query)
if not ddl:
res = self.cursor.fetchall()
return res
return True
except SQLParseError as e:
self.module.fail_json(msg=to_native(e))
except Exception as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
# ===========================================
@@ -274,23 +428,27 @@ def index_drop(cursor, module, idxname, concurrent=True):
def main():
argument_spec = pgutils.postgres_common_argument_spec()
argument_spec.update(dict(
idxname=dict(type='str', required=True, aliases=['idxname']),
db=dict(type='str', default=''),
ssl_mode=dict(type='str', default='prefer', choices=[
'allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
argument_spec = postgres_common_argument_spec()
argument_spec.update(
idxname=dict(type='str', required=True, aliases=['name']),
db=dict(type='str'),
ssl_mode=dict(type='str', default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
ssl_rootcert=dict(type='str'),
state=dict(type='str', default="present", choices=["absent", "present"]),
state=dict(type='str', default='present', choices=['absent', 'present']),
concurrent=dict(type='bool', default=True),
table=dict(type='str'),
idxtype=dict(type='str'),
columns=dict(type='str'),
cond=dict(type='str')
))
idxtype=dict(type='str', aliases=['type']),
columns=dict(type='list'),
cond=dict(type='str'),
session_role=dict(type='str'),
tablespace=dict(type='str'),
storage_params=dict(type='list'),
cascade=dict(type='bool', default=False),
schema=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
supports_check_mode=True,
)
idxname = module.params["idxname"]
@@ -301,28 +459,31 @@ def main():
columns = module.params["columns"]
cond = module.params["cond"]
sslrootcert = module.params["ssl_rootcert"]
session_role = module.params["session_role"]
tablespace = module.params["tablespace"]
storage_params = module.params["storage_params"]
cascade = module.params["cascade"]
schema = module.params["schema"]
if concurrent and (module.check_mode or cascade):
module.fail_json(msg="Cuncurrent mode and check mode/cascade are mutually exclusive")
if state == 'present':
if table is None:
if not table:
module.fail_json(msg="Table must be specified")
if columns is None:
if not columns:
module.fail_json(msg="At least one column must be specified")
else:
if table is not None:
if table or columns or cond or idxtype or tablespace:
module.fail_json(msg="Index %s is going to be removed, so it does not "
"make sense to pass a table name" % idxname)
if columns is not None:
module.fail_json(msg="Index %s is going to be removed, so it does not "
"make sense to pass column names" % idxname)
if cond is not None:
module.fail_json(msg="Index %s is going to be removed, so it does not "
"make sense to pass any conditions" % idxname)
if idxtype is not None:
module.fail_json(msg="Index %s is going to be removed, so it does not "
"make sense to pass an index type" % idxname)
"make sense to pass a table name, columns, conditions, "
"index type, or tablespace" % idxname)
if not postgresqldb_found:
module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
if cascade and state != 'absent':
module.fail_json(msg="cascade parameter used only with state=absent")
if not HAS_PSYCOPG2:
module.fail_json(msg="the python psycopg2 module is required")
# To use default values, keyword arguments must be absent, so
# check which values are empty and don't include them in **kw
@@ -340,13 +501,12 @@
if k in params_map and v != "" and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
module.fail_json(
msg='psycopg2 must be at least 2.4.3 in order to use the ssl_rootcert parameter')
module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ssl_rootcert parameter')
if module.check_mode and concurrent:
module.fail_json(msg="Cannot concurrently create or drop index %s "
@@ -356,66 +516,68 @@ def main():
try:
db_connection = psycopg2.connect(**kw)
if concurrent:
db_connection.set_session(autocommit=True)
if psycopg2.__version__ >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(
msg='Postgresql server must be at least version 8.4 to support sslrootcert')
module.fail_json(msg="unable to connect to database: %s" % to_native(e),
exception=traceback.format_exc())
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e),
exception=traceback.format_exc())
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
if state == 'present' and index_exists(cursor, idxname):
kw['changed'] = False
del kw['login_password']
module.exit_json(**kw)
if session_role:
try:
cursor.execute('SET ROLE %s' % session_role)
except Exception as e:
module.fail_json(msg="Could not switch role: %s" % to_native(e))
# Set defaults:
changed = False
if state == "present":
if idxtype is not None and idxtype.upper() not in VALID_IDX_TYPES:
module.fail_json(msg="Index type '%s' of %s is not "
"in valid types" % (idxtype, idxname))
# Do job:
index = Index(module, cursor, schema, idxname)
kw = index.get_info()
kw['query'] = ''
if state == "present":
if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
columns = ','.join(columns)
if storage_params:
storage_params = ','.join(storage_params)
changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent)
if changed:
kw = index.get_info()
kw['state'] = 'present'
kw['query'] = index.executed_query
try:
changed = index_create(cursor, module, idxname, table,
idxtype, columns, cond, concurrent)
kw['index_created'] = True
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except psycopg2.ProgrammingError as e:
module.fail_json(msg="Unable to create %s index with given "
"requirement due to : %s" % (idxname, to_native(e)),
exception=traceback.format_exc())
else:
try:
changed = index_drop(cursor, module, idxname, concurrent)
kw['index_dropped'] = True
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except psycopg2.ProgrammingError as e:
module.fail_json(msg="Unable to drop index %s due to : %s" % (idxname, to_native(e)),
exception=traceback.format_exc())
changed = index.drop(schema, cascade, concurrent)
if changed:
kw['state'] = 'absent'
kw['query'] = index.executed_query
if not module.check_mode and not kw['valid'] and concurrent:
db_connection.rollback()
module.warn("Index %s is invalid! ROLLBACK" % idxname)
if not concurrent:
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
if not module.check_mode and state != 'absent':
if not index_valid(cursor, idxname, module):
kw['changed'] = changed
module.fail_json(msg="Index %s is invalid!" % idxname)
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
del kw['login_password']
module.exit_json(**kw)


@@ -765,6 +765,9 @@
# Verify different session_role scenarios
- include: session_role.yml
# Test postgresql_idx module
- include: postgresql_idx.yml
# dump/restore tests per format
# ============================================================
- include: state_dump_restore.yml test_fixture=user file=dbdata.sql


@@ -0,0 +1,241 @@
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Preparation for tests.
# For the following steps, create the test table:
- name: postgresql_idx - create test table called test_table
become_user: "{{ pg_user }}"
become: yes
shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);"
ignore_errors: yes
# Create a directory for test tablespace:
- name: postgresql_idx - drop test tablespace called ssd if exists
become_user: "{{ pg_user }}"
become: yes
shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLESPACE IF EXISTS ssd;"
ignore_errors: yes
- name: postgresql_idx - drop dir for test tablespace
become: yes
file:
path: /mnt/ssd
state: absent
ignore_errors: yes
- name: postgresql_idx - create dir for test tablespace
become: yes
file:
path: /mnt/ssd
state: directory
owner: "{{ pg_user }}"
mode: 0755
ignore_errors: yes
# Then create a test tablespace:
- name: postgresql_idx - create test tablespace called ssd
become_user: "{{ pg_user }}"
become: yes
shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLESPACE ssd LOCATION '/mnt/ssd';"
ignore_errors: yes
register: tablespace
# Create a test schema:
- name: postgresql_idx - create test schema
become_user: "{{ pg_user }}"
become: yes
shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE SCHEMA foo;"
ignore_errors: yes
# Create a table in schema foo:
- name: postgresql_idx - create table in non-default schema
become_user: "{{ pg_user }}"
become: yes
shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE foo.foo_table (id int, story text);"
ignore_errors: yes
###############
# Do main tests
#
# Create btree index test0_idx concurrently covering id and story columns
- name: postgresql_idx - create btree index concurrently
become_user: "{{ pg_user }}"
become: yes
postgresql_idx:
db: postgres
login_user: "{{ pg_user }}"
table: test_table
columns: id, story
idxname: test0_idx
register: result
ignore_errors: yes
- assert:
that:
- result.changed == true
- result.tblname == 'test_table'
- result.name == 'test0_idx'
- result.state == 'present'
- result.valid != ''
- result.tblspace == ''
- result.storage_params == []
- result.schema == 'public'
- result.query == 'CREATE INDEX CONCURRENTLY test0_idx ON public.test_table USING BTREE (id, story)'
# Check that creating an already existing index changes nothing
- name: postgresql_idx - try to create existing index again
become_user: "{{ pg_user }}"
become: yes
postgresql_idx:
db: postgres
login_user: "{{ pg_user }}"
table: test_table
columns: id, story
idxname: test0_idx
register: result
ignore_errors: yes
- assert:
that:
- result.changed == false
- result.tblname == 'test_table'
- result.name == 'test0_idx'
- result.state == 'present'
- result.valid != ''
- result.tblspace == ''
- result.storage_params == []
- result.schema == 'public'
- result.query == ''
# Create btree index foo_test_idx concurrently with tablespace called ssd,
# storage parameter, and non-default schema
- name: postgresql_idx - create btree index - non-default schema, tablespace, storage parameter
become_user: "{{ pg_user }}"
become: yes
postgresql_idx:
db: postgres
login_user: "{{ pg_user }}"
schema: foo
table: foo_table
columns:
- id
- story
idxname: foo_test_idx
tablespace: ssd
storage_params: fillfactor=90
register: result
ignore_errors: yes
- assert:
that:
- result.changed == true
- result.tblname == 'foo_table'
- result.name == 'foo_test_idx'
- result.state == 'present'
- result.valid != ''
- result.tblspace == 'ssd'
- result.storage_params == [ "fillfactor=90" ]
- result.schema == 'foo'
- result.query == 'CREATE INDEX CONCURRENTLY foo_test_idx ON foo.foo_table USING BTREE (id,story) WITH (fillfactor=90) TABLESPACE ssd'
when: tablespace.rc == 0
# Create brin index not in concurrent mode
- name: postgresql_idx - create brin index not concurrently
become_user: "{{ pg_user }}"
become: yes
postgresql_idx:
db: postgres
login_user: "{{ pg_user }}"
schema: public
table: test_table
state: present
type: brin
columns: id
idxname: test_brin_idx
concurrent: no
register: result
ignore_errors: yes
- assert:
that:
- result.changed == true
- result.tblname == 'test_table'
- result.name == 'test_brin_idx'
- result.state == 'present'
- result.valid != ''
- result.tblspace == ''
- result.storage_params == []
- result.schema == 'public'
- result.query == 'CREATE INDEX test_brin_idx ON public.test_table USING brin (id)'
when: postgres_version_resp.stdout is version('9.5', '>=')
# Create index where column id > 1
- name: postgresql_idx - create index with condition
become_user: "{{ pg_user }}"
become: yes
postgresql_idx:
db: postgres
login_user: "{{ pg_user }}"
table: test_table
columns: id
idxname: test1_idx
cond: id > 1 AND id != 10
register: result
ignore_errors: yes
- assert:
that:
- result.changed == true
- result.tblname == 'test_table'
- result.name == 'test1_idx'
- result.state == 'present'
- result.valid != ''
- result.tblspace == ''
- result.storage_params == []
- result.schema == 'public'
- result.query == 'CREATE INDEX CONCURRENTLY test1_idx ON public.test_table USING BTREE (id) WHERE id > 1 AND id != 10'
# Drop index from a specific schema with cascade
- name: postgresql_idx - drop index from specific schema cascade
become_user: "{{ pg_user }}"
become: yes
postgresql_idx:
db: postgres
login_user: "{{ pg_user }}"
schema: foo
name: foo_test_idx
cascade: yes
state: absent
concurrent: no
register: result
ignore_errors: yes
- assert:
that:
- result.changed == true
- result.name == 'foo_test_idx'
- result.state == 'absent'
- result.schema == 'foo'
- result.query == 'DROP INDEX foo.foo_test_idx CASCADE'
when: tablespace.rc == 0
# Try to drop a non-existent index
- name: postgresql_idx - try to drop a non-existent index
become_user: "{{ pg_user }}"
become: yes
postgresql_idx:
db: postgres
login_user: "{{ pg_user }}"
schema: foo
name: foo_test_idx
state: absent
register: result
ignore_errors: yes
- assert:
that:
- result.changed == false
- result.query == ''