Module postgresql_idx: added ci tests, new params, returned values, code refactoring (#52230)
* postgresql_idx: ci tests, refactoring, return values
* postgresql_idx: ci tests, new params, return values
* postgresql_idx: ci tests, fix
* postgresql_idx: ci tests, fix
* postgresql_idx: ci tests, fix
* postgresql_idx: ci tests, fix
* postgresql_idx: ci tests, fix
* postgresql_idx: ci tests, fix
* postgresql_idx: ci tests, fix
* postgresql_idx: ci tests, fix
* postgresql_idx: ci tests, fix
* New module postgresql_table - fix tests
* New module postgresql_table - fix tests
* New module postgresql_table - fix tests
* New module postgresql_table - fix state choices order
This commit is contained in:
parent b0606213dc
commit 8e0f95951d
3 changed files with 614 additions and 208 deletions
@@ -1,148 +1,211 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, Andrey Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}


DOCUMENTATION = r'''
---
module: postgresql_idx
short_description: Create or drop indexes from a PostgreSQL database
description:
- Creates or drops indexes from a remote PostgreSQL database
  U(https://www.postgresql.org/docs/current/sql-createindex.html).
version_added: "2.8"
options:
  idxname:
    description:
    - Name of the index to create or drop.
    type: str
    required: true
    aliases:
    - name
  db:
    description:
    - Name of the database where the index will be created/dropped.
    type: str
  port:
    description:
    - Database port to connect to.
    type: int
    default: 5432
  login_user:
    description:
    - User (role) used to authenticate with PostgreSQL.
    type: str
    default: postgres
  session_role:
    description:
    - Switch to session_role after connecting.
      The specified session_role must be a role that the current login_user is a member of.
    - Permissions checking for SQL commands is carried out as though
      the session_role were the one that had logged in originally.
    type: str
  schema:
    description:
    - Name of a database schema.
  login_password:
    description:
    - Password used to authenticate with PostgreSQL.
    type: str
  login_host:
    description:
    - Host running PostgreSQL.
    type: str
  login_unix_socket:
    description:
    - Path to a Unix domain socket for local connections.
    type: str
  ssl_mode:
    description:
    - Determines whether or with what priority a secure SSL TCP/IP connection
      will be negotiated with the server.
    - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
      more information on the modes.
    - Default of C(prefer) matches libpq default.
    type: str
    default: prefer
    choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
  ssl_rootcert:
    description:
    - Specifies the name of a file containing SSL certificate authority (CA)
      certificate(s). If the file exists, the server's certificate will be
      verified to be signed by one of these authorities.
    type: str
  state:
    description:
    - Index state.
    type: str
    default: present
    choices: [ absent, present ]
  table:
    description:
    - Table to create the index on.
    - Mutually exclusive with I(state=absent).
    type: str
    required: true
  columns:
    description:
    - List of index columns.
    - Mutually exclusive with I(state=absent).
    type: list
  cond:
    description:
    - Index conditions.
    - Mutually exclusive with I(state=absent).
    type: str
  idxtype:
    description:
    - Index type (like btree, gist, gin, etc.).
    - Mutually exclusive with I(state=absent).
    type: str
    aliases:
    - type
  concurrent:
    description:
    - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
    - Mutually exclusive with check mode and I(cascade=yes).
    type: bool
    default: yes
  tablespace:
    description:
    - Set a tablespace for the index.
    - Mutually exclusive with I(state=absent).
    required: false
    type: str
  storage_params:
    description:
    - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
    - Mutually exclusive with I(state=absent).
    type: list
  cascade:
    description:
    - Automatically drop objects that depend on the index,
      and in turn all objects that depend on those objects U(https://www.postgresql.org/docs/current/sql-dropindex.html).
    - It is used only with I(state=absent).
    - Mutually exclusive with I(concurrent=yes).
    type: bool
    default: no
notes:
- The default authentication assumes that you are either logging in as or
  sudo'ing to the postgres account on the host.
- I(concurrent=yes) cannot be used in check mode because
  "CREATE INDEX CONCURRENTLY" cannot run inside a transaction block.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
  ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
  PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
  on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Andrew Klychkov (@Andersson007)
'''

EXAMPLES = r'''
# For create / drop index in check mode use concurrent=no and --check

- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
  postgresql_idx:
    db: acme
    table: products
    columns: id,name
    idxname: test_idx

- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
  postgresql_idx:
    db: acme
    table: products
    columns:
    - id
    - name
    idxname: test_idx
    tablespace: ssd
    storage_params:
    - fillfactor=90

- name: Create gist index test_gist_idx concurrently on column geo_data of table map
  postgresql_idx:
    db: somedb
    table: map
    idxtype: gist
    columns: geo_data
    idxname: test_gist_idx

# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
- name: Create gin index gin0_idx not concurrently on column comment of table test
  postgresql_idx:
    idxname: gin0_idx
    table: test
    columns: comment gin_trgm_ops
    concurrent: no
    idxtype: gin

- name: Drop btree test_idx concurrently
  postgresql_idx:
    db: mydb
    idxname: test_idx
    state: absent

- name: Drop test_idx cascade
  postgresql_idx:
    db: mydb
    idxname: test_idx
    state: absent
    cascade: yes
    concurrent: no

- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
  postgresql_idx:
    db: mydb
    table: test
    columns: id,comment
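The examples above pass columns either as a comma-separated string or as a YAML list; main() (further down) joins list values back into a single comma-separated fragment before building the statement. A tiny standalone sketch of that behaviour, illustrative only and not part of the patch:

    # Sketch: how list-valued columns/storage_params collapse to the
    # comma-separated fragments used in the CREATE INDEX statement.
    columns = ['id', 'name']            # e.g. from "columns: [id, name]"
    storage_params = ['fillfactor=90']  # e.g. from "storage_params: [fillfactor=90]"

    print(','.join(columns))            # -> id,name
    print(','.join(storage_params))     # -> fillfactor=90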
@@ -150,24 +213,56 @@ EXAMPLES = '''
    cond: id > 1
'''

RETURN = r'''
name:
  description: Index name.
  returned: always
  type: str
  sample: 'foo_idx'
state:
  description: Index state.
  returned: always
  type: str
  sample: 'present'
schema:
  description: Schema where the index exists.
  returned: always
  type: str
  sample: 'public'
tablespace:
  description: Tablespace where the index exists.
  returned: always
  type: str
  sample: 'ssd'
query:
  description: Query that was tried to be executed.
  returned: always
  type: str
  sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
storage_params:
  description: Index storage parameters.
  returned: always
  type: list
  sample: [ "fillfactor=90" ]
valid:
  description: Index validity.
  returned: always
  type: bool
  sample: true
'''

import traceback

try:
    import psycopg2
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False

import ansible.module_utils.postgres as pgutils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
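For orientation, the RETURN block above describes one flat dictionary of facts per task. A rough Python sketch of the shape a registered result might take, built from the sample values above plus the standard 'changed' key (illustrative values only, not from the patch):

    # Illustrative only: shape of the module's returned facts.
    result = {
        'changed': True,
        'name': 'foo_idx',
        'state': 'present',
        'schema': 'public',
        'tablespace': 'ssd',
        'query': 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)',
        'storage_params': ['fillfactor=90'],
        'valid': True,
    }
    print(result['query'])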
@@ -179,93 +274,152 @@ VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
# PostgreSQL module specific support methods.
#


class Index(object):
    def __init__(self, module, cursor, schema, name):
        self.name = name
        if schema:
            self.schema = schema
        else:
            self.schema = 'public'
        self.module = module
        self.cursor = cursor
        self.info = {
            'name': self.name,
            'state': 'absent',
            'schema': '',
            'tblname': '',
            'tblspace': '',
            'valid': True,
            'storage_params': [],
        }
        self.exists = False
        self.__exists_in_db()
        self.executed_query = ''

    def get_info(self):
        """
        Getter to refresh and return table info
        """
        self.__exists_in_db()
        return self.info

    def __exists_in_db(self):
        """
        Check index and collect info
        """
        query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
                 "pi.indisvalid, c.reloptions "
                 "FROM pg_catalog.pg_indexes AS i "
                 "JOIN pg_catalog.pg_class AS c "
                 "ON i.indexname = c.relname "
                 "JOIN pg_catalog.pg_index AS pi "
                 "ON c.oid = pi.indexrelid "
                 "WHERE i.indexname = '%s'" % self.name)

        res = self.__exec_sql(query)
        if res:
            self.exists = True
            self.info = dict(
                name=self.name,
                state='present',
                schema=res[0][0],
                tblname=res[0][1],
                tblspace=res[0][2] if res[0][2] else '',
                valid=res[0][3],
                storage_params=res[0][4] if res[0][4] else [],
            )
            return True

        else:
            self.exists = False
            return False

    def create(self, tblname, idxtype, columns, cond, tblspace, storage_params, concurrent=True):
        """
        Create PostgreSQL index.
        """
        # To change existing index we should write
        # 'postgresql_alter_table' standalone module.

        if self.exists:
            return False

        changed = False
        if idxtype is None:
            idxtype = "BTREE"

        query = 'CREATE INDEX'

        if concurrent:
            query += ' CONCURRENTLY'

        query += ' %s' % self.name

        if self.schema:
            query += ' ON %s.%s ' % (self.schema, tblname)
        else:
            query += 'public.%s ' % tblname

        query += 'USING %s (%s)' % (idxtype, columns)

        if storage_params:
            query += ' WITH (%s)' % storage_params

        if tblspace:
            query += ' TABLESPACE %s' % tblspace

        if cond:
            query += ' WHERE %s' % cond

        self.executed_query = query

        if self.__exec_sql(query, ddl=True):
            return True

        return False

    def drop(self, schema, cascade=False, concurrent=True):
        """
        Drop PostgreSQL index.
        """

        changed = False
        if not self.exists:
            return False

        query = 'DROP INDEX'

        if concurrent:
            query += ' CONCURRENTLY'

        if not schema:
            query += ' public.%s' % self.name
        else:
            query += ' %s.%s' % (schema, self.name)

        if cascade:
            query += ' CASCADE'

        self.executed_query = query

        if self.__exec_sql(query, ddl=True):
            return True

        return False

    def __exec_sql(self, query, ddl=False):
        try:
            self.cursor.execute(query)
            if not ddl:
                res = self.cursor.fetchall()
                return res
            return True
        except SQLParseError as e:
            self.module.fail_json(msg=to_native(e))
        except Exception as e:
            self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))

        return False


# ===========================================
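Index.create() builds the statement by appending optional clauses in a fixed order: CONCURRENTLY, schema-qualified table, USING, WITH, TABLESPACE, WHERE. A standalone sketch of that same assembly with example values (runnable without a database, not part of the patch):

    # Standalone sketch of the query assembly done in Index.create() above.
    def build_create_index(name, schema, tblname, idxtype='BTREE', columns='id',
                           storage_params=None, tblspace=None, cond=None, concurrent=True):
        query = 'CREATE INDEX'
        if concurrent:
            query += ' CONCURRENTLY'
        query += ' %s' % name
        query += ' ON %s.%s ' % (schema or 'public', tblname)
        query += 'USING %s (%s)' % (idxtype, columns)
        if storage_params:
            query += ' WITH (%s)' % storage_params
        if tblspace:
            query += ' TABLESPACE %s' % tblspace
        if cond:
            query += ' WHERE %s' % cond
        return query

    print(build_create_index('foo_test_idx', 'foo', 'foo_table', columns='id,story',
                             storage_params='fillfactor=90', tblspace='ssd'))
    # -> CREATE INDEX CONCURRENTLY foo_test_idx ON foo.foo_table USING BTREE (id,story) WITH (fillfactor=90) TABLESPACE ssd

This matches the query string asserted in the integration tests further below.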
@@ -274,23 +428,27 @@ def index_drop(cursor, module, idxname, concurrent=True):


def main():
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        idxname=dict(type='str', required=True, aliases=['name']),
        db=dict(type='str'),
        ssl_mode=dict(type='str', default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
        ssl_rootcert=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        concurrent=dict(type='bool', default=True),
        table=dict(type='str'),
        idxtype=dict(type='str', aliases=['type']),
        columns=dict(type='list'),
        cond=dict(type='str'),
        session_role=dict(type='str'),
        tablespace=dict(type='str'),
        storage_params=dict(type='list'),
        cascade=dict(type='bool', default=False),
        schema=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    idxname = module.params["idxname"]
@@ -301,28 +459,31 @@ def main():
    columns = module.params["columns"]
    cond = module.params["cond"]
    sslrootcert = module.params["ssl_rootcert"]
    session_role = module.params["session_role"]
    tablespace = module.params["tablespace"]
    storage_params = module.params["storage_params"]
    cascade = module.params["cascade"]
    schema = module.params["schema"]

    if concurrent and (module.check_mode or cascade):
        module.fail_json(msg="Concurrent mode and check mode/cascade are mutually exclusive")

    if state == 'present':
        if not table:
            module.fail_json(msg="Table must be specified")
        if not columns:
            module.fail_json(msg="At least one column must be specified")
    else:
        if table or columns or cond or idxtype or tablespace:
            module.fail_json(msg="Index %s is going to be removed, so it does not "
                                 "make sense to pass a table name, columns, conditions, "
                                 "index type, or tablespace" % idxname)

    if cascade and state != 'absent':
        module.fail_json(msg="cascade parameter used only with state=absent")

    if not HAS_PSYCOPG2:
        module.fail_json(msg="the python psycopg2 module is required")

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
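The fail-fast gating above can be read as a small pure function over the parameters; a sketch with illustrative inputs (not part of the patch):

    # Sketch of the parameter gating above; returns an error message or None.
    def invalid_combination(state, concurrent, cascade, check_mode, table, columns):
        if concurrent and (check_mode or cascade):
            return "Concurrent mode and check mode/cascade are mutually exclusive"
        if state == 'present' and not table:
            return "Table must be specified"
        if state == 'present' and not columns:
            return "At least one column must be specified"
        if cascade and state != 'absent':
            return "cascade parameter used only with state=absent"
        return None

    print(invalid_combination('absent', True, True, False, None, None))
    print(invalid_combination('present', True, False, False, 'products', ['id']))  # None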
@@ -340,13 +501,12 @@ def main():
              if k in params_map and v != "" and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ssl_rootcert parameter')

    if module.check_mode and concurrent:
        module.fail_json(msg="Cannot concurrently create or drop index %s "
@@ -356,66 +516,68 @@
    try:
        db_connection = psycopg2.connect(**kw)
        if concurrent:
            if psycopg2.__version__ >= '2.4.2':
                db_connection.set_session(autocommit=True)
            else:
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

        cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
    except TypeError as e:
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')

        module.fail_json(msg="unable to connect to database: %s" % to_native(e))

    except Exception as e:
        module.fail_json(msg="unable to connect to database: %s" % to_native(e))

    if session_role:
        try:
            cursor.execute('SET ROLE %s' % session_role)
        except Exception as e:
            module.fail_json(msg="Could not switch role: %s" % to_native(e))

    # Set defaults:
    changed = False

    # Do job:
    index = Index(module, cursor, schema, idxname)
    kw = index.get_info()
    kw['query'] = ''

    if state == "present":
        if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
            module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))

        columns = ','.join(columns)

        if storage_params:
            storage_params = ','.join(storage_params)

        changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent)

        if changed:
            kw = index.get_info()
            kw['state'] = 'present'
            kw['query'] = index.executed_query

    else:
        changed = index.drop(schema, cascade, concurrent)

        if changed:
            kw['state'] = 'absent'
            kw['query'] = index.executed_query

    if not module.check_mode and not kw['valid'] and concurrent:
        db_connection.rollback()
        module.warn(msg="Index %s is invalid! ROLLBACK" % idxname)

    if not concurrent:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

    kw['changed'] = changed
    module.exit_json(**kw)
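The connection setup above switches psycopg2 to autocommit before concurrent operations because, as the DOCUMENTATION notes, CREATE/DROP INDEX CONCURRENTLY cannot run inside a transaction block. A rough standalone psycopg2 sketch of that requirement, assuming a reachable local server and a pre-existing test_table (connection parameters are placeholders, not from the patch):

    # Rough sketch, not the module itself: CONCURRENTLY needs autocommit in psycopg2.
    import psycopg2

    conn = psycopg2.connect(dbname='postgres', user='postgres')  # assumed local server
    conn.set_session(autocommit=True)  # psycopg2 >= 2.4.2; older versions use set_isolation_level()
    cur = conn.cursor()
    cur.execute("CREATE INDEX CONCURRENTLY test0_idx ON public.test_table (id, story)")
    cur.close()
    conn.close()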
@@ -765,6 +765,9 @@
# Verify different session_role scenarios
- include: session_role.yml

# Test postgresql_idx module
- include: postgresql_idx.yml

# dump/restore tests per format
# ============================================================
- include: state_dump_restore.yml test_fixture=user file=dbdata.sql
test/integration/targets/postgresql/tasks/postgresql_idx.yml (new file, 241 lines)
@@ -0,0 +1,241 @@
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Preparation for tests.
# To implement the next steps, create the test table:
- name: postgresql_idx - create test table called test_table
  become_user: "{{ pg_user }}"
  become: yes
  shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);"
  ignore_errors: yes

# Create a directory for test tablespace:
- name: postgresql_idx - drop test tablespace called ssd if exists
  become_user: "{{ pg_user }}"
  become: yes
  shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLESPACE IF EXISTS ssd;"
  ignore_errors: yes

- name: postgresql_idx - drop dir for test tablespace
  become: yes
  file:
    path: /mnt/ssd
    state: absent
  ignore_errors: yes

- name: postgresql_idx - create dir for test tablespace
  become: yes
  file:
    path: /mnt/ssd
    state: directory
    owner: "{{ pg_user }}"
    mode: 0755
  ignore_errors: yes

# Then create a test tablespace:
- name: postgresql_idx - create test tablespace called ssd
  become_user: "{{ pg_user }}"
  become: yes
  shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLESPACE ssd LOCATION '/mnt/ssd';"
  ignore_errors: yes
  register: tablespace

# Create a test schema:
- name: postgresql_idx - create test schema
  become_user: "{{ pg_user }}"
  become: yes
  shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE SCHEMA foo;"
  ignore_errors: yes

# Create a table in schema foo:
- name: postgresql_idx - create table in non-default schema
  become_user: "{{ pg_user }}"
  become: yes
  shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE foo.foo_table (id int, story text);"
  ignore_errors: yes


###############
# Do main tests
#

# Create btree index if not exists test_idx concurrently covering id and story columns
- name: postgresql_idx - create btree index concurrently
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_idx:
    db: postgres
    login_user: "{{ pg_user }}"
    table: test_table
    columns: id, story
    idxname: test0_idx
  register: result
  ignore_errors: yes

- assert:
    that:
    - result.changed == true
    - result.tblname == 'test_table'
    - result.name == 'test0_idx'
    - result.state == 'present'
    - result.valid != ''
    - result.tblspace == ''
    - result.storage_params == []
    - result.schema == 'public'
    - result.query == 'CREATE INDEX CONCURRENTLY test0_idx ON public.test_table USING BTREE (id, story)'

# Check that creating an index that already exists changes nothing
- name: postgresql_idx - try to create existing index again
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_idx:
    db: postgres
    login_user: "{{ pg_user }}"
    table: test_table
    columns: id, story
    idxname: test0_idx
  register: result
  ignore_errors: yes

- assert:
    that:
    - result.changed == false
    - result.tblname == 'test_table'
    - result.name == 'test0_idx'
    - result.state == 'present'
    - result.valid != ''
    - result.tblspace == ''
    - result.storage_params == []
    - result.schema == 'public'
    - result.query == ''

# Create btree index foo_test_idx concurrently with tablespace called ssd,
# storage parameter, and non-default schema
- name: postgresql_idx - create btree index - non-default schema, tablespace, storage parameter
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_idx:
    db: postgres
    login_user: "{{ pg_user }}"
    schema: foo
    table: foo_table
    columns:
    - id
    - story
    idxname: foo_test_idx
    tablespace: ssd
    storage_params: fillfactor=90
  register: result
  ignore_errors: yes

- assert:
    that:
    - result.changed == true
    - result.tblname == 'foo_table'
    - result.name == 'foo_test_idx'
    - result.state == 'present'
    - result.valid != ''
    - result.tblspace == 'ssd'
    - result.storage_params == [ "fillfactor=90" ]
    - result.schema == 'foo'
    - result.query == 'CREATE INDEX CONCURRENTLY foo_test_idx ON foo.foo_table USING BTREE (id,story) WITH (fillfactor=90) TABLESPACE ssd'
  when: tablespace.rc == 0

# Create brin index not in concurrent mode
- name: postgresql_idx - create brin index not concurrently
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_idx:
    db: postgres
    login_user: "{{ pg_user }}"
    schema: public
    table: test_table
    state: present
    type: brin
    columns: id
    idxname: test_brin_idx
    concurrent: no
  register: result
  ignore_errors: yes

- assert:
    that:
    - result.changed == true
    - result.tblname == 'test_table'
    - result.name == 'test_brin_idx'
    - result.state == 'present'
    - result.valid != ''
    - result.tblspace == ''
    - result.storage_params == []
    - result.schema == 'public'
    - result.query == 'CREATE INDEX test_brin_idx ON public.test_table USING brin (id)'
  when: postgres_version_resp.stdout is version('9.5', '>=')


# Create index where column id > 1
- name: postgresql_idx - create index with condition
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_idx:
    db: postgres
    login_user: "{{ pg_user }}"
    table: test_table
    columns: id
    idxname: test1_idx
    cond: id > 1 AND id != 10
  register: result
  ignore_errors: yes

- assert:
    that:
    - result.changed == true
    - result.tblname == 'test_table'
    - result.name == 'test1_idx'
    - result.state == 'present'
    - result.valid != ''
    - result.tblspace == ''
    - result.storage_params == []
    - result.schema == 'public'
    - result.query == 'CREATE INDEX CONCURRENTLY test1_idx ON public.test_table USING BTREE (id) WHERE id > 1 AND id != 10'

# Drop index from specific schema with cascade
- name: postgresql_idx - drop index from specific schema cascade
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_idx:
    db: postgres
    login_user: "{{ pg_user }}"
    schema: foo
    name: foo_test_idx
    cascade: yes
    state: absent
    concurrent: no
  register: result
  ignore_errors: yes

- assert:
    that:
    - result.changed == true
    - result.name == 'foo_test_idx'
    - result.state == 'absent'
    - result.schema == 'foo'
    - result.query == 'DROP INDEX foo.foo_test_idx CASCADE'
  when: tablespace.rc == 0

# Try to drop a non-existing index
- name: postgresql_idx - try to drop not existing index
  become_user: "{{ pg_user }}"
  become: yes
  postgresql_idx:
    db: postgres
    login_user: "{{ pg_user }}"
    schema: foo
    name: foo_test_idx
    state: absent
  register: result
  ignore_errors: yes

- assert:
    that:
    - result.changed == false
    - result.query == ''
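The assertions above check the facts the module reports; the same state can also be inspected directly in the database with the catalog query used by Index.__exists_in_db() earlier in this patch. A small psycopg2 sketch (parameterized here, connection settings assumed, not part of the patch):

    # Sketch: inspect an index the way Index.__exists_in_db() does.
    import psycopg2

    conn = psycopg2.connect(dbname='postgres', user='postgres')  # assumed local server
    cur = conn.cursor()
    cur.execute(
        "SELECT i.schemaname, i.tablename, i.tablespace, pi.indisvalid, c.reloptions "
        "FROM pg_catalog.pg_indexes AS i "
        "JOIN pg_catalog.pg_class AS c ON i.indexname = c.relname "
        "JOIN pg_catalog.pg_index AS pi ON c.oid = pi.indexrelid "
        "WHERE i.indexname = %s", ('test0_idx',)
    )
    print(cur.fetchone())  # e.g. ('public', 'test_table', None, True, None)
    conn.close()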