diff --git a/lib/ansible/module_utils/aws/core.py b/lib/ansible/module_utils/aws/core.py index 759a03e5ac..963bdf8049 100644 --- a/lib/ansible/module_utils/aws/core.py +++ b/lib/ansible/module_utils/aws/core.py @@ -283,3 +283,15 @@ def is_boto3_error_code(code, e=None): if isinstance(e, ClientError) and e.response['Error']['Code'] == code: return ClientError return type('NeverEverRaisedException', (Exception,), {}) + + +def get_boto3_client_method_parameters(client, method_name, required=False): + op = client.meta.method_to_api_mapping.get(method_name) + input_shape = client._service_model.operation_model(op).input_shape + if not input_shape: + parameters = [] + elif required: + parameters = list(input_shape.required_members) + else: + parameters = list(input_shape.members.keys()) + return parameters diff --git a/lib/ansible/module_utils/aws/rds.py b/lib/ansible/module_utils/aws/rds.py new file mode 100644 index 0000000000..e665a162d5 --- /dev/null +++ b/lib/ansible/module_utils/aws/rds.py @@ -0,0 +1,229 @@ +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible.module_utils._text import to_text +from ansible.module_utils.aws.waiters import get_waiter +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible.module_utils.ec2 import compare_aws_tags, AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict + +try: + from botocore.exceptions import BotoCoreError, ClientError, WaiterError +except ImportError: + pass + +from collections import namedtuple +from time import sleep + + +Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'cluster', 'instance']) +# Whitelist boto3 client methods for cluster and instance resources +cluster_method_names = [ + 'create_db_cluster', 'restore_db_cluster_from_db_snapshot', 'restore_db_cluster_from_s3', + 'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource', + 'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster' +] +instance_method_names = [ + 'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3', + 'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance', + 'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource', + 'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance' +] + + +def get_rds_method_attribute(method_name, module): + readable_op = method_name.replace('_', ' ').replace('db', 'DB') + if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params: + cluster = True + instance = False + if method_name == 'delete_db_cluster': + waiter = 'cluster_deleted' + else: + waiter = 'cluster_available' + elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params: + cluster = False + instance = True + if method_name == 'delete_db_instance': + waiter = 'db_instance_deleted' + elif method_name == 'stop_db_instance': + waiter = 'db_instance_stopped' + else: + waiter = 'db_instance_available' + else: + raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/aws/rds.py".format(method_name)) + + return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op, cluster=cluster, 
instance=instance) + + +def get_final_identifier(method_name, module): + apply_immediately = module.params['apply_immediately'] + if get_rds_method_attribute(method_name, module).cluster: + identifier = module.params['db_cluster_identifier'] + updated_identifier = module.params['new_db_cluster_identifier'] + elif get_rds_method_attribute(method_name, module).instance: + identifier = module.params['db_instance_identifier'] + updated_identifier = module.params['new_db_instance_identifier'] + else: + raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/aws/rds.py".format(method_name)) + if not module.check_mode and updated_identifier and apply_immediately: + identifier = updated_identifier + return identifier + + +def handle_errors(module, exception, method_name, parameters): + + if not isinstance(exception, ClientError): + module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters)) + + changed = True + error_code = exception.response['Error']['Code'] + if method_name == 'modify_db_instance' and error_code == 'InvalidParameterCombination': + if 'No modifications were requested' in to_text(exception): + changed = False + elif 'ModifyDbCluster API' in to_text(exception): + module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster') + else: + module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState': + if 'DB Instance is not a read replica' in to_text(exception): + changed = False + else: + module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + elif method_name == 'create_db_instance' and exception.response['Error']['Code'] == 'InvalidParameterValue': + accepted_engines = [ + 'aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-se', + 'oracle-se1', 'oracle-se2', 'postgres', 'sqlserver-ee', 'sqlserver-ex', 'sqlserver-se', 'sqlserver-web' + ] + if parameters.get('Engine') not in accepted_engines: + module.fail_json_aws(exception, msg='DB engine {0} should be one of {1}'.format(parameters.get('Engine'), accepted_engines)) + else: + module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + else: + module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + + return changed + + +def call_method(client, module, method_name, parameters): + result = {} + changed = True + if not module.check_mode: + wait = module.params['wait'] + # TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes + method = getattr(client, method_name) + try: + if method_name == 'modify_db_instance': + # check if instance is in an available state first, if possible + if wait: + wait_for_status(client, module, module.params['db_instance_identifier'], method_name) + result = AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidDBInstanceState'])(method)(**parameters) + else: + result = AWSRetry.jittered_backoff()(method)(**parameters) + except (BotoCoreError, ClientError) as e: + changed = handle_errors(module, e, method_name, parameters) + + if wait and changed: + identifier = 
get_final_identifier(method_name, module) + wait_for_status(client, module, identifier, method_name) + return result, changed + + +def wait_for_instance_status(client, module, db_instance_id, waiter_name): + def wait(client, db_instance_id, waiter_name, extra_retry_codes): + retry = AWSRetry.jittered_backoff(catch_extra_error_codes=extra_retry_codes) + try: + waiter = client.get_waiter(waiter_name) + except ValueError: + # using a waiter in ansible.module_utils.aws.waiters + waiter = get_waiter(client, waiter_name) + waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id) + + waiter_expected_status = { + 'db_instance_deleted': 'deleted', + 'db_instance_stopped': 'stopped', + } + expected_status = waiter_expected_status.get(waiter_name, 'available') + if expected_status == 'available': + extra_retry_codes = ['DBInstanceNotFound'] + else: + extra_retry_codes = [] + for attempt_to_wait in range(0, 10): + try: + wait(client, db_instance_id, waiter_name, extra_retry_codes) + break + except WaiterError as e: + # Instance may be renamed and AWSRetry doesn't handle WaiterError + if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound': + sleep(10) + continue + module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format( + db_instance_id, expected_status) + ) + + +def wait_for_cluster_status(client, module, db_cluster_id, waiter_name): + try: + waiter = get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id) + except WaiterError as e: + if waiter_name == 'cluster_deleted': + msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id) + else: + msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id) + module.fail_json_aws(e, msg=msg) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id)) + + +def wait_for_status(client, module, identifier, method_name): + waiter_name = get_rds_method_attribute(method_name, module).waiter + if get_rds_method_attribute(method_name, module).cluster: + wait_for_cluster_status(client, module, identifier, waiter_name) + elif get_rds_method_attribute(method_name, module).instance: + wait_for_instance_status(client, module, identifier, waiter_name) + else: + raise NotImplementedError("method {0} hasn't been added to the whitelist of handled methods".format(method_name)) + + +def get_tags(client, module, cluster_arn): + try: + return boto3_tag_list_to_ansible_dict( + client.list_tags_for_resource(ResourceName=cluster_arn)['TagList'] + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe tags") + + +def arg_spec_to_rds_params(options_dict): + tags = options_dict.pop('tags') + has_processor_features = False + if 'processor_features' in options_dict: + has_processor_features = True + processor_features = options_dict.pop('processor_features') + camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True) + for key in list(camel_options.keys()): + for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')): + if old in key: + camel_options[key.replace(old, new)] = camel_options.pop(key) + camel_options['Tags'] = tags + if has_processor_features: + camel_options['ProcessorFeatures'] = 
processor_features + return camel_options + + +def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): + if tags is None: + return False + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags) + changed = bool(tags_to_add or tags_to_remove) + if tags_to_add: + call_method( + client, module, method_name='add_tags_to_resource', + parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)} + ) + if tags_to_remove: + call_method( + client, module, method_name='remove_tags_from_resource', + parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove} + ) + return changed diff --git a/lib/ansible/module_utils/aws/waiters.py b/lib/ansible/module_utils/aws/waiters.py index dd4b58ada0..5b7da17938 100644 --- a/lib/ansible/module_utils/aws/waiters.py +++ b/lib/ansible/module_utils/aws/waiters.py @@ -204,6 +204,26 @@ eks_data = { } +rds_data = { + "version": 2, + "waiters": { + "DBInstanceStopped": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeDBInstances", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBInstances[].DBInstanceStatus", + "expected": "stopped" + }, + ] + } + } +} + + def ec2_model(name): ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data) return ec2_models.get_waiter(name) @@ -219,6 +239,11 @@ def eks_model(name): return eks_models.get_waiter(name) +def rds_model(name): + rds_models = core_waiter.WaiterModel(waiter_config=rds_data) + return rds_models.get_waiter(name) + + waiters_by_name = { ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter( 'route_table_exists', @@ -286,6 +311,12 @@ waiters_by_name = { core_waiter.NormalizedOperationMethod( eks.describe_cluster )), + ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter( + 'db_instance_stopped', + rds_model('DBInstanceStopped'), + core_waiter.NormalizedOperationMethod( + rds.describe_db_instances + )), } diff --git a/lib/ansible/modules/cloud/amazon/rds_instance.py b/lib/ansible/modules/cloud/amazon/rds_instance.py new file mode 100644 index 0000000000..834b8521f6 --- /dev/null +++ b/lib/ansible/modules/cloud/amazon/rds_instance.py @@ -0,0 +1,1157 @@ +#!/usr/bin/python +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: rds_instance +version_added: "2.7" +short_description: Manage RDS instances +description: + - Create, modify, and delete RDS instances. + +requirements: + - botocore + - boto3 >= 1.5.0 +extends_documentation_fragment: + - aws + - ec2 +author: + - Sloane Hertel (@s-hertel) + +options: + # General module options + state: + description: + - Whether the snapshot should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state + and start it prior to rebooting if it was stopped. I(present) will leave the DB instance in the current running/stopped state, + (running if creating the DB instance). + - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). Note - rebooting the instance + is not idempotent. 
+ choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'] + default: 'present' + creation_source: + description: Which source to use if restoring from a template (an existing instance, S3 bucket, or snapshot). + choices: ['snapshot', 's3', 'instance'] + force_update_password: + description: + - Set to True to update your cluster password with I(master_user_password). Since comparing passwords to determine + if it needs to be updated is not possible this is set to False by default to allow idempotence. + type: bool + default: False + purge_cloudwatch_logs_exports: + description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance. + type: bool + default: True + purge_tags: + description: Set to False to retain any tags that aren't specified in task and are associated with the instance. + type: bool + default: True + read_replica: + description: + - Set to False to promote a read replica cluster or true to create one. When creating a read replica C(creation_source) should + be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option. + type: bool + wait: + description: + - Whether to wait for the cluster to be available, stopped, or deleted. At a later time a wait_timeout option may be added. + Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches + the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the + instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting). + If setting this to False task retries and delays may make your playbook execution better handle timeouts for major modifications. + type: bool + default: True + + # Options that have a corresponding boto3 parameter + allocated_storage: + description: + - The amount of storage (in gibibytes) to allocate for the DB instance. + allow_major_version_upgrade: + description: + - Whether to allow major version upgrades. + type: bool + apply_immediately: + description: + - A value that specifies whether modifying a cluster with I(new_db_instance_identifier) and I(master_user_password) + should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes + are applied during the next maintenance window. + type: bool + default: False + auto_minor_version_upgrade: + description: + - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window. + type: bool + availability_zone: + description: + - A list of EC2 Availability Zones that instances in the DB cluster can be created in. + May be used when creating a cluster or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az). + aliases: + - az + - zone + backup_retention_period: + description: + - The number of days for which automated backups are retained (must be greater or equal to 1). + May be used when creating a new cluster, when restoring from S3, or when modifying a cluster. + ca_certificate_identifier: + description: + - The identifier of the CA certificate for the DB instance. + character_set_name: + description: + - The character set to associate with the DB cluster. 
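# Illustrative sketch (not part of the patch): how the snake_case options documented here are
# expected to end up as boto3 API parameter names, using the arg_spec_to_rds_params() helper that
# this diff adds to module_utils/aws/rds.py. The option values below are made-up placeholders,
# and running this assumes an Ansible tree with this patch applied.
from ansible.module_utils.aws.rds import arg_spec_to_rds_params

module_options = {
    'db_instance_identifier': 'ansible-test-db',    # hypothetical identifier
    'db_instance_class': 'db.t2.micro',
    'allocated_storage': 20,
    'enable_iam_database_authentication': True,
    'tags': {'Role': 'testing'},
}
# Pass a copy because the helper pops 'tags' (and 'processor_features') out of the dict it is given.
print(arg_spec_to_rds_params(dict(module_options)))
# Expected shape: {'DBInstanceIdentifier': 'ansible-test-db', 'DBInstanceClass': 'db.t2.micro',
#                  'AllocatedStorage': 20, 'EnableIAMDatabaseAuthentication': True,
#                  'Tags': {'Role': 'testing'}}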
+ copy_tags_to_snapshot: + description: + - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating + a DB instance the RDS API defaults this to false if unspecified. + type: bool + db_cluster_identifier: + description: + - The DB cluster (lowercase) identifier to add the aurora DB instance to. The identifier must contain from 1 to + 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or + contain consecutive hyphens. + aliases: + - cluster_id + db_instance_class: + description: + - The compute and memory capacity of the DB instance, for example db.t2.micro. + aliases: + - class + - instance_type + db_instance_identifier: + description: + - The DB instance (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or + hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens. + aliases: + - instance_id + - id + required: True + db_name: + description: + - The name for your database. If a name is not provided Amazon RDS will not create a database. + db_parameter_group_name: + description: + - The name of the DB parameter group to associate with this DB instance. When creating the DB instance if this + argument is omitted the default DBParameterGroup for the specified engine is used. + db_security_groups: + description: + - (EC2-Classic platform) A list of DB security groups to associate with this DB instance. + type: list + db_snapshot_identifier: + description: + - The identifier for the DB snapshot to restore from if using I(creation_source=snapshot). + db_subnet_group_name: + description: + - The DB subnet group name to use for the DB instance. + aliases: + - subnet_group + domain: + description: + - The Active Directory Domain to restore the instance in. + domain_iam_role_name: + description: + - The name of the IAM role to be used when making API calls to the Directory Service. + enable_cloudwatch_logs_exports: + description: + - A list of log types that need to be enabled for exporting to CloudWatch Logs. + aliases: + - cloudwatch_log_exports + type: list + enable_iam_database_authentication: + description: + - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. + If this option is omitted when creating the cluster, Amazon RDS sets this to False. + type: bool + enable_performance_insights: + description: + - Whether to enable Performance Insights for the DB instance. + type: bool + engine: + description: + - The name of the database engine to be used for this DB instance. This is required to create an instance. + Valid choices are aurora | aurora-mysql | aurora-postgresql | mariadb | mysql | oracle-ee | oracle-se | + oracle-se1 | oracle-se2 | postgres | sqlserver-ee | sqlserver-ex | sqlserver-se | sqlserver-web + engine_version: + description: + - The version number of the database engine to use. For Aurora MySQL that could be 5.6.10a , 5.7.12. + Aurora PostgreSQL example, 9.6.3 + final_db_snapshot_identifier: + description: + - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false. + aliases: + - final_snapshot_identifier + force_failover: + description: + - Set to true to conduct the reboot through a MultiAZ failover. + type: bool + iops: + description: + - The Provisioned IOPS (I/O operations per second) value. + kms_key_id: + description: + - The ARN of the AWS KMS key identifier for an encrypted DB instance. 
If you are creating a DB instance with the
+        same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key
+        alias instead of the ARN for the KMS encryption key.
+      - If I(storage_encrypted) is true and this option is not provided, the default encryption key is used.
+  license_model:
+    description:
+      - The license model for the DB instance.
+    choices:
+      - license-included
+      - bring-your-own-license
+      - general-public-license
+  master_user_password:
+    description:
+      - An 8-41 character password for the master database user. The password can contain any printable ASCII character
+        except "/", '"', or "@". To modify the password use I(force_update_password). Use I(apply_immediately) to change
+        the password immediately, otherwise it is updated during the next maintenance window.
+    aliases:
+      - password
+  master_username:
+    description:
+      - The name of the master user for the DB instance. Must be 1-16 letters or numbers and begin with a letter.
+    aliases:
+      - username
+  monitoring_interval:
+    description:
+      - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting
+        metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance.
+  monitoring_role_arn:
+    description:
+      - The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs.
+  multi_az:
+    description:
+      - Specifies if the DB instance is a Multi-AZ deployment. Mutually exclusive with I(availability_zone).
+    type: bool
+  new_db_instance_identifier:
+    description:
+      - The new DB instance (lowercase) identifier to use when renaming a DB instance. The identifier must contain
+        from 1 to 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or
+        contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it is updated during the
+        next maintenance window.
+    aliases:
+      - new_instance_id
+      - new_id
+  option_group_name:
+    description:
+      - The option group to associate with the DB instance.
+  performance_insights_kms_key_id:
+    description:
+      - The AWS KMS key identifier (ARN, name, or alias) for encryption of Performance Insights data.
+  performance_insights_retention_period:
+    description:
+      - The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731.
+  port:
+    description:
+      - The port number on which the instance accepts connections.
+  preferred_backup_window:
+    description:
+      - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
+        enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
+        I(preferred_maintenance_window).
+    aliases:
+      - backup_window
+  preferred_maintenance_window:
+    description:
+      - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
+        be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
+    aliases:
+      - maintenance_window
+  processor_features:
+    description:
+      - A dictionary of Name, Value pairs to indicate the number of CPU cores and the number of threads per core for the
+        DB instance class of the DB instance. Names are threadsPerCore and coreCount.
+        Set this option to an empty dictionary to use the default processor features.
+ suboptions: + threadsPerCore: + description: The number of threads per core + coreCount: + description: The number of CPU cores + promotion_tier: + description: + - An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of + the existing primary instance. + publicly_accessible: + description: + - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with + a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal + instance with a DNS name that resolves to a private IP address. + type: bool + restore_time: + description: + - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance. + For example, "2009-09-07T23:45:00Z". May alternatively set c(use_latest_restore_time) to True. + s3_bucket_name: + description: + - The name of the Amazon S3 bucket that contains the data used to create the Amazon DB instance. + s3_ingestion_role_arn: + description: + - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access + the Amazon S3 bucket on your behalf. + s3_prefix: + description: + - The prefix for all of the file names that contain the data used to create the Amazon DB instance. If you do not + specify a SourceS3Prefix value, then the Amazon DB instance is created by using all of the files in the Amazon S3 bucket. + skip_final_snapshot: + description: + - Whether a final DB cluster snapshot is created before the DB cluster is deleted. If this is false I(final_db_snapshot_identifier) + must be provided. + type: bool + default: false + snapshot_identifier: + description: + - The ARN of the DB snapshot to restore from when using I(creation_source=snapshot). + source_db_instance_identifier: + description: + - The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time + DB instance using I(creation_source=instance). If the source DB is not in the same region this should be an ARN. + source_engine: + description: + - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket. + choices: + - mysql + source_engine_version: + description: + - The version of the database that the backup files were created from. + source_region: + description: + - The region of the DB instance from which the replica is created. + storage_encrypted: + description: + - Whether the DB instance is encrypted. + type: bool + storage_type: + description: + - The storage type to be associated with the DB instance. I(storage_type) does not apply to Aurora DB instances. + choices: + - standard + - gp2 + - io1 + tags: + description: + - A dictionary of key value pairs to assign the DB cluster. + tde_credential_arn: + description: + - The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is + supported by Oracle or SQL Server DB instances and may be used in conjunction with C(storage_encrypted) + though it might slightly affect the performance of your database. + aliases: + - transparent_data_encryption_arn + tde_credential_password: + description: + - The password for the given ARN from the key store in order to access the device. + aliases: + - transparent_data_encryption_password + timezone: + description: + - The time zone of the DB instance. 
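# Illustrative sketch (not part of the patch): the shape the processor_features option documented
# above takes, and how get_parameters() in this module converts it into the ProcessorFeatures list
# the RDS API expects. The values are hypothetical.
processor_features = {'coreCount': 1, 'threadsPerCore': 1}

# Mirrors parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in ...]
api_processor_features = [
    {'Name': name, 'Value': str(value)} for name, value in processor_features.items()
]
print(api_processor_features)
# [{'Name': 'coreCount', 'Value': '1'}, {'Name': 'threadsPerCore', 'Value': '1'}]
# When modifying an existing instance, an empty dict ({}) is instead translated to
# UseDefaultProcessorFeatures=True by get_changing_options_with_inconsistent_keys().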
+ use_latest_restorable_time: + description: + - Whether to restore the DB instance to the latest restorable backup time. Only one of I(use_latest_restorable_time) + and I(restore_to_time) may be provided. + type: bool + aliases: + - restore_from_latest + vpc_security_group_ids: + description: + - A list of EC2 VPC security groups to associate with the DB cluster. + type: list +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: create minimal aurora instance in default VPC and default subnet group + rds_instance: + engine: aurora + db_instance_identifier: ansible-test-aurora-db-instance + instance_type: db.t2.small + password: "{{ password }}" + username: "{{ username }}" + cluster_id: ansible-test-cluster # This cluster must exist - see rds_cluster to manage it + +- name: Create a DB instance using the default AWS KMS encryption key + rds_instance: + id: test-encrypted-db + state: present + engine: mariadb + storage_encrypted: True + db_instance_class: db.t2.medium + username: "{{ username }}" + password: "{{ password }}" + allocated_storage: "{{ allocated_storage }}" + +- name: remove the DB instance without a final snapshot + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + +- name: remove the DB instance with a final snapshot + rds_instance: + id: "{{ instance_id }}" + state: absent + final_snapshot_identifier: "{{ snapshot_id }}" +''' + +RETURN = ''' +allocated_storage: + description: The allocated storage size in gibibytes. This is always 1 for aurora database engines. + returned: always + type: int + sample: 20 +auto_minor_version_upgrade: + description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. + returned: always + type: bool + sample: true +availability_zone: + description: The availability zone for the DB instance. + returned: always + type: string + sample: us-east-1f +backup_retention_period: + description: The number of days for which automated backups are retained. + returned: always + type: int + sample: 1 +ca_certificate_identifier: + description: The identifier of the CA certificate for the DB instance. + returned: always + type: string + sample: rds-ca-2015 +copy_tags_to_snapshot: + description: Whether tags are copied from the DB instance to snapshots of the DB instance. + returned: always + type: bool + sample: false +db_instance_arn: + description: The Amazon Resource Name (ARN) for the DB instance. + returned: always + type: string + sample: arn:aws:rds:us-east-1:123456789012:db:ansible-test +db_instance_class: + description: The name of the compute and memory capacity class of the DB instance. + returned: always + type: string + sample: db.m4.large +db_instance_identifier: + description: The identifier of the DB instance + returned: always + type: string + sample: ansible-test +db_instance_port: + description: The port that the DB instance listens on. + returned: always + type: int + sample: 0 +db_instance_status: + description: The current state of this database. + returned: always + type: string + sample: stopped +db_parameter_groups: + description: The list of DB parameter groups applied to this DB instance. + returned: always + type: complex + contains: + db_parameter_group_name: + description: The name of the DP parameter group. + returned: always + type: string + sample: default.mariadb10.0 + parameter_apply_status: + description: The status of parameter updates. 
+ returned: always + type: string + sample: in-sync +db_security_groups: + description: A list of DB security groups associated with this DB instance. + returned: always + type: list + sample: [] +db_subnet_group: + description: The subnet group associated with the DB instance. + returned: always + type: complex + contains: + db_subnet_group_description: + description: The description of the DB subnet group. + returned: always + type: string + sample: default + db_subnet_group_name: + description: The name of the DB subnet group. + returned: always + type: string + sample: default + subnet_group_status: + description: The status of the DB subnet group. + returned: always + type: string + sample: Complete + subnets: + description: A list of Subnet elements. + returned: always + type: complex + contains: + subnet_availability_zone: + description: The availability zone of the subnet. + returned: always + type: complex + contains: + name: + description: The name of the Availability Zone. + returned: always + type: string + sample: us-east-1c + subnet_identifier: + description: The ID of the subnet. + returned: always + type: string + sample: subnet-12345678 + subnet_status: + description: The status of the subnet. + returned: always + type: string + sample: Active + vpc_id: + description: The VpcId of the DB subnet group. + returned: always + type: string + sample: vpc-12345678 +dbi_resource_id: + description: The AWS Region-unique, immutable identifier for the DB instance. + returned: always + type: string + sample: db-UHV3QRNWX4KB6GALCIGRML6QFA +domain_memberships: + description: The Active Directory Domain membership records associated with the DB instance. + returned: always + type: list + sample: [] +endpoint: + description: The connection endpoint. + returned: always + type: complex + contains: + address: + description: The DNS address of the DB instance. + returned: always + type: string + sample: ansible-test.cvlrtwiennww.us-east-1.rds.amazonaws.com + hosted_zone_id: + description: The ID that Amazon Route 53 assigns when you create a hosted zone. + returned: always + type: string + sample: ZTR2ITUGPA61AM + port: + description: The port that the database engine is listening on. + returned: always + type: int + sample: 3306 +engine: + description: The database engine version. + returned: always + type: string + sample: mariadb +engine_version: + description: The database engine version. + returned: always + type: string + sample: 10.0.35 +iam_database_authentication_enabled: + description: Whether mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. + returned: always + type: bool + sample: false +instance_create_time: + description: The date and time the DB instance was created. + returned: always + type: string + sample: '2018-07-04T16:48:35.332000+00:00' +kms_key_id: + description: The AWS KMS key identifier for the encrypted DB instance when storage_encrypted is true. + returned: When storage_encrypted is true + type: string + sample: arn:aws:kms:us-east-1:123456789012:key/70c45553-ad2e-4a85-9f14-cfeb47555c33 +latest_restorable_time: + description: The latest time to which a database can be restored with point-in-time restore. + returned: always + type: string + sample: '2018-07-04T16:50:50.642000+00:00' +license_model: + description: The License model information for this DB instance. + returned: always + type: string + sample: general-public-license +master_username: + description: The master username for the DB instance. 
+ returned: always + type: string + sample: test +monitoring_interval: + description: + - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. + 0 means collecting Enhanced Monitoring metrics is disabled. + returned: always + type: int + sample: 0 +multi_az: + description: Whether the DB instance is a Multi-AZ deployment. + returned: always + type: bool + sample: false +option_group_memberships: + description: The list of option group memberships for this DB instance. + returned: always + type: complex + contains: + option_group_name: + description: The name of the option group that the instance belongs to. + returned: always + type: string + sample: default:mariadb-10-0 + status: + description: The status of the DB instance's option group membership. + returned: always + type: string + sample: in-sync +pending_modified_values: + description: The changes to the DB instance that are pending. + returned: always + type: complex + contains: {} +performance_insights_enabled: + description: True if Performance Insights is enabled for the DB instance, and otherwise false. + returned: always + type: bool + sample: false +preferred_backup_window: + description: The daily time range during which automated backups are created if automated backups are enabled. + returned: always + type: string + sample: 07:01-07:31 +preferred_maintenance_window: + description: The weekly time range (in UTC) during which system maintenance can occur. + returned: always + type: string + sample: sun:09:31-sun:10:01 +publicly_accessible: + description: + - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an + internal instance with a DNS name that resolves to a private IP address. + returned: always + type: bool + sample: true +read_replica_db_instance_identifiers: + description: Identifiers of the Read Replicas associated with this DB instance. + returned: always + type: list + sample: [] +storage_encrypted: + description: Whether the DB instance is encrypted. + returned: always + type: bool + sample: false +storage_type: + description: The storage type to be associated with the DB instance. + returned: always + type: string + sample: standard +tags: + description: A dictionary of tags associated with the DB instance. + returned: always + type: complex + contains: {} +vpc_security_groups: + description: A list of VPC security group elements that the DB instance belongs to. + returned: always + type: complex + contains: + status: + description: The status of the VPC security group. + returned: always + type: string + sample: active + vpc_security_group_id: + description: The name of the VPC security group. 
+ returned: always + type: string + sample: sg-12345678 +''' + +from ansible.module_utils._text import to_text +from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters +from ansible.module_utils.aws.rds import ensure_tags, arg_spec_to_rds_params, call_method, get_rds_method_attribute, get_tags, get_final_identifier +from ansible.module_utils.aws.waiters import get_waiter +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry +from ansible.module_utils.six import string_types + +from time import sleep + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # caught by AnsibleAWSModule + + +def get_rds_method_attribute_name(instance, state, creation_source, read_replica): + method_name = None + if state == 'absent' or state == 'terminated': + if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']: + method_name = 'delete_db_instance' + else: + if instance: + method_name = 'modify_db_instance' + elif read_replica is True: + method_name = 'create_db_instance_read_replica' + elif creation_source == 'snapshot': + method_name = 'restore_db_instance_from_db_snapshot' + elif creation_source == 's3': + method_name = 'restore_db_instance_from_s3' + elif creation_source == 'instance': + method_name = 'restore_db_instance_to_point_in_time' + else: + method_name = 'create_db_instance' + return method_name + + +def get_instance(client, module, db_instance_id): + try: + for i in range(3): + try: + instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0] + instance['Tags'] = get_tags(client, module, instance['DBInstanceArn']) + if instance.get('ProcessorFeatures'): + instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures']) + if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): + instance['PendingModifiedValues']['ProcessorFeatures'] = dict( + (feature['Name'], feature['Value']) + for feature in instance['PendingModifiedValues']['ProcessorFeatures'] + ) + break + except is_boto3_error_code('DBInstanceNotFound'): + sleep(3) + else: + instance = {} + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to describe DB instances') + return instance + + +def get_final_snapshot(client, module, snapshot_identifier): + try: + snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier) + if len(snapshots.get('DBSnapshots', [])) == 1: + return snapshots['DBSnapshots'][0] + return {} + except is_boto3_error_code('DBSnapshotNotFound') as e: # May not be using wait: True + return {} + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot') + + +def get_parameters(client, module, parameters, method_name): + required_options = get_boto3_client_method_parameters(client, method_name, required=True) + if any([parameters.get(k) is None for k in required_options]): + module.fail_json(msg='To {0} requires the parameters: {1}'.format( + get_rds_method_attribute(method_name, module).operation_description, required_options)) + options = get_boto3_client_method_parameters(client, method_name) + parameters = dict((k, v) for k, v in parameters.items() if k 
in options and v is not None) + + if parameters.get('ProcessorFeatures') is not None: + parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()] + + # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures) + if parameters.get('ProcessorFeatures') == [] and not method_name == 'modify_db_instance': + parameters.pop('ProcessorFeatures') + + if method_name == 'create_db_instance' and parameters.get('Tags'): + parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + if method_name == 'modify_db_instance': + parameters = get_options_with_changing_values(client, module, parameters) + if method_name == 'restore_db_instance_to_point_in_time': + parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier'] + + return parameters + + +def get_options_with_changing_values(client, module, parameters): + instance_id = module.params['db_instance_identifier'] + purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports'] + force_update_password = module.params['force_update_password'] + port = module.params['port'] + apply_immediately = parameters.pop('ApplyImmediately', None) + cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports'] + + if port: + parameters['DBPortNumber'] = port + if not force_update_password: + parameters.pop('MasterUserPassword', None) + if cloudwatch_logs_enabled: + parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled + + instance = get_instance(client, module, instance_id) + updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs) + updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance)) + parameters = updated_parameters + + if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'): + if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately: + parameters.pop('NewDBInstanceIdentifier') + + if parameters: + parameters['DBInstanceIdentifier'] = instance_id + if apply_immediately is not None: + parameters['ApplyImmediately'] = apply_immediately + + return parameters + + +def get_current_attributes_with_inconsistent_keys(instance): + options = {} + if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []): + current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable'] + current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable'] + options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled} + else: + options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []} + if instance.get('PendingModifiedValues', {}).get('Port'): + options['DBPortNumber'] = instance['PendingModifiedValues']['Port'] + else: + options['DBPortNumber'] = instance['Endpoint']['Port'] + if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'): + options['DBSubnetGroupName'] = instance['PendingModifiedValues']['DBSubnetGroupName'] + else: + options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName'] + if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): + 
options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures'] + else: + options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {}) + options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']] + options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']] + options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']] + options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']] + options['AllowMajorVersionUpgrade'] = None + options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled'] + options['EnablePerformanceInsights'] = instance['PerformanceInsightsEnabled'] + options['MasterUserPassword'] = None + options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier'] + + return options + + +def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs): + changing_params = {} + current_options = get_current_attributes_with_inconsistent_keys(instance) + + for option in current_options: + current_option = current_options[option] + desired_option = modify_params.pop(option, None) + if desired_option is None: + continue + + # TODO: allow other purge_option module parameters rather than just checking for things to add + if isinstance(current_option, list): + if isinstance(desired_option, list): + if set(desired_option) <= set(current_option): + continue + elif isinstance(desired_option, string_types): + if desired_option in current_option: + continue + + if current_option == desired_option: + continue + + if option == 'ProcessorFeatures' and desired_option == []: + changing_params['UseDefaultProcessorFeatures'] = True + elif option == 'CloudwatchLogsExportConfiguration': + format_option = {'EnableLogTypes': [], 'DisableLogTypes': []} + format_option['EnableLogTypes'] = list(desired_option.difference(current_option)) + if purge_cloudwatch_logs: + format_option['DisableLogTypes'] = list(current_option.difference(desired_option)) + if format_option['EnableLogTypes'] or format_option['DisableLogTypes']: + changing_params[option] = format_option + else: + changing_params[option] = desired_option + + return changing_params + + +def get_changing_options_with_consistent_keys(modify_params, instance): + inconsistent_parameters = list(modify_params.keys()) + changing_params = {} + + for param in modify_params: + current_option = instance.get('PendingModifiedValues', {}).get(param) + if current_option is None: + current_option = instance[param] + if modify_params[param] != current_option: + changing_params[param] = modify_params[param] + + return changing_params + + +def validate_options(client, module, instance): + state = module.params['state'] + skip_final_snapshot = module.params['skip_final_snapshot'] + snapshot_id = module.params['final_db_snapshot_identifier'] + modified_id = module.params['new_db_instance_identifier'] + engine = module.params['engine'] + tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn']) + read_replica = module.params['read_replica'] + creation_source = module.params['creation_source'] + source_instance = module.params['source_db_instance_identifier'] + if module.params['source_region'] is not None: + same_region = bool(module.params['source_region'] == module.params['region']) + 
else: + same_region = True + + if modified_id: + modified_instance = get_instance(client, module, modified_id) + else: + modified_instance = {} + + if modified_id and instance and modified_instance: + module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id)) + if modified_id and not instance and modified_instance: + module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not exist'.format(modified_id)) + if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None: + module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier') + if engine is not None and not (engine.startswith('mysql') or engine.startswith('oracle')) and tde_options: + module.fail_json(msg='TDE is available for MySQL and Oracle DB instances') + if read_replica is True and not instance and creation_source not in [None, 'instance']: + module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source)) + if read_replica is True and not instance and not source_instance: + module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier') + + +def update_instance(client, module, instance, instance_id): + changed = False + + # Get newly created DB instance + if not instance: + instance = get_instance(client, module, instance_id) + + # Check tagging/promoting/rebooting/starting/stopping instance + changed |= ensure_tags( + client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags'] + ) + changed |= promote_replication_instance(client, module, instance, module.params['read_replica']) + changed |= update_instance_state(client, module, instance, module.params['state']) + + return changed + + +def promote_replication_instance(client, module, instance, read_replica): + changed = False + if read_replica is False: + changed = bool(instance.get('ReadReplicaSourceDBInstanceIdentifier') or instance.get('StatusInfos')) + if changed: + try: + call_method(client, module, method_name='promote_read_replica', parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) + changed = True + except is_boto3_error_code('InvalidDBInstanceState') as e: + if 'DB Instance is not a read replica' in e.response['Error']['Message']: + pass + else: + raise e + return changed + + +def update_instance_state(client, module, instance, state): + changed = False + if state in ['rebooted', 'restarted']: + changed |= reboot_running_db_instance(client, module, instance) + if state in ['started', 'running', 'stopped']: + changed |= start_or_stop_instance(client, module, instance, state) + return changed + + +def reboot_running_db_instance(client, module, instance): + parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} + if instance['DBInstanceStatus'] in ['stopped', 'stopping']: + call_method(client, module, 'start_db_instance', parameters) + if module.params.get('force_failover') is not None: + parameters['ForceFailover'] = module.params['force_failover'] + results, changed = call_method(client, module, 'reboot_db_instance', parameters) + return changed + + +def start_or_stop_instance(client, module, instance, state): + changed = False + parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} + if state == 'stopped' and instance['DBInstanceStatus'] not in 
['stopping', 'stopped']: + if module.params['db_snapshot_identifier']: + parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] + result, changed = call_method(client, module, 'stop_db_instance', parameters) + elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']: + result, changed = call_method(client, module, 'start_db_instance', parameters) + return changed + + +def main(): + arg_spec = dict( + state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'), + creation_source=dict(choices=['snapshot', 's3', 'instance']), + force_update_password=dict(type='bool', default=False), + purge_cloudwatch_logs_exports=dict(type='bool', default=True), + purge_tags=dict(type='bool', default=True), + read_replica=dict(type='bool'), + wait=dict(type='bool', default=True), + ) + + parameter_options = dict( + allocated_storage=dict(type='int'), + allow_major_version_upgrade=dict(type='bool'), + apply_immediately=dict(type='bool', default=False), + auto_minor_version_upgrade=dict(type='bool'), + availability_zone=dict(aliases=['az', 'zone']), + backup_retention_period=dict(type='int'), + ca_certificate_identifier=dict(), + character_set_name=dict(), + copy_tags_to_snapshot=dict(type='bool'), + db_cluster_identifier=dict(aliases=['cluster_id']), + db_instance_class=dict(aliases=['class', 'instance_type']), + db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']), + db_name=dict(), + db_parameter_group_name=dict(), + db_security_groups=dict(type='list'), + db_snapshot_identifier=dict(), + db_subnet_group_name=dict(aliases=['subnet_group']), + domain=dict(), + domain_iam_role_name=dict(), + enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports']), + enable_iam_database_authentication=dict(type='bool'), + enable_performance_insights=dict(type='bool'), + engine=dict(), + engine_version=dict(), + final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']), + force_failover=dict(type='bool'), + iops=dict(type='int'), + kms_key_id=dict(), + license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license']), + master_user_password=dict(aliases=['password'], no_log=True), + master_username=dict(aliases=['username']), + monitoring_interval=dict(type='int'), + monitoring_role_arn=dict(), + multi_az=dict(type='bool'), + new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']), + option_group_name=dict(), + performance_insights_kms_key_id=dict(), + performance_insights_retention_period=dict(), + port=dict(type='int'), + preferred_backup_window=dict(aliases=['backup_window']), + preferred_maintenance_window=dict(aliases=['maintenance_window']), + processor_features=dict(type='dict'), + promotion_tier=dict(), + publicly_accessible=dict(type='bool'), + restore_time=dict(), + s3_bucket_name=dict(), + s3_ingestion_role_arn=dict(), + s3_prefix=dict(), + skip_final_snapshot=dict(type='bool', default=False), + snapshot_identifier=dict(), + source_db_instance_identifier=dict(), + source_engine=dict(choices=['mysql']), + source_engine_version=dict(), + source_region=dict(), + storage_encrypted=dict(type='bool'), + storage_type=dict(choices=['standard', 'gp2', 'io1']), + tags=dict(type='dict'), + tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']), + tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']), + timezone=dict(), + 
use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']),
+        vpc_security_group_ids=dict(type='list')
+    )
+    arg_spec.update(parameter_options)
+
+    required_if = [
+        ('engine', 'aurora', ('cluster_id',)),
+        ('engine', 'aurora-mysql', ('cluster_id',)),
+        ('engine', 'aurora-postgresql', ('cluster_id',)),
+        ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')),
+        ('creation_source', 's3', (
+            's3_bucket_name', 'engine', 'master_username', 'master_user_password',
+            'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
+    ]
+    mutually_exclusive = [
+        ('s3_bucket_name', 'source_db_instance_identifier', 'snapshot_identifier'),
+        ('use_latest_restorable_time', 'restore_time'),
+        ('availability_zone', 'multi_az'),
+    ]
+
+    module = AnsibleAWSModule(
+        argument_spec=arg_spec,
+        required_if=required_if,
+        mutually_exclusive=mutually_exclusive,
+        supports_check_mode=True
+    )
+
+    if not module.boto3_at_least('1.5.0'):
+        module.fail_json(msg="rds_instance requires boto3 >= 1.5.0")
+
+    # Sanitize instance identifiers
+    module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower()
+    if module.params['new_db_instance_identifier']:
+        module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower()
+
+    # Sanitize processor features
+    if module.params['processor_features'] is not None:
+        module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items())
+
+    client = module.client('rds')
+    changed = False
+    state = module.params['state']
+    instance_id = module.params['db_instance_identifier']
+    instance = get_instance(client, module, instance_id)
+    validate_options(client, module, instance)
+    method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica'])
+
+    if method_name:
+        raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options))
+        parameters = get_parameters(client, module, raw_parameters, method_name)
+
+        if parameters:
+            result, changed = call_method(client, module, method_name, parameters)
+
+        instance_id = get_final_identifier(method_name, module)
+
+    # Check tagging/promoting/rebooting/starting/stopping instance
+    if state != 'absent' and (not module.check_mode or instance):
+        changed |= update_instance(client, module, instance, instance_id)
+
+    if changed:
+        instance = get_instance(client, module, instance_id)
+        if state != 'absent' and (instance or not module.check_mode):
+            for attempt_to_wait in range(0, 10):
+                instance = get_instance(client, module, instance_id)
+                if instance:
+                    break
+                else:
+                    sleep(5)
+
+    if state == 'absent' and changed and not module.params['skip_final_snapshot']:
+        instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier']))
+
+    pending_processor_features = None
+    if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+        pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures')
+    instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])
+    if pending_processor_features is not None:
+        instance['pending_modified_values']['processor_features'] = pending_processor_features
+
+    module.exit_json(changed=changed, **instance)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/integration/targets/rds_instance/aliases b/test/integration/targets/rds_instance/aliases
new file mode
100644 index 0000000000..5692719518 --- /dev/null +++ b/test/integration/targets/rds_instance/aliases @@ -0,0 +1,2 @@ +cloud/aws +unsupported diff --git a/test/integration/targets/rds_instance/defaults/main.yml b/test/integration/targets/rds_instance/defaults/main.yml new file mode 100644 index 0000000000..a2d215ba8a --- /dev/null +++ b/test/integration/targets/rds_instance/defaults/main.yml @@ -0,0 +1,23 @@ +--- +instance_id: "{{ resource_prefix }}" +modified_instance_id: "{{ resource_prefix }}-updated" +username: test +password: test12345678 +db_instance_class: db.t2.micro +storage_encrypted_db_instance_class: db.t2.small +modified_db_instance_class: db.t2.medium +allocated_storage: 20 +modified_allocated_storage: 30 + +# For aurora tests +cluster_id: "{{ resource_prefix }}-cluster" +aurora_db_instance_class: db.t2.medium + +# For oracle tests +oracle_ee_db_instance_class: db.r3.xlarge +processor_features: + coreCount: 1 + threadsPerCore: 1 +modified_processor_features: + coreCount: 2 + threadsPerCore: 2 diff --git a/test/integration/targets/rds_instance/tasks/credential_tests.yml b/test/integration/targets/rds_instance/tasks/credential_tests.yml new file mode 100644 index 0000000000..1aa1c3a23d --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/credential_tests.yml @@ -0,0 +1,36 @@ +--- +- name: test without credentials or region + rds_instance: + db_instance_identifier: test-rds-instance + register: result + ignore_errors: yes + +- assert: + that: + - result.failed + - 'result.msg == "The rds_instance module requires a region and none was found in configuration, environment variables or module parameters"' + +- name: test without credentials + rds_instance: + db_instance_identifier: test-rds-instance + region: us-east-1 + register: result + ignore_errors: yes + +- assert: + that: + - result.failed + - '"Unable to locate credentials" in result.msg' + +- name: test with invalid credentials + rds_instance: + db_instance_identifier: test-rds-instance + region: us-east-1 + profile: doesnotexist + register: result + ignore_errors: yes + +- assert: + that: + - result.failed + - 'result.msg == "The config profile (doesnotexist) could not be found"' diff --git a/test/integration/targets/rds_instance/tasks/main.yml b/test/integration/targets/rds_instance/tasks/main.yml new file mode 100644 index 0000000000..cfece3ea79 --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- block: + + - include: ./credential_tests.yml + - include: ./test_states.yml + - include: ./test_tags.yml + - include: ./test_modification.yml # TODO: test availability_zone and multi_az + - include: ./test_bad_options.yml + - include: ./test_processor_features.yml + - include: ./test_encryption.yml + - include: ./test_final_snapshot.yml + - include: ./test_read_replica.yml + - include: ./test_vpc_security_groups.yml + #- include: ./test_restore_instance.yml # TODO: point-in-time, snapshot, s3 + # TODO: uncomment after adding rds_cluster module + #- include: ./test_aurora.yml diff --git a/test/integration/targets/rds_instance/tasks/test_aurora.yml b/test/integration/targets/rds_instance/tasks/test_aurora.yml new file mode 100644 index 0000000000..14d28b248d --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_aurora.yml @@ -0,0 +1,144 @@ +--- + - block: + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + 
region: "{{ aws_region }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create minimal aurora cluster in default VPC and default subnet group + rds_cluster: + state: present + engine: aurora + cluster_id: "{{ cluster_id }}" + username: "{{ username }}" + password: "{{ password }}" + <<: *aws_connection_info + + - name: Create an Aurora instance + rds_instance: + id: "{{ instance_id }}" + cluster_id: "{{ cluster_id }}" + engine: aurora + state: present + db_instance_class: "{{ aurora_db_instance_class }}" + tags: + CreatedBy: rds_instance integration tests + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.db_instance_identifier == '{{ instance_id }}'" + - "result.tags | length == 1" + + - name: Modify tags + rds_instance: + id: "{{ instance_id }}" + state: present + tags: + Test: rds_instance + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - result.tags | length == 1 + - "result.tags.Test == 'rds_instance'" + + - name: Test idempotence + rds_instance: + id: "{{ instance_id }}" + state: present + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + + - name: Attempt to modify password (a cluster-managed attribute) + rds_instance: + id: "{{ instance_id }}" + state: present + password: "{{ password }}" + force_update_password: True + apply_immediately: True + <<: *aws_connection_info + register: result + ignore_errors: yes + + - assert: + that: + - result.failed + - "'Modify master user password for the DB Cluster using the ModifyDbCluster API' in result.msg" + - "'Please see rds_cluster' in result.msg" + + - name: Modify aurora instance port (a cluster-managed attribute) + rds_instance: + id: "{{ instance_id }}" + state: present + port: 1150 + <<: *aws_connection_info + register: result + ignore_errors: yes + + - assert: + that: + - not result.changed + - "'Modify database endpoint port number for the DB Cluster using the ModifyDbCluster API' in result.msg" + - "'Please see rds_cluster' in result.msg" + + - name: Modify Aurora instance identifier + rds_instance: + id: "{{ instance_id }}" + state: present + purge_tags: False + new_id: "{{ modified_instance_id }}" + apply_immediately: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.db_instance_identifier == '{{ modified_instance_id }}'" + + always: + + - name: Delete the instance + rds_instance: + id: "{{ item }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + loop: + - "{{ instance_id }}" + - "{{ modified_instance_id }}" + ignore_errors: yes + + - name: Delete the cluster + rds_cluster: + cluster_id: "{{ cluster_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + ignore_errors: yes diff --git a/test/integration/targets/rds_instance/tasks/test_bad_options.yml b/test/integration/targets/rds_instance/tasks/test_bad_options.yml new file mode 100644 index 0000000000..21de862d22 --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_bad_options.yml @@ -0,0 +1,41 @@ +--- + - block: + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ 
security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a DB instance with an invalid engine + rds_instance: + id: "{{ instance_id }}" + state: present + engine: thisisnotavalidengine + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + <<: *aws_connection_info + register: result + ignore_errors: True + + - assert: + that: + - result.failed + - '"DB engine thisisnotavalidengine should be one of" in result.msg' diff --git a/test/integration/targets/rds_instance/tasks/test_encryption.yml b/test/integration/targets/rds_instance/tasks/test_encryption.yml new file mode 100644 index 0000000000..dc9a8d9646 --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_encryption.yml @@ -0,0 +1,53 @@ +--- + - block: + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ storage_encrypted_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.db_instance_identifier == '{{ instance_id }}'" + - result.kms_key_id + - result.storage_encrypted == true + + always: + + - name: Delete DB instance + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result diff --git a/test/integration/targets/rds_instance/tasks/test_final_snapshot.yml b/test/integration/targets/rds_instance/tasks/test_final_snapshot.yml new file mode 100644 index 0000000000..91cb9797ff --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_final_snapshot.yml @@ -0,0 +1,85 @@ +--- + - block: + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + <<: *aws_connection_info + register: result + + - name: Delete the DB instance + rds_instance: + id: "{{ instance_id }}" + state: absent + final_snapshot_identifier: "{{ 
instance_id }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.final_snapshot.db_instance_identifier == '{{ instance_id }}'" + + - name: Check that snapshot exists + rds_snapshot_facts: + db_snapshot_identifier: "{{ instance_id }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - "result.snapshots | length == 1" + - "result.snapshots.0.engine == 'mariadb'" + + always: + + - name: Use AWS CLI to delete the snapshot + command: "aws rds delete-db-snapshot --db-snapshot-identifier '{{ instance_id }}'" + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + + # TODO: Uncomment once rds_snapshot module exists + #- name: Remove the snapshot + # rds_snapshot: + # db_snapshot_identifier: "{{ instance_id }}" + # state: absent + # <<: *aws_connection_info + # ignore_errors: yes + + - name: Remove the DB instance + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + ignore_errors: yes diff --git a/test/integration/targets/rds_instance/tasks/test_modification.yml b/test/integration/targets/rds_instance/tasks/test_modification.yml new file mode 100644 index 0000000000..3e9cc968a7 --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_modification.yml @@ -0,0 +1,199 @@ +--- + - block: + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.db_instance_identifier == '{{ instance_id }}'" + + - name: Modify the instance name without immediate application + rds_instance: + id: "{{ instance_id }}" + state: present + new_id: "{{ modified_instance_id }}" + apply_immediately: False + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - 'result.db_instance_identifier == "{{ instance_id }}"' + + - name: Immediately apply the pending update + rds_instance: + id: "{{ instance_id }}" + state: present + new_id: "{{ modified_instance_id }}" + apply_immediately: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - 'result.db_instance_identifier == "{{ modified_instance_id }}"' + + - name: Modify the instance immediately + rds_instance: + id: '{{ modified_instance_id }}' + state: present + new_id: '{{ instance_id }}' + apply_immediately: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - 'result.db_instance_identifier == "{{ instance_id }}"' + + - name: Check mode - modify the password + rds_instance: + id: '{{ instance_id }}' + state: present + password: '{{ password }}' + force_update_password: True + 
apply_immediately: True + <<: *aws_connection_info + register: result + check_mode: True + + - assert: + that: + - result.changed + + - name: Modify the password + rds_instance: + id: '{{ instance_id }}' + state: present + password: '{{ password }}' + force_update_password: True + apply_immediately: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + + # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name, + # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration + + - name: Modify several attributes + rds_instance: + id: '{{ instance_id }}' + state: present + allocated_storage: 30 + db_instance_class: "{{ modified_db_instance_class }}" + backup_retention_period: 2 + preferred_backup_window: "05:00-06:00" + preferred_maintenance_window: "mon:06:20-mon:06:50" + engine_version: "10.1.26" + allow_major_version_upgrade: true + auto_minor_version_upgrade: false + port: 1150 + apply_immediately: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - result.pending_modified_values.allocated_storage == 30 + - result.pending_modified_values.port == 1150 + - 'result.pending_modified_values.db_instance_class == "db.t2.medium"' + - 'result.pending_modified_values.engine_version == "10.1.26"' + + - name: Idempotence modifying several pending attributes + rds_instance: + id: '{{ instance_id }}' + state: present + allocated_storage: 30 + db_instance_class: "{{ modified_db_instance_class }}" + backup_retention_period: 2 + preferred_backup_window: "05:00-06:00" + preferred_maintenance_window: "mon:06:20-mon:06:50" + engine_version: "10.1.26" + allow_major_version_upgrade: true + auto_minor_version_upgrade: false + port: 1150 + <<: *aws_connection_info + register: result + retries: 30 + delay: 10 + until: result is not failed + + - assert: + that: + - not result.changed + - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == 30' + - '"port" in result.pending_modified_values or result.endpoint.port == 1150' + - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == "db.t2.medium"' + - '"engine_version" in result.pending_modified_values or result.engine_version == "10.1.26"' + + - name: Reboot the instance to update the modified values and add tags + rds_instance: + id: '{{ instance_id }}' + state: rebooted + tags: + Created_by: Ansible rds_instance tests + <<: *aws_connection_info + register: result + + - name: Delete the instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - '"pending_modified_values" not in result' + + always: + + - name: Delete the instance + rds_instance: + id: '{{ item }}' + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + loop: ['{{ instance_id }}', '{{ modified_instance_id }}'] + ignore_errors: yes diff --git a/test/integration/targets/rds_instance/tasks/test_processor_features.yml b/test/integration/targets/rds_instance/tasks/test_processor_features.yml new file mode 100644 index 0000000000..2fb3d8951c --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_processor_features.yml @@ -0,0 +1,126 @@ +--- + - block: + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ 
aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create an oracle-ee DB instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: oracle-ee + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ oracle_ee_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: True + processor_features: "{{ processor_features }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - 'result.processor_features.coreCount == "{{ processor_features.coreCount }}"' + - 'result.processor_features.threadsPerCore == "{{ processor_features.threadsPerCore }}"' + + - name: Check mode - modify the processor features + rds_instance: + id: "{{ instance_id }}" + state: present + engine: oracle-ee + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ oracle_ee_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: True + processor_features: "{{ modified_processor_features }}" + apply_immediately: true + <<: *aws_connection_info + register: result + check_mode: True + + - assert: + that: + - result.changed + + - name: Modify the processor features + rds_instance: + id: "{{ instance_id }}" + state: present + engine: oracle-ee + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ oracle_ee_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: True + processor_features: "{{ modified_processor_features }}" + apply_immediately: true + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - 'result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount }}"' + - 'result.pending_modified_values.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore }}"' + + - name: Check mode - use the default processor features + rds_instance: + id: "{{ instance_id }}" + state: present + processor_features: {} + apply_immediately: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + + - name: Use the default processor features + rds_instance: + id: "{{ instance_id }}" + state: present + processor_features: {} + apply_immediately: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - 'result.pending_modified_values.processor_features.coreCount == "DEFAULT"' + - 'result.pending_modified_values.processor_features.threadsPerCore == "DEFAULT"' + + always: + + - name: Delete the DB instance + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed diff --git a/test/integration/targets/rds_instance/tasks/test_read_replica.yml b/test/integration/targets/rds_instance/tasks/test_read_replica.yml new file mode 100644 index 0000000000..0780ef0c72 --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_read_replica.yml @@ -0,0 +1,140 @@ +--- + - block: + + - name: set the two regions for the source DB and the replica + set_fact: + region_src: "{{ aws_region }}" + region_dest: "us-east-2" + + - name: set 
up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + region: "{{ region_src }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a source DB instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mysql + backup_retention_period: 1 + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_src }}" + <<: *aws_connection_info + register: source_db + + - assert: + that: + - source_db.changed + - "source_db.db_instance_identifier == '{{ instance_id }}'" + + - name: Create a read replica in a different region + rds_instance: + id: "{{ instance_id }}-replica" + state: present + source_db_instance_identifier: "{{ source_db.db_instance_arn }}" + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + read_replica: True + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_dest }}" + <<: *aws_connection_info + register: result + + - name: Test idempotence with a read replica + rds_instance: + id: "{{ instance_id }}-replica" + state: present + source_db_instance_identifier: "{{ source_db.db_instance_arn }}" + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_dest }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + + - name: Test idempotence with read_replica=True + rds_instance: + id: "{{ instance_id }}-replica" + state: present + read_replica: True + source_db_instance_identifier: "{{ source_db.db_instance_arn }}" + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_dest }}" + <<: *aws_connection_info + register: result + + - name: Promote the read replica + rds_instance: + id: "{{ instance_id }}-replica" + state: present + read_replica: False + region: "{{ region_dest }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + + - name: Test idempotence + rds_instance: + id: "{{ instance_id }}-replica" + state: present + read_replica: False + region: "{{ region_dest }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + + always: + + - name: Remove the DB instance + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + region: "{{ region_src }}" + <<: *aws_connection_info + + - name: Remove the DB replica + rds_instance: + id: "{{ instance_id }}-replica" + state: absent + skip_final_snapshot: True + region: "{{ region_dest }}" + <<: *aws_connection_info diff --git a/test/integration/targets/rds_instance/tasks/test_states.yml b/test/integration/targets/rds_instance/tasks/test_states.yml new file mode 100644 index 0000000000..d79d184bd5 --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_states.yml @@ -0,0 +1,198 @@ +--- + - block: + - name: set up aws connection info + set_fact: + 
aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Check Mode - Create a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + <<: *aws_connection_info + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Create a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.db_instance_identifier == '{{ instance_id }}'" + + - name: Idempotence + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier + + - name: Idempotence with minimal options + rds_instance: + id: '{{ instance_id }}' + state: present + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier + + - name: Check Mode - stop the instance + rds_instance: + id: '{{ instance_id }}' + state: stopped + <<: *aws_connection_info + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Stop the instance + rds_instance: + id: '{{ instance_id }}' + state: stopped + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + + - name: Check Mode - idempotence + rds_instance: + id: '{{ instance_id }}' + state: stopped + <<: *aws_connection_info + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Idempotence + rds_instance: + id: '{{ instance_id }}' + state: stopped + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + + - name: Check mode - reboot a stopped instance + rds_instance: + id: '{{ instance_id }}' + state: rebooted + <<: *aws_connection_info + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Reboot a stopped instance + rds_instance: + id: '{{ instance_id }}' + state: rebooted + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + + - name: Check Mode - start the instance + rds_instance: + id: '{{ instance_id }}' + state: started + <<: *aws_connection_info + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Stop the instance + rds_instance: + id: '{{ instance_id }}' + state: stopped + <<: *aws_connection_info + + - name: Start the instance + rds_instance: + id: '{{ instance_id }}' + state: started + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + + always: + + - name: 
Remove DB instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed diff --git a/test/integration/targets/rds_instance/tasks/test_tags.yml b/test/integration/targets/rds_instance/tasks/test_tags.yml new file mode 100644 index 0000000000..87500dc3ef --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_tags.yml @@ -0,0 +1,131 @@ +--- + - block: + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.db_instance_identifier == '{{ instance_id }}'" + - "result.tags | length == 2" + - "result.tags.Name == '{{ instance_id }}'" + - "result.tags.Created_by == 'Ansible rds_instance tests'" + + - name: Test idempotence omitting tags + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + - "result.tags | length == 2" + + - name: Test tags are not purged if purge_tags is False + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + tags: {} + purge_tags: False + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + - "result.tags | length == 2" + + - name: Add a tag and remove a tag + rds_instance: + id: "{{ instance_id }}" + state: present + tags: + Name: "{{ instance_id }}-new" + Created_by: Ansible rds_instance tests + purge_tags: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.tags | length == 2" + - "result.tags.Name == '{{ instance_id }}-new'" + + - name: Remove all tags + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + tags: {} + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - not result.tags + + always: + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed diff --git a/test/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml 
b/test/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml new file mode 100644 index 0000000000..4da38069b3 --- /dev/null +++ b/test/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml @@ -0,0 +1,166 @@ +--- + - block: + - name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: create a VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "10.122.122.128/26" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "created by rds_instance integration tests" + <<: *aws_connection_info + register: vpc_result + + - name: create subnets + ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ item.zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-subnet" + Description: "created by rds_instance integration tests" + state: present + <<: *aws_connection_info + register: subnets_result + loop: + - {"cidr": "10.122.122.128/28", "zone": "{{ aws_region }}a"} + - {"cidr": "10.122.122.144/28", "zone": "{{ aws_region }}b"} + - {"cidr": "10.122.122.160/28", "zone": "{{ aws_region }}c"} + - {"cidr": "10.122.122.176/28", "zone": "{{ aws_region }}d"} + + - name: Create security groups + ec2_group: + name: "{{ item }}" + description: "created by rds_instance integration tests" + state: present + <<: *aws_connection_info + register: sgs_result + loop: + - "{{ resource_prefix }}-sg-1" + - "{{ resource_prefix }}-sg-2" + - "{{ resource_prefix }}-sg-3" + + - debug: var=sgs_result + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a DB instance in the VPC with two security groups + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + vpc_security_group_ids: + - "{{ sgs_result.results.0.group_id }}" + - "{{ sgs_result.results.1.group_id }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + - "result.db_instance_identifier == '{{ instance_id }}'" + + - name: Add a new security group + rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + <<: *aws_connection_info + register: result + + - assert: + that: + - result.changed + + always: + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + ignore_errors: yes + + - name: Remove security groups + ec2_group: + name: "{{ item }}" + description: "created by rds_instance integration tests" + state: absent + <<: *aws_connection_info + register: sgs_result + loop: + - "{{ resource_prefix }}-sg-1" + - "{{ resource_prefix }}-sg-2" + - "{{ resource_prefix }}-sg-3" + + - name: remove subnets + ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ item.zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-subnet" + Description: "created by rds_instance integration tests" + state: absent + <<: *aws_connection_info + register: subnets + 
ignore_errors: yes + retries: 30 + until: subnets is not failed + delay: 10 + loop: + - {"cidr": "10.122.122.128/28", "zone": "{{ aws_region }}a"} + - {"cidr": "10.122.122.144/28", "zone": "{{ aws_region }}b"} + - {"cidr": "10.122.122.160/28", "zone": "{{ aws_region }}c"} + - {"cidr": "10.122.122.176/28", "zone": "{{ aws_region }}d"} + + - name: remove the VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: absent + cidr_block: "10.122.122.128/26" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "created by rds_instance integration tests" + <<: *aws_connection_info + register: vpc_result + ignore_errors: yes + retries: 30 + until: vpc_result is not failed + delay: 10 + + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + <<: *aws_connection_info + register: result + ignore_errors: yes