
parse botocore.endpoint logs into a list of AWS actions (#49312)

* Add an option to parse botocore.endpoint logs for the AWS actions performed during a task

Add a callback to consolidate all AWS actions used by modules

Added some documentation to the AWS guidelines

* Enable aws_resource_actions callback only for AWS tests

* Add script to help generate policies

* Set debug_botocore_endpoint_logs via environment variable for all AWS integration tests

Ensure AWS tests inherit environment

(also remove AWS CLI in aws_rds inventory tests and use the module)
Sloane Hertel 2019-03-18 08:29:03 -05:00 committed by GitHub
parent eb790cd3c6
commit 7da565b3ae
34 changed files with 672 additions and 233 deletions

View file

@ -0,0 +1,327 @@
# Requires pandas, bs4, html5lib, and lxml
#
# Call script with the output from aws_resource_actions callback, e.g.
# python build_iam_policy_framework.py ['ec2:AuthorizeSecurityGroupEgress', 'ec2:AuthorizeSecurityGroupIngress', 'sts:GetCallerIdentity']
#
# The sample output:
# {
#     "Version": "2012-10-17",
#     "Statement": [
#         {
#             "Sid": "AnsibleEditor0",
#             "Effect": "Allow",
#             "Action": [
#                 "ec2:AuthorizeSecurityGroupEgress",
#                 "ec2:AuthorizeSecurityGroupIngress"
#             ],
#             "Resource": "arn:aws:ec2:${Region}:${Account}:security-group/${SecurityGroupId}"
#         },
#         {
#             "Sid": "AnsibleEditor1",
#             "Effect": "Allow",
#             "Action": [
#                 "sts:GetCallerIdentity"
#             ],
#             "Resource": "*"
#         }
#     ]
# }
#
# Policy troubleshooting:
# - If there are more actions in the policy than you provided, AWS has documented dependencies for some of your actions and
# those have been added to the policy.
# - If there are fewer actions in the policy than you provided, some of your actions are not in the IAM table of actions for
# that service. For example, the API call s3:DeleteObjects does not actually correlate to the permission needed in a policy.
# In this case s3:DeleteObject is the permission required to allow both the s3:DeleteObjects action and the s3:DeleteObject action.
# - The policies output are only as accurate as the AWS documentation. If the policy does not permit the
# necessary actions, look for undocumented dependencies. For example, redshift:CreateCluster requires ec2:DescribeVpcs,
# ec2:DescribeSubnets, ec2:DescribeSecurityGroups, and ec2:DescribeInternetGateways, but AWS does not document this.
#
import json
import requests
import sys
missing_dependencies = []
try:
    import pandas as pd
except ImportError:
    missing_dependencies.append('pandas')

try:
    import bs4
except ImportError:
    missing_dependencies.append('bs4')

try:
    import html5lib
except ImportError:
    missing_dependencies.append('html5lib')

try:
    import lxml
except ImportError:
    missing_dependencies.append('lxml')


irregular_service_names = {
    'a4b': 'alexaforbusiness',
    'appstream': 'appstream2.0',
    'acm': 'certificatemanager',
    'acm-pca': 'certificatemanagerprivatecertificateauthority',
    'aws-marketplace-management': 'marketplacemanagementportal',
    'ce': 'costexplorerservice',
    'cognito-identity': 'cognitoidentity',
    'cognito-sync': 'cognitosync',
    'cognito-idp': 'cognitouserpools',
    'cur': 'costandusagereport',
    'dax': 'dynamodbacceleratordax',
    'dlm': 'datalifecyclemanager',
    'dms': 'databasemigrationservice',
    'ds': 'directoryservice',
    'ec2messages': 'messagedeliveryservice',
    'ecr': 'ec2containerregistry',
    'ecs': 'elasticcontainerservice',
    'eks': 'elasticcontainerserviceforkubernetes',
    'efs': 'elasticfilesystem',
    'es': 'elasticsearchservice',
    'events': 'cloudwatchevents',
    'firehose': 'kinesisfirehose',
    'fms': 'firewallmanager',
    'health': 'healthapisandnotifications',
    'importexport': 'importexportdiskservice',
    'iot1click': 'iot1-click',
    'kafka': 'managedstreamingforkafka',
    'kinesisvideo': 'kinesisvideostreams',
    'kms': 'keymanagementservice',
    'license-manager': 'licensemanager',
    'logs': 'cloudwatchlogs',
    'opsworks-cm': 'opsworksconfigurationmanagement',
    'mediaconnect': 'elementalmediaconnect',
    'mediaconvert': 'elementalmediaconvert',
    'medialive': 'elementalmedialive',
    'mediapackage': 'elementalmediapackage',
    'mediastore': 'elementalmediastore',
    'mgh': 'migrationhub',
    'mobiletargeting': 'pinpoint',
    'pi': 'performanceinsights',
    'pricing': 'pricelist',
    'ram': 'resourceaccessmanager',
    'resource-groups': 'resourcegroups',
    'sdb': 'simpledb',
    'servicediscovery': 'cloudmap',
    'serverlessrepo': 'serverlessapplicationrepository',
    'sms': 'servermigrationservice',
    'sms-voice': 'pinpointsmsandvoiceservice',
    'sso-directory': 'ssodirectory',
    'ssm': 'systemsmanager',
    'ssmmessages': 'sessionmanagermessagegatewayservice',
    'states': 'stepfunctions',
    'sts': 'securitytokenservice',
    'swf': 'simpleworkflowservice',
    'tag': 'resourcegrouptaggingapi',
    'transfer': 'transferforsftp',
    'waf-regional': 'wafregional',
    'wam': 'workspacesapplicationmanager',
    'xray': 'x-ray'
}

irregular_service_links = {
    'apigateway': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_manageamazonapigateway.html'
    ],
    'aws-marketplace': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplace.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplacemeteringservice.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsprivatemarketplace.html'
    ],
    'discovery': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_applicationdiscovery.html'
    ],
    'elasticloadbalancing': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancing.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancingv2.html'
    ],
    'globalaccelerator': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_globalaccelerator.html'
    ]
}
def get_docs_by_prefix(prefix):
    amazon_link_form = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazon{0}.html'
    aws_link_form = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_aws{0}.html'

    if prefix in irregular_service_links:
        links = irregular_service_links[prefix]
    else:
        if prefix in irregular_service_names:
            prefix = irregular_service_names[prefix]
        links = [amazon_link_form.format(prefix), aws_link_form.format(prefix)]

    return links


def get_html(links):
    html_list = []
    for link in links:
        html = requests.get(link).content
        try:
            parsed_html = pd.read_html(html)
            html_list.append(parsed_html)
        except ValueError as e:
            if 'No tables found' in str(e):
                pass
            else:
                raise e

    return html_list


def get_tables(service):
    links = get_docs_by_prefix(service)
    html_list = get_html(links)
    action_tables = []
    arn_tables = []
    for df_list in html_list:
        for df in df_list:
            table = json.loads(df.to_json(orient='split'))
            table_data = table['data'][0]
            if 'Actions' in table_data and 'Resource Types (*required)' in table_data:
                action_tables.append(table['data'][1::])
            elif 'Resource Types' in table_data and 'ARN' in table_data:
                arn_tables.append(table['data'][1::])

    # Action table indices:
    # 0: Action, 1: Description, 2: Access level, 3: Resource type, 4: Condition keys, 5: Dependent actions
    # ARN tables indices:
    # 0: Resource type, 1: ARN template, 2: Condition keys
    return action_tables, arn_tables


def add_dependent_action(resources, dependency):
    resource, action = dependency.split(':')

    if resource in resources:
        resources[resource].append(action)
    else:
        resources[resource] = [action]
    return resources


def get_dependent_actions(resources):
    for service in dict(resources):
        action_tables, arn_tables = get_tables(service)
        for found_action_table in action_tables:
            for action_stuff in found_action_table:
                if action_stuff is None:
                    continue
                if action_stuff[0] in resources[service] and action_stuff[5]:
                    dependencies = action_stuff[5].split()
                    if isinstance(dependencies, list):
                        for dependency in dependencies:
                            resources = add_dependent_action(resources, dependency)
                    else:
                        resources = add_dependent_action(resources, dependencies)
    return resources


def get_actions_by_service(resources):
    service_action_dict = {}
    dependencies = {}
    for service in resources:
        action_tables, arn_tables = get_tables(service)

        # Create dict of the resource type to the corresponding ARN
        arn_dict = {}
        for found_arn_table in arn_tables:
            for arn_stuff in found_arn_table:
                arn_dict["{0}*".format(arn_stuff[0])] = arn_stuff[1]

        # Create dict of the action to the corresponding ARN
        action_dict = {}
        for found_action_table in action_tables:
            for action_stuff in found_action_table:
                if action_stuff[0] is None:
                    continue
                if arn_dict.get(action_stuff[3]):
                    action_dict[action_stuff[0]] = arn_dict[action_stuff[3]]
                else:
                    action_dict[action_stuff[0]] = None
        service_action_dict[service] = action_dict
    return service_action_dict


def get_resource_arns(aws_actions, action_dict):
    resource_arns = {}
    for resource_action in aws_actions:
        resource, action = resource_action.split(':')
        if action not in action_dict:
            continue
        if action_dict[action] is None:
            resource = "*"
        else:
            resource = action_dict[action].replace("${Partition}", "aws")
        if resource not in resource_arns:
            resource_arns[resource] = []
        resource_arns[resource].append(resource_action)
    return resource_arns


def get_resources(actions):
    resources = {}
    for action in actions:
        resource, action = action.split(':')
        if resource not in resources:
            resources[resource] = []
        resources[resource].append(action)
    return resources
def combine_arn_actions(aws_actions, service_action_arn_dict):
    arn_actions = {}
    for service in service_action_arn_dict:
        service_arn_actions = get_resource_arns(aws_actions, service_action_arn_dict[service])
        for resource in service_arn_actions:
            if resource in arn_actions:
                arn_actions[resource].extend(service_arn_actions[resource])
            else:
                arn_actions[resource] = service_arn_actions[resource]
    return arn_actions


def combine_actions_and_dependent_actions(resources):
    aws_actions = []
    for resource in resources:
        for action in resources[resource]:
            aws_actions.append('{0}:{1}'.format(resource, action))
    return set(aws_actions)


def get_actions_restricted_by_arn(aws_actions):
    resources = get_resources(aws_actions)
    resources = get_dependent_actions(resources)
    service_action_arn_dict = get_actions_by_service(resources)
    aws_actions = combine_actions_and_dependent_actions(resources)
    return combine_arn_actions(aws_actions, service_action_arn_dict)
def main(aws_actions):
    arn_actions = get_actions_restricted_by_arn(aws_actions)
    statement = []
    for resource_restriction in arn_actions:
        statement.append({
            "Sid": "AnsibleEditor{0}".format(len(statement)),
            "Effect": "Allow",
            "Action": arn_actions[resource_restriction],
            "Resource": resource_restriction
        })

    policy = {"Version": "2012-10-17", "Statement": statement}
    print(json.dumps(policy, indent=4))


if __name__ == '__main__':
    if missing_dependencies:
        sys.exit('Missing Python libraries: {0}'.format(', '.join(missing_dependencies)))
    actions = sys.argv[1:]
    if len(actions) == 1:
        actions = sys.argv[1].split(',')
    aws_actions = [action.strip('[], "\'') for action in actions]
    main(aws_actions)
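For reference, a minimal, self-contained sketch of the assembly step performed by `main()` above. The `arn_actions` mapping is hard-coded for illustration (in the script it is produced by `get_actions_restricted_by_arn()`) and mirrors the sample output in the header comment:

```python
import json

# Illustrative mapping of resource restriction -> actions; the real script builds
# this from the scraped IAM documentation tables.
arn_actions = {
    "arn:aws:ec2:${Region}:${Account}:security-group/${SecurityGroupId}": [
        "ec2:AuthorizeSecurityGroupEgress",
        "ec2:AuthorizeSecurityGroupIngress",
    ],
    "*": ["sts:GetCallerIdentity"],
}

statement = []
for resource_restriction in arn_actions:
    statement.append({
        "Sid": "AnsibleEditor{0}".format(len(statement)),
        "Effect": "Allow",
        "Action": arn_actions[resource_restriction],
        "Resource": resource_restriction,
    })

print(json.dumps({"Version": "2012-10-17", "Statement": statement}, indent=4))
```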

View file

@ -60,10 +60,18 @@ don't need to be wrapped in the backoff decorator.
"""
import re
import logging
import traceback
from functools import wraps
from distutils.version import LooseVersion
try:
    from cStringIO import StringIO
except ImportError:
    # Python 3
    from io import StringIO
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn, get_aws_connection_info
@ -120,14 +128,38 @@ class AnsibleAWSModule(object):
        self._diff = self._module._diff
        self._name = self._module._name

        self._botocore_endpoint_log_stream = StringIO()
        self.logger = None
        if self.params.get('debug_botocore_endpoint_logs'):
            self.logger = logging.getLogger('botocore.endpoint')
            self.logger.setLevel(logging.DEBUG)
            self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
    @property
    def params(self):
        return self._module.params

    def _get_resource_action_list(self):
        actions = []
        for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
            ln = ln.strip()
            if not ln:
                continue
            found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
            if found_operational_request:
                operation_request = found_operational_request.group(0)[20:-1]
                resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
                actions.append("{0}:{1}".format(resource, operation_request))
        return list(set(actions))

    def exit_json(self, *args, **kwargs):
        if self.params.get('debug_botocore_endpoint_logs'):
            kwargs['resource_actions'] = self._get_resource_action_list()
        return self._module.exit_json(*args, **kwargs)

    def fail_json(self, *args, **kwargs):
        if self.params.get('debug_botocore_endpoint_logs'):
            kwargs['resource_actions'] = self._get_resource_action_list()
        return self._module.fail_json(*args, **kwargs)

    def debug(self, *args, **kwargs):
@ -190,7 +222,7 @@ class AnsibleAWSModule(object):
        if response is not None:
            failure.update(**camel_dict_to_snake_dict(response))

        self.fail_json(**failure)
def _gather_versions(self):
"""Gather AWS SDK (boto3 and botocore) dependency versions

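For reference, a minimal standalone sketch of the parsing done by `_get_resource_action_list()` above. The log line below is illustrative rather than verbatim botocore output, but it carries the two pieces the method extracts (the `OperationModel` name and the endpoint host):

```python
import re

# Illustrative botocore.endpoint debug line (not verbatim botocore output).
line = ("Making request for OperationModel(name=DescribeVpcs) with params: "
        "{'url': 'https://ec2.us-east-1.amazonaws.com/'}")

found = re.search(r"OperationModel\(name=.*?\)", line)
if found:
    operation = found.group(0)[20:-1]                           # 'DescribeVpcs'
    service = re.search(r"https://.*?\.", line).group(0)[8:-1]  # 'ec2'
    print("{0}:{1}".format(service, operation))                 # ec2:DescribeVpcs
```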
View file

@ -31,7 +31,7 @@ import re
import traceback
from ansible.module_utils.ansible_release import __version__
from ansible.module_utils.basic import missing_required_lib, env_fallback
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.six import string_types, binary_type, text_type
@ -177,6 +177,7 @@ def boto_exception(err):
def aws_common_argument_spec():
    return dict(
        debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
        ec2_url=dict(),
        aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
        aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
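To illustrate what the new `fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS'])` entry does, here is a simplified sketch of the fallback behaviour, not Ansible's actual implementation: when the option is absent from the task arguments, the named environment variable is consulted before the declared default of `False` applies.

```python
import os


def env_fallback_sketch(*names):
    # Simplified stand-in for ansible.module_utils.basic.env_fallback: return the
    # first environment variable that is set; the real helper raises
    # AnsibleFallbackNotFound when none are found.
    for name in names:
        if name in os.environ:
            return os.environ[name]
    return None


supplied_value = None  # pretend the task did not set debug_botocore_endpoint_logs
value = supplied_value
if value is None:
    value = env_fallback_sketch('ANSIBLE_DEBUG_BOTOCORE_LOGS')
if value is None:
    value = False  # the declared default
print(value)
```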

View file

@ -633,14 +633,66 @@ for every call, it's preferrable to use [YAML Anchors](http://blog.daemonl.com/2
As explained in the [Integration Test guide](https://docs.ansible.com/ansible/latest/dev_guide/testing_integration.html#iam-policies-for-aws)
there are defined IAM policies in `hacking/aws_config/testing_policies/` that contain the necessary permissions
to run the AWS integration tests. The permissions used by CI are more restrictive than those in `hacking/aws_config/testing_policies`; for CI we want
the most restrictive policy possible that still allows the given tests to pass.

If your module interacts with a new service or otherwise requires new permissions, tests will fail when you submit a pull request and the
[Ansibullbot](https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md) will tag your PR as needing revision.
We do not automatically grant additional permissions to the roles used by the continuous integration builds; you must provide the
minimum IAM permissions required to run your integration tests.

If your PR has test failures, check carefully whether the failures are due only to missing permissions. Once you have ruled out other
sources of failure, add a comment with the `ready_for_review` tag and explain that it's due to missing permissions.

Your pull request cannot be merged until the tests are passing. If your pull request is failing due to missing permissions,
you must collect the minimum IAM permissions required to run the tests.
There are two ways to figure out which IAM permissions you need for your PR to pass:
* Start with the most permissive IAM policy, run the tests to collect information about which resources your tests actually use, then construct a policy based on that output. This approach only works on modules that use `AnsibleAWSModule`.
* Start with the least permissive IAM policy, run the tests to discover a failure, add permissions for the resource that addresses that failure, then repeat. If your module uses `AnsibleModule` instead of `AnsibleAWSModule`, you must use this approach.
To start with the most permissive IAM policy:
1) [Create an IAM policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) that allows all actions (set `Action` and `Resource` to `*`, as in the sketch after this list).
2) Run your tests locally with this policy. On `AnsibleAWSModule`-based modules, the `debug_botocore_endpoint_logs` option is automatically set to `yes`, so you
should see a list of `AWS ACTIONS` after the `PLAY RECAP` showing all the permissions used. If your tests use a `boto`/`AnsibleModule` module, you must start with the least permissive policy (see below).
3) Modify your policy to allow only the actions your tests use. Restrict account, region, and prefix where possible. Wait a few minutes for your policy to update.
4) Run the tests again with a user or role that allows only the new policy.
5) If the tests fail, troubleshoot (see tips below), modify the policy, run the tests again, and repeat the process until the tests pass with a restrictive policy.
6) Share the minimum policy in a comment on your PR.
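A minimal sketch of the allow-all starting policy from step 1, expressed as a Python dict and serialized the same way `build_iam_policy_framework.py` prints its output (the `Sid` value is only illustrative):

```python
import json

allow_all_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AnsibleTestAllowAll",  # illustrative name, any Sid works
            "Effect": "Allow",
            "Action": "*",
            "Resource": "*"
        }
    ]
}
print(json.dumps(allow_all_policy, indent=4))
```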
To start from the least permissive IAM policy:
1) Run the integration tests locally with no IAM permissions.
2) Examine the error when the tests reach a failure.
a) If the error message indicates the action used in the request, add the action to your policy.
b) If the error message does not indicate the action used in the request:
- Usually the action is a CamelCase version of the method name - for example, for an ec2 client the method `describe_security_groups` correlates to the action `ec2:DescribeSecurityGroups` (see the sketch after this list).
- Refer to the documentation to identify the action.
c) If the error message indicates the resource ARN used in the request, limit the action to that resource.
d) If the error message does not indicate the resource ARN used:
- Determine if the action can be restricted to a resource by examining the documentation.
- If the action can be restricted, use the documentation to construct the ARN and add it to the policy.
3) Add the action or resource that caused the failure to [an IAM policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start). Wait a few minutes for your policy to update.
4) Run the tests again with this policy attached to your user or role.
5) If the tests still fail at the same place with the same error you will need to troubleshoot (see tips below). If the first test passes, repeat steps 2 and 3 for the next error. Repeat the process until the tests pass with a restrictive policy.
6) Share the minimum policy in a comment on your PR.
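As a rough illustration of the rule of thumb in step 2b, a sketch of the snake_case-to-CamelCase guess (an approximation, not an official mapping - some actions, such as `rds:DescribeDBInstances`, keep different capitalization):

```python
def guess_iam_action(service, method_name):
    # CamelCase the snake_case client method name and prefix the service name.
    return "{0}:{1}".format(service, ''.join(part.capitalize() for part in method_name.split('_')))


print(guess_iam_action('ec2', 'describe_security_groups'))  # ec2:DescribeSecurityGroups
print(guess_iam_action('sts', 'get_caller_identity'))       # sts:GetCallerIdentity
```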
Troubleshooting IAM policies:
- When you make changes to a policy, wait a few minutes for the policy to update before re-running the tests.
- Use the [policy simulator](https://policysim.aws.amazon.com/) to verify that each action (limited by resource when applicable) in your policy is allowed.
- If you're restricting actions to certain resources, replace resources temporarily with `*`. If the tests pass with wildcard resources, there is a problem with the resource definition in your policy.
- If the initial troubleshooting above doesn't provide any more insight, AWS may be using additional undisclosed resources and actions.
- Examine the AWS FullAccess policy for the service for clues.
- Re-read the AWS documentation, especially the [list of Actions, Resources and Condition Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html) for the various AWS services.
- Look at the [cloudonaut](https://iam.cloudonaut.io) documentation as a troubleshooting cross-reference.
- Use a search engine.
- Ask in the Ansible IRC channel #ansible-aws.
Some cases where tests should be marked as unsupported:
1) The tests take longer than 10 or 15 minutes to complete
2) The tests create expensive resources
3) The tests create inline policies

View file

@ -0,0 +1,72 @@
# (C) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
    callback: aws_resource_actions
    type: aggregate
    short_description: summarizes all "resource:actions" completed
    version_added: "2.8"
    description:
      - Ansible callback plugin for collecting the AWS actions completed by all boto3 modules using
        AnsibleAWSModule in a playbook. Botocore endpoint logs need to be enabled for those modules, which can
        be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults.
    requirements:
      - whitelisting in configuration - see examples section below for details.
'''

EXAMPLES = '''
example: >
  To enable, add this to your ansible.cfg file in the defaults block
    [defaults]
    callback_whitelist = aws_resource_actions
sample output: >
#
# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload',
#               's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload',
#               's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject']
#
sample output: >
#
# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags',
#               'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc']
#
'''

from ansible.plugins.callback import CallbackBase
from ansible.module_utils._text import to_native


class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.8
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'aws_resource_actions'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        self.aws_resource_actions = []
        super(CallbackModule, self).__init__()

    def extend_aws_resource_actions(self, result):
        if result.get('resource_actions'):
            self.aws_resource_actions.extend(result['resource_actions'])

    def runner_on_ok(self, host, res):
        self.extend_aws_resource_actions(res)

    def runner_on_failed(self, host, res, ignore_errors=False):
        self.extend_aws_resource_actions(res)

    def v2_runner_item_on_ok(self, result):
        self.extend_aws_resource_actions(result._result)

    def v2_runner_item_on_failed(self, result):
        self.extend_aws_resource_actions(result._result)

    def playbook_on_stats(self, stats):
        if self.aws_resource_actions:
            self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions)))
            self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions))
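For reference, a minimal sketch of the aggregation the callback performs: each task result's `resource_actions` list is collected, then de-duplicated and sorted for the final summary line. The sample results below are illustrative:

```python
# Illustrative task results; real ones come from AnsibleAWSModule.exit_json()/fail_json().
results = [
    {'changed': True, 'resource_actions': ['ec2:DescribeVpcs', 'ec2:CreateVpc']},
    {'changed': False, 'resource_actions': ['ec2:DescribeVpcs', 'sts:GetCallerIdentity']},
]

aws_resource_actions = []
for res in results:
    if res.get('resource_actions'):
        aws_resource_actions.extend(res['resource_actions'])

print("AWS ACTIONS: {0}".format(sorted(set(aws_resource_actions))))
# AWS ACTIONS: ['ec2:CreateVpc', 'ec2:DescribeVpcs', 'sts:GetCallerIdentity']
```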

View file

@ -9,6 +9,14 @@ class ModuleDocFragment(object):
    # AWS only documentation fragment
    DOCUMENTATION = r'''
options:
  debug_botocore_endpoint_logs:
    description:
      - Use a botocore.endpoint logger to parse the unique (rather than total) "resource:action" API calls made during a task, outputting
        the set to the resource_actions key in the task results. Use the aws_resource_actions callback to output the total list made during
        a playbook. The ANSIBLE_DEBUG_BOTOCORE_LOGS environment variable may also be used.
    type: bool
    default: 'no'
    version_added: "2.8"
  ec2_url:
    description:
      - URL to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints).

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  roles:
    - aws_eks

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  tasks:
    - name: try and use aws_eks_cluster module

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  roles:
    - ../../cloudformation_stack_set

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  roles:
    - ec2_instance

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  vars:
    resource_prefix: 'ansible-testing'

View file

@ -1,4 +1,5 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  roles:
    - ec2_launch_template

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  vars:
    resource_prefix: 'ansible-testing'
  module_defaults:

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  roles:
    - ecs_cluster

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  vars:
    resource_prefix: 'ansible-testing'

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  vars:
    resource_prefix: 'ansible-testing'

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  vars:
    resource_prefix: 'ansible-testing-fnd'

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  vars:
    resource_prefix: 'ansible-testing-fndf'

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  vars:
    resource_prefix: 'ansible-testing'

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  vars:
    resource_prefix: 'ansible-testing'

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  roles:
    - elb_target

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  tasks:
    - name: set up aws connection info

View file

@ -1,5 +1,6 @@
- hosts: localhost
  connection: local
  environment: "{{ ansible_test.environment }}"
  roles:
    - elb_target_facts

View file

@ -2,6 +2,7 @@
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  environment: "{{ ansible_test.environment }}"
  tasks:
    - block:

View file

@ -2,6 +2,7 @@
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  environment: "{{ ansible_test.environment }}"
  tasks:
    - block:

View file

@ -2,6 +2,7 @@
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  environment: "{{ ansible_test.environment }}"
  tasks:
    - block:

View file

@ -2,11 +2,12 @@
- hosts: 127.0.0.1
connection: local
gather_facts: no
environment: "{{ ansible_test.environment }}"
tasks:
- block:
- set_fact:
instance_id: 'rds-mariadb-{{ resource_prefix }}'
instance_id: '{{ resource_prefix }}-mariadb'
- name: assert group was populated with inventory but is empty
assert:
@ -16,36 +17,28 @@
# Create new host, add it to inventory and then terminate it without updating the cache
# TODO: Uncomment once rds_instance has been added
#- name: set connection information for all tasks
# set_fact:
# aws_connection_info: &aws_connection_info
# aws_access_key: '{{ aws_access_key }}'
# aws_secret_key: '{{ aws_secret_key }}'
# security_token: '{{ security_token }}'
# region: '{{ aws_region }}'
# no_log: yes
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: Use AWS CLI to create an RDS DB instance
command: "aws rds create-db-instance --db-instance-identifier '{{ instance_id }}' --engine 'mariadb' --db-instance-class 'db.t2.micro' --allocated-storage 20 --master-user-password '{{ resource_prefix }}' --master-username 'ansibletestuser'"
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
# TODO: Uncomment once rds_instance has been added
#- name: create minimal mariadb instance in default VPC and default subnet group
# rds_instance:
# state: present
# engine: mariadb
# instance_class: db.t2.micro
# storage: 20
# instance_id: 'rds-mariadb-{{ resource_prefix }}'
# master_username: 'ansible-test-user'
# master_password: 'password-{{ resource_prefix }}'
# <<: *aws_connection_info
# register: setup_instance
- name: create minimal mariadb instance in default VPC and default subnet group
rds_instance:
state: present
engine: mariadb
db_instance_class: db.t2.micro
allocated_storage: 20
instance_id: '{{ instance_id }}'
master_username: 'ansibletestuser'
master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
tags:
workload_type: other
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
@ -55,22 +48,12 @@
always:
- name: Use AWS CLI to delete the DB instance
command: "aws rds delete-db-instance --db-instance-identifier '{{ instance_id }}' --skip-final-snapshot"
ignore_errors: True
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
# TODO: Uncomment once rds_instance has been added
#- name: remove mariadb instance
# rds_instance:
# state: absent
# engine: mariadb
# skip_final_snapshot: yes
# instance_id: ansible-rds-mariadb-example
# <<: *aws_connection_info
# ignore_errors: yes
# when: setup_instance is defined
- name: remove mariadb instance
rds_instance:
state: absent
engine: mariadb
skip_final_snapshot: yes
instance_id: '{{ instance_id }}'
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined

View file

@ -2,6 +2,7 @@
- hosts: 127.0.0.1
connection: local
gather_facts: no
environment: "{{ ansible_test.environment }}"
tasks:
- block:
@ -18,35 +19,28 @@
# Create new host, refresh inventory, remove host, refresh inventory
#- name: set connection information for all tasks
# set_fact:
# aws_connection_info: &aws_connection_info
# aws_access_key: '{{ aws_access_key }}'
# aws_secret_key: '{{ aws_secret_key }}'
# security_token: '{{ security_token }}'
# region: '{{ aws_region }}'
# no_log: yes
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: Use AWS CLI to create an RDS DB instance
command: "aws rds create-db-instance --db-instance-identifier '{{ instance_id }}' --engine 'mariadb' --db-instance-class 'db.t2.micro' --allocated-storage 20 --master-user-password '{{ resource_prefix }}' --master-username 'ansibletestuser'"
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
# TODO: Uncomment once rds_instance has been added
#- name: create minimal mariadb instance in default VPC and default subnet group
# rds_instance:
# state: present
# engine: mariadb
# instance_class: db.t2.micro
# storage: 20
# instance_id: 'rds-mariadb-{{ resource_prefix }}'
# master_username: 'ansible-test-user'
# master_password: 'password-{{ resource_prefix }}'
# <<: *aws_connection_info
# register: setup_instance
- name: create minimal mariadb instance in default VPC and default subnet group
rds_instance:
state: present
engine: mariadb
db_instance_class: db.t2.micro
allocated_storage: 20
instance_id: '{{ instance_id }}'
master_username: 'ansibletestuser'
master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
tags:
workload_type: other
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
@ -57,23 +51,13 @@
- "groups.aws_rds | length == 1"
- "groups.aws_rds.0 == '{{ instance_id }}'"
- name: Use AWS CLI to delete the DB instance
command: "aws rds delete-db-instance --db-instance-identifier '{{ instance_id }}' --skip-final-snapshot"
ignore_errors: True
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
# TODO: Uncomment once rds_instance has been added
#- name: remove mariadb instance
# rds_instance:
# state: absent
# engine: mariadb
# skip_final_snapshot: yes
# instance_id: ansible-rds-mariadb-example
# <<: *aws_connection_info
- name: remove mariadb instance
rds_instance:
state: absent
engine: mariadb
skip_final_snapshot: yes
instance_id: '{{ instance_id }}'
<<: *aws_connection_info
- meta: refresh_inventory
@ -85,22 +69,12 @@
always:
- name: Use AWS CLI to delete the DB instance
command: "aws rds delete-db-instance --db-instance-identifier '{{ instance_id }}' --skip-final-snapshot"
ignore_errors: True
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
# TODO: Uncomment once rds_instance has been added
#- name: remove mariadb instance
# rds_instance:
# state: absent
# engine: mariadb
# skip_final_snapshot: yes
# instance_id: ansible-rds-mariadb-example
# <<: *aws_connection_info
# ignore_errors: yes
# when: setup_instance is defined
- name: remove mariadb instance
rds_instance:
state: absent
engine: mariadb
skip_final_snapshot: yes
instance_id: '{{ instance_id }}'
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined

View file

@ -2,44 +2,38 @@
- hosts: 127.0.0.1
connection: local
gather_facts: no
environment: "{{ ansible_test.environment }}"
tasks:
- block:
- set_fact:
instance_id: "{{ resource_prefix }}constructed"
instance_id: "{{ resource_prefix }}-mariadb"
# Create new host, refresh inventory
#- name: set connection information for all tasks
# set_fact:
# aws_connection_info: &aws_connection_info
# aws_access_key: '{{ aws_access_key }}'
# aws_secret_key: '{{ aws_secret_key }}'
# security_token: '{{ security_token }}'
# region: '{{ aws_region }}'
# no_log: yes
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
# TODO: Uncomment once rds_instance has been added
#- name: create minimal mariadb instance in default VPC and default subnet group
# rds_instance:
# state: present
# engine: mariadb
# instance_class: db.t2.micro
# storage: 20
# instance_id: 'rds-mariadb-{{ resource_prefix }}'
# master_username: 'ansible-test-user'
# master_password: 'password-{{ resource_prefix }}'
# <<: *aws_connection_info
# register: setup_instance
- name: Use AWS CLI to create an RDS DB instance
command: "aws rds create-db-instance --db-instance-identifier '{{ instance_id }}' --engine 'mariadb' --db-instance-class 'db.t2.micro' --allocated-storage 20 --master-user-password '{{ resource_prefix }}' --master-username 'ansibletestuser' --tags Key='workload_type',Value='other'"
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
- name: create minimal mariadb instance in default VPC and default subnet group
rds_instance:
state: present
engine: mariadb
db_instance_class: db.t2.micro
allocated_storage: 20
instance_id: '{{ resource_prefix }}-mariadb'
master_username: 'ansibletestuser'
master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
tags:
workload_type: other
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
- debug: var=groups
@ -51,26 +45,16 @@
- "groups | length == 6"
- groups.tag_workload_type_other
- groups.rds_mariadb
- groups.rds_parameter_group_default_mariadb10_0
- groups.rds_parameter_group_default_mariadb10_3
always:
- name: Use AWS CLI to delete the DB instance
command: "aws rds delete-db-instance --db-instance-identifier '{{ instance_id }}' --skip-final-snapshot"
ignore_errors: True
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
# TODO: Uncomment once rds_instance has been added
#- name: remove mariadb instance
# rds_instance:
# state: absent
# engine: mariadb
# skip_final_snapshot: yes
# instance_id: ansible-rds-mariadb-example
# <<: *aws_connection_info
# ignore_errors: yes
# when: setup_instance is defined
- name: remove mariadb instance
rds_instance:
state: absent
engine: mariadb
skip_final_snapshot: yes
instance_id: '{{ instance_id }}'
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined

View file

@ -9,34 +9,28 @@
- "'aws_rds' in groups"
- "not groups.aws_rds"
#- name: set connection information for all tasks
# set_fact:
# aws_connection_info: &aws_connection_info
# aws_access_key: "{{ aws_access_key }}"
# aws_secret_key: "{{ aws_secret_key }}"
# security_token: "{{ security_token }}"
# region: "{{ aws_region }}"
# no_log: yes
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
#- name: create minimal mariadb instance in default VPC and default subnet group
# rds_instance:
# state: present
# engine: mariadb
# instance_class: db.t2.micro
# storage: 20
# instance_id: 'rds-mariadb-{{ resource_prefix }}'
# master_username: 'ansible-test-user'
# master_password: 'password-{{ resource_prefix }}'
# <<: *aws_connection_info
# register: setup_instance
- name: Use AWS CLI to create an RDS DB instance
command: "aws rds create-db-instance --db-instance-identifier '{{ instance_id }}' --engine 'mariadb' --db-instance-class 'db.t2.micro' --allocated-storage 20 --master-user-password '{{ resource_prefix }}' --master-username 'ansibletestuser'"
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
- name: create minimal mariadb instance in default VPC and default subnet group
rds_instance:
state: present
engine: mariadb
db_instance_class: db.t2.micro
allocated_storage: 20
instance_id: 'rds-mariadb-{{ resource_prefix }}'
master_username: 'ansibletestuser'
master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
tags:
workload_type: other
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
@ -47,22 +41,13 @@
- "groups.aws_rds | length == 1"
- "groups.aws_rds.0 == '{{ resource_prefix }}'"
#- name: remove mariadb instance
# rds_instance:
# state: absent
# engine: mariadb
# skip_final_snapshot: yes
# instance_id: ansible-rds-mariadb-example
# <<: *aws_connection_info
- name: Use AWS CLI to delete the DB instance
command: "aws rds delete-db-instance --db-instance-identifier '{{ instance_id }}' --skip-final-snapshot"
ignore_errors: True
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
- name: remove mariadb instance
rds_instance:
state: absent
engine: mariadb
skip_final_snapshot: yes
instance_id: ansible-rds-mariadb-example
<<: *aws_connection_info
- meta: refresh_inventory
@ -74,21 +59,12 @@
always:
- name: Use AWS CLI to delete the DB instance
command: "aws rds delete-db-instance --db-instance-identifier '{{ instance_id }}' --skip-final-snapshot"
ignore_errors: True
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
AWS_DEFAULT_REGION: "{{ aws_region }}"
#- name: remove mariadb instance
# rds_instance:
# state: absent
# engine: mariadb
# skip_final_snapshot: yes
# instance_id: ansible-rds-mariadb-example
# <<: *aws_connection_info
# ignore_errors: yes
# when: setup_instance is defined
- name: remove mariadb instance
rds_instance:
state: absent
engine: mariadb
skip_final_snapshot: yes
instance_id: ansible-rds-mariadb-example
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined

View file

@ -4,3 +4,5 @@ aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
  - '{{ aws_region }}'
filters:
  db-instance-id: "{{ resource_prefix }}-mariadb"

View file

@ -7,3 +7,5 @@ aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
  - '{{ aws_region }}'
filters:
  db-instance-id: "{{ resource_prefix }}-mariadb"

View file

@ -11,3 +11,5 @@ keyed_groups:
    prefix: tag
  - key: engine
    prefix: rds
filters:
  db-instance-id: "{{ resource_prefix }}-mariadb"

View file

@ -99,8 +99,12 @@ class AwsCloudEnvironment(CloudEnvironment):
        ansible_vars.update(dict(parser.items('default')))

        env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}

        return CloudEnvironmentConfig(
            env_vars=env_vars,
            ansible_vars=ansible_vars,
            callback_plugins=['aws_resource_actions'],
        )

    def on_failure(self, target, tries):