Migrate s3_bucket module to boto3 (#37189)
parent 9bb1ee30bf
commit bab947d854
3 changed files with 416 additions and 215 deletions
@@ -25,6 +25,7 @@ short_description: Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
 description:
     - Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
 version_added: "2.0"
+requirements: [ boto3 ]
 author: "Rob White (@wimnat)"
 options:
   force:
@@ -105,48 +106,22 @@ EXAMPLES = '''

 import json
 import os
-import traceback
-import xml.etree.ElementTree as ET
+import time

 import ansible.module_utils.six.moves.urllib.parse as urlparse
 from ansible.module_utils.six import string_types
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec
-from ansible.module_utils.ec2 import sort_json_policy_dict, compare_policies
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry

 try:
-    import boto.ec2
-    from boto.s3.connection import OrdinaryCallingFormat, Location, S3Connection
-    from boto.s3.tagging import Tags, TagSet
-    from boto.exception import BotoServerError, S3CreateError, S3ResponseError, BotoClientError
-    HAS_BOTO = True
+    from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
 except ImportError:
-    HAS_BOTO = False
+    pass  # handled by AnsibleAWSModule


-def get_request_payment_status(bucket):
-
-    response = bucket.get_request_payment()
-    root = ET.fromstring(response)
-    for message in root.findall('.//{http://s3.amazonaws.com/doc/2006-03-01/}Payer'):
-        payer = message.text
-
-    return (payer != "BucketOwner")
-
-
-def create_tags_container(tags):
-
-    tag_set = TagSet()
-    tags_obj = Tags()
-    for key, val in tags.items():
-        tag_set.add_tag(key, val)
-
-    tags_obj.add_tag_set(tag_set)
-    return tags_obj
-
-
-def _create_or_update_bucket(connection, module, location):
+def create_or_update_bucket(s3_client, module, location):

     policy = module.params.get("policy")
     name = module.params.get("name")
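The dependency gate changes shape here: the old HAS_BOTO flag disappears because AnsibleAWSModule performs the boto3/botocore check itself, and call sites report API failures through fail_json_aws. A minimal sketch of the pattern (the helper name is illustrative, not part of the diff):

from ansible.module_utils.aws.core import AnsibleAWSModule

try:
    from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
    pass  # AnsibleAWSModule fails with a clean message when botocore is absent


def fetch_versioning(module, s3_client, name):
    # hypothetical helper: each boto3 call is wrapped so API errors surface
    # as a module failure carrying the full botocore exception
    try:
        return s3_client.get_bucket_versioning(Bucket=name)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to get bucket versioning")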
@@ -156,170 +131,325 @@ def _create_or_update_bucket(connection, module, location):
     changed = False

     try:
-        bucket = connection.get_bucket(name)
-    except S3ResponseError as e:
+        bucket_is_present = bucket_exists(s3_client, name)
+    except EndpointConnectionError as e:
+        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+    if not bucket_is_present:
         try:
-            bucket = connection.create_bucket(name, location=location)
-            changed = True
-        except (S3CreateError, BotoClientError) as e:
-            module.fail_json(msg=e.message)
+            bucket_changed = create_bucket(s3_client, name, location)
+            s3_client.get_waiter('bucket_exists').wait(Bucket=name)
+            changed = changed or bucket_changed
+        except WaiterError as e:
+            module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed while creating bucket")

     # Versioning
-    versioning_status = bucket.get_versioning_status()
+    try:
+        versioning_status = get_bucket_versioning(s3_client, name)
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to get bucket versioning")

     if versioning is not None:
-        if versioning and versioning_status.get('Versioning') != "Enabled":
-            try:
-                bucket.configure_versioning(versioning)
-                changed = True
-                versioning_status = bucket.get_versioning_status()
-            except S3ResponseError as e:
-                module.fail_json(msg=e.message, exception=traceback.format_exc())
-        elif not versioning and versioning_status.get('Versioning') == "Enabled":
-            try:
-                bucket.configure_versioning(versioning)
-                changed = True
-                versioning_status = bucket.get_versioning_status()
-            except S3ResponseError as e:
-                module.fail_json(msg=e.message, exception=traceback.format_exc())
+        required_versioning = None
+        if versioning and versioning_status.get('Status') != "Enabled":
+            required_versioning = 'Enabled'
+        elif not versioning and versioning_status.get('Status') == "Enabled":
+            required_versioning = 'Suspended'
+
+        if required_versioning:
+            try:
+                put_bucket_versioning(s3_client, name, required_versioning)
+                changed = True
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to update bucket versioning")
+
+            versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
+
+    # This output format is there to ensure compatibility with previous versions of the module
+    versioning_return_value = {
+        'Versioning': versioning_status.get('Status', 'Disabled'),
+        'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
+    }

     # Requester pays
-    requester_pays_status = get_request_payment_status(bucket)
-    if requester_pays_status != requester_pays:
-        if requester_pays:
-            payer = 'Requester'
-        else:
-            payer = 'BucketOwner'
-        bucket.set_request_payment(payer=payer)
-        changed = True
-        requester_pays_status = get_request_payment_status(bucket)
+    try:
+        requester_pays_status = get_bucket_request_payment(s3_client, name)
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to get bucket request payment")
+
+    payer = 'Requester' if requester_pays else 'BucketOwner'
+    if requester_pays_status != payer:
+        put_bucket_request_payment(s3_client, name, payer)
+        requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
+        if requester_pays_status is None:
+            # We have seen that it happens quite a lot of times that the put request was not taken into
+            # account, so we retry one more time
+            put_bucket_request_payment(s3_client, name, payer)
+            requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
+        changed = True

     # Policy
     try:
-        current_policy = json.loads(to_native(bucket.get_policy()))
-    except S3ResponseError as e:
-        if e.error_code == "NoSuchBucketPolicy":
-            current_policy = {}
-        else:
-            module.fail_json(msg=e.message)
+        current_policy = get_bucket_policy(s3_client, name)
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to get bucket policy")

     if policy is not None:
         if isinstance(policy, string_types):
             policy = json.loads(policy)

-        if not policy:
-            bucket.delete_policy()
-            # only show changed if there was already a policy
-            changed = bool(current_policy)
-
-        elif compare_policies(current_policy, policy):
-            changed = True
+        if not policy and current_policy:
             try:
-                bucket.set_policy(json.dumps(policy))
-                current_policy = json.loads(to_native(bucket.get_policy()))
-            except S3ResponseError as e:
-                module.fail_json(msg=e.message)
+                delete_bucket_policy(s3_client, name)
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to delete bucket policy")
+            current_policy = wait_policy_is_applied(module, s3_client, name, policy)
+            changed = True
+        elif compare_policies(current_policy, policy):
+            try:
+                put_bucket_policy(s3_client, name, policy)
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to update bucket policy")
+            current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
+            if current_policy is None:
+                # As for request payment, it happens quite a lot of times that the put request was not taken into
+                # account, so we retry one more time
+                put_bucket_policy(s3_client, name, policy)
+                current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
+            changed = True

     # Tags
     try:
-        current_tags = bucket.get_tags()
-    except S3ResponseError as e:
-        if e.error_code == "NoSuchTagSet":
-            current_tags = None
-        else:
-            module.fail_json(msg=e.message)
-
-    if current_tags is None:
-        current_tags_dict = {}
-    else:
-        current_tags_dict = dict((t.key, t.value) for t in current_tags[0])
+        current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
+    except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to get bucket tags")

     if tags is not None:
         if current_tags_dict != tags:
-            try:
-                if tags:
-                    bucket.set_tags(create_tags_container(tags))
-                else:
-                    bucket.delete_tags()
-                current_tags_dict = tags
-                changed = True
-            except S3ResponseError as e:
-                module.fail_json(msg=e.message)
+            if tags:
+                try:
+                    put_bucket_tagging(s3_client, name, tags)
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Failed to update bucket tags")
+            else:
+                try:
+                    delete_bucket_tagging(s3_client, name)
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Failed to delete bucket tags")
+            wait_tags_are_applied(module, s3_client, name, tags)
+            current_tags_dict = tags
+            changed = True

-    module.exit_json(changed=changed, name=bucket.name, versioning=versioning_status,
-                     requester_pays=requester_pays_status, policy=current_policy, tags=current_tags_dict)
+    module.exit_json(changed=changed, name=name, versioning=versioning_return_value,
+                     requester_pays=requester_pays, policy=current_policy, tags=current_tags_dict)

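The requester-pays and policy writes above share one shape: put, poll until the read view matches, and re-issue the put once if it never converges. Distilled into a standalone sketch (put_fn and wait_fn are hypothetical stand-ins for pairs such as put_bucket_policy and wait_policy_is_applied):

def apply_and_wait(put_fn, wait_fn):
    # first pass polls without failing, since S3 reads are eventually consistent
    put_fn()
    result = wait_fn(should_fail=False)
    if result is None:
        # the put was apparently dropped; retry once and fail hard this time
        put_fn()
        result = wait_fn(should_fail=True)
    return result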
-def _destroy_bucket(connection, module):
+def bucket_exists(s3_client, bucket_name):
+    # head_bucket appeared to be really inconsistent, so we use list_buckets instead,
+    # and loop over all the buckets, even if we know it's less performant :(
+    all_buckets = s3_client.list_buckets(Bucket=bucket_name)['Buckets']
+    return any(bucket['Name'] == bucket_name for bucket in all_buckets)
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def create_bucket(s3_client, bucket_name, location):
+    try:
+        configuration = {}
+        if location not in ('us-east-1', None):
+            configuration['LocationConstraint'] = location
+        if len(configuration) > 0:
+            s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
+        else:
+            s3_client.create_bucket(Bucket=bucket_name)
+        return True
+    except ClientError as e:
+        error_code = e.response['Error']['Code']
+        if error_code == 'BucketAlreadyOwnedByYou':
+            # We should never get here since we check the bucket presence before calling the create_or_update_bucket
+            # method. However, the AWS API sometimes fails to report bucket presence, so we catch this exception
+            return False
+        else:
+            raise e
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_tagging(s3_client, bucket_name, tags):
+    s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_policy(s3_client, bucket_name, policy):
+    s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_policy(s3_client, bucket_name):
+    s3_client.delete_bucket_policy(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_policy(s3_client, bucket_name):
+    try:
+        current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
+            current_policy = None
+        else:
+            raise e
+    return current_policy
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_request_payment(s3_client, bucket_name, payer):
+    s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_request_payment(s3_client, bucket_name):
+    return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_versioning(s3_client, bucket_name):
+    return s3_client.get_bucket_versioning(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+    s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_tagging(s3_client, bucket_name):
+    s3_client.delete_bucket_tagging(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def delete_bucket(s3_client, bucket_name):
+    try:
+        s3_client.delete_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchBucket':
+            # This means the bucket should have been in a deleting state when we checked its existence
+            # We just ignore the error
+            pass
+        else:
+            raise e
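Every helper above is decorated with AWSRetry.exponential_backoff, which retries throttled boto3 calls with growing delays; catch_extra_error_codes=['NoSuchBucket'] extends the retry to the window in which a freshly created bucket is not yet visible. A sketch of the same decorator on a call this module does not use (get_bucket_acl is illustrative):

from ansible.module_utils.ec2 import AWSRetry

@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_acl(s3_client, bucket_name):
    # retried with exponential backoff, and also while S3 transiently
    # answers NoSuchBucket for a bucket that was just created
    return s3_client.get_bucket_acl(Bucket=bucket_name)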
+def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+    for dummy in range(0, 12):
+        try:
+            current_policy = get_bucket_policy(s3_client, bucket_name)
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket policy")
+
+        if compare_policies(current_policy, expected_policy):
+            time.sleep(5)
+        else:
+            return current_policy
+    if should_fail:
+        module.fail_json(msg="Bucket policy failed to apply in the expected time")
+    else:
+        return None
+
+
+def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+    for dummy in range(0, 12):
+        try:
+            requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket request payment")
+        if requester_pays_status != expected_payer:
+            time.sleep(5)
+        else:
+            return requester_pays_status
+    if should_fail:
+        module.fail_json(msg="Bucket request payment failed to apply in the expected time")
+    else:
+        return None
+
+
+def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+    for dummy in range(0, 12):
+        try:
+            versioning_status = get_bucket_versioning(s3_client, bucket_name)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
+        if versioning_status.get('Status') != required_versioning:
+            time.sleep(5)
+        else:
+            return versioning_status
+    module.fail_json(msg="Bucket versioning failed to apply in the expected time")
+
+
+def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+    for dummy in range(0, 12):
+        try:
+            current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket tags")
+        if current_tags_dict != expected_tags_dict:
+            time.sleep(5)
+        else:
+            return
+    module.fail_json(msg="Bucket tags failed to apply in the expected time")
+
+
+def get_current_bucket_tags_dict(s3_client, bucket_name):
+    try:
+        current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchTagSet':
+            return {}
+        raise e
+
+    return boto3_tag_list_to_ansible_dict(current_tags)
+
+
+def paginated_list(s3_client, **pagination_params):
+    pg = s3_client.get_paginator('list_objects_v2')
+    for page in pg.paginate(**pagination_params):
+        yield [data['Key'] for data in page.get('Contents', [])]
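paginated_list turns boto3's list_objects_v2 paginator into a generator of key pages; destroy_bucket below consumes it to empty a bucket with batched delete_objects calls. Consumed outside the module it would look roughly like this (client and bucket name are hypothetical):

import boto3

s3 = boto3.client('s3')
# each iteration yields one page of keys (list_objects_v2 returns at most
# 1000 objects per page), so deletes can be batched page by page
for keys in paginated_list(s3, Bucket='example-bucket'):
    if keys:
        s3.delete_objects(Bucket='example-bucket',
                          Delete={'Objects': [{'Key': k} for k in keys]})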
+
+
+def destroy_bucket(s3_client, module):
+
     force = module.params.get("force")
     name = module.params.get("name")
-    changed = False

     try:
-        bucket = connection.get_bucket(name)
-    except S3ResponseError as e:
-        if e.error_code != "NoSuchBucket":
-            module.fail_json(msg=e.message)
-        else:
-            # Bucket already absent
-            module.exit_json(changed=changed)
+        bucket_is_present = bucket_exists(s3_client, name)
+    except EndpointConnectionError as e:
+        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+    if not bucket_is_present:
+        module.exit_json(changed=False)

     if force:
+        # if there are contents then we need to delete them before we can delete the bucket
         try:
-            # Empty the bucket
-            for key in bucket.list():
-                key.delete()
-
-        except BotoServerError as e:
-            module.fail_json(msg=e.message)
+            for keys in paginated_list(s3_client, Bucket=name):
+                formatted_keys = [{'Key': key} for key in keys]
+                if formatted_keys:
+                    s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed while deleting bucket")

     try:
-        bucket = connection.delete_bucket(name)
-        changed = True
-    except S3ResponseError as e:
-        module.fail_json(msg=e.message)
+        delete_bucket(s3_client, name)
+        s3_client.get_waiter('bucket_not_exists').wait(Bucket=name)
+    except WaiterError as e:
+        module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to delete bucket")

-    module.exit_json(changed=changed)
-
-
-def _create_or_update_bucket_ceph(connection, module, location):
-    # TODO: add update
-
-    name = module.params.get("name")
-
-    changed = False
-
-    try:
-        bucket = connection.get_bucket(name)
-    except S3ResponseError as e:
-        try:
-            bucket = connection.create_bucket(name, location=location)
-            changed = True
-        except (S3CreateError, BotoClientError) as e:
-            module.fail_json(msg=e.message)
-
-    if bucket:
-        module.exit_json(changed=changed)
-    else:
-        module.fail_json(msg='Unable to create bucket, no error from the API')
-
-
-def _destroy_bucket_ceph(connection, module):
-
-    _destroy_bucket(connection, module)
-
-
-def create_or_update_bucket(connection, module, location, flavour='aws'):
-    if flavour == 'ceph':
-        _create_or_update_bucket_ceph(connection, module, location)
-    else:
-        _create_or_update_bucket(connection, module, location)
-
-
-def destroy_bucket(connection, module, flavour='aws'):
-    if flavour == 'ceph':
-        _destroy_bucket_ceph(connection, module)
-    else:
-        _destroy_bucket(connection, module)
+    module.exit_json(changed=True)


 def is_fakes3(s3_url):
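Both lifecycle paths now pair the mutating call with a built-in botocore waiter (bucket_exists on create, bucket_not_exists on delete) instead of assuming the API reflects the change immediately. As a standalone sketch with plain boto3 (bucket name hypothetical):

import boto3

s3 = boto3.client('s3')
s3.create_bucket(Bucket='example-bucket')
# poll until S3 actually reports the bucket before configuring it
s3.get_waiter('bucket_exists').wait(Bucket='example-bucket')

s3.delete_bucket(Bucket='example-bucket')
# and until it is really gone before reporting success
s3.get_waiter('bucket_not_exists').wait(Bucket='example-bucket')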
@@ -341,6 +471,32 @@ def is_walrus(s3_url):
     return False


+def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
+    if s3_url and ceph:  # TODO - test this
+        ceph = urlparse(s3_url)
+        params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+    elif is_fakes3(s3_url):
+        fakes3 = urlparse(s3_url)
+        port = fakes3.port
+        if fakes3.scheme == 'fakes3s':
+            protocol = "https"
+            if port is None:
+                port = 443
+        else:
+            protocol = "http"
+            if port is None:
+                port = 80
+        params = dict(module=module, conn_type='client', resource='s3', region=location,
+                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+                      use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+    elif is_walrus(s3_url):
+        walrus = urlparse(s3_url).hostname
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=walrus, **aws_connect_kwargs)
+    else:
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+    return boto3_conn(**params)
+
+
 def main():

     argument_spec = ec2_argument_spec()
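get_s3_client above funnels every flavour (AWS, Ceph RGW, Walrus, fakes3) into one boto3_conn call that varies only in endpoint and use_ssl. Reduced to plain boto3, the Ceph RGW branch amounts to roughly this (endpoint URL hypothetical):

import boto3

# the URL scheme decides use_ssl, exactly as ceph.scheme == 'https' does above
s3 = boto3.client('s3',
                  region_name='us-east-1',
                  endpoint_url='https://rgw.example.com:8443',
                  use_ssl=True)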
@@ -358,83 +514,45 @@ def main():
         )
     )

-    module = AnsibleModule(argument_spec=argument_spec)
+    module = AnsibleAWSModule(argument_spec=argument_spec)

-    if not HAS_BOTO:
-        module.fail_json(msg='boto required for this module')
-
-    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

     if region in ('us-east-1', '', None):
-        # S3ism for the US Standard region
-        location = Location.DEFAULT
+        # default to US Standard region
+        location = 'us-east-1'
     else:
         # Boto uses symbolic names for locations but region strings will
         # actually work fine for everything except us-east-1 (US Standard)
         location = region

     s3_url = module.params.get('s3_url')
+    ceph = module.params.get('ceph')

     # allow eucarc environment variables to be used if ansible vars aren't set
     if not s3_url and 'S3_URL' in os.environ:
         s3_url = os.environ['S3_URL']

-    ceph = module.params.get('ceph')
-
     if ceph and not s3_url:
         module.fail_json(msg='ceph flavour requires s3_url')

-    flavour = 'aws'
-
-    # bucket names with .'s in them need to use the calling_format option,
-    # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
-    # for more details.
-    aws_connect_params['calling_format'] = OrdinaryCallingFormat()
-
     # Look at s3_url and tweak connection settings
-    # if connecting to Walrus or fakes3
-    try:
-        if s3_url and ceph:
-            ceph = urlparse.urlparse(s3_url)
-            connection = boto.connect_s3(
-                host=ceph.hostname,
-                port=ceph.port,
-                is_secure=ceph.scheme == 'https',
-                **aws_connect_params
-            )
-            flavour = 'ceph'
-        elif is_fakes3(s3_url):
-            fakes3 = urlparse.urlparse(s3_url)
-            connection = S3Connection(
-                is_secure=fakes3.scheme == 'fakes3s',
-                host=fakes3.hostname,
-                port=fakes3.port,
-                **aws_connect_params
-            )
-        elif is_walrus(s3_url):
-            del aws_connect_params['calling_format']
-            walrus = urlparse.urlparse(s3_url).hostname
-            connection = boto.connect_walrus(walrus, **aws_connect_params)
-        else:
-            connection = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_params)
-            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
-            if connection is None:
-                connection = boto.connect_s3(**aws_connect_params)
-
-    except boto.exception.NoAuthHandlerFound as e:
-        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
-    except Exception as e:
-        module.fail_json(msg='Failed to connect to S3: %s' % str(e))
-
-    if connection is None:  # this should never happen
+    # if connecting to Ceph RGW, Walrus or fakes3
+    if s3_url:
+        for key in ['validate_certs', 'security_token', 'profile_name']:
+            aws_connect_kwargs.pop(key, None)
+    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
+
+    if s3_client is None:  # this should never happen
         module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

     state = module.params.get("state")

     if state == 'present':
-        create_or_update_bucket(connection, module, location, flavour=flavour)
+        create_or_update_bucket(s3_client, module, location)
     elif state == 'absent':
-        destroy_bucket(connection, module, flavour=flavour)
+        destroy_bucket(s3_client, module)


 if __name__ == '__main__':
     main()
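For reference, the boto3-era bootstrap that main() now performs, stripped of the flavour handling, is a short sequence over the same module_utils helpers. A sketch (argument details elided; the boto3_conn call paraphrases what get_s3_client does for plain AWS):

from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info

argument_spec = ec2_argument_spec()
module = AnsibleAWSModule(argument_spec=argument_spec)  # checks boto3/botocore itself

# boto3=True returns kwargs shaped for boto3_conn rather than for boto2
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
s3_client = boto3_conn(module, conn_type='client', resource='s3',
                       region=region, endpoint=ec2_url, **aws_connect_kwargs)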
@@ -1 +1,2 @@
 cloud/aws
+posix/ci/cloud/group4/aws
@@ -86,6 +86,10 @@
           - output.policy.Statement[0].Sid == 'AddPerm'

     # ============================================================
+    - name: Pause to help with s3 bucket eventual consistency
+      pause:
+        seconds: 10
+
     - name: Try to update the same complex s3_bucket
       s3_bucket:
         name: "{{ resource_prefix }}-testbucket-ansible-complex"
@@ -127,6 +131,10 @@
           - output.policy.Statement[0].Sid == 'AddPerm'

     # ============================================================
+    - name: Pause to help with s3 bucket eventual consistency
+      pause:
+        seconds: 10
+
     - name: Update attributes for s3_bucket
       s3_bucket:
         name: "{{ resource_prefix }}-testbucket-ansible-complex"
@@ -146,7 +154,7 @@
           - output.name == '{{ resource_prefix }}-testbucket-ansible-complex'
           - not output.requester_pays
           - output.versioning.MfaDelete == 'Disabled'
-          - output.versioning.Versioning == 'Suspended'
+          - output.versioning.Versioning in ['Suspended', 'Disabled']
           - output.tags.example == 'tag1-udpated'
           - output.tags.another == 'tag2'
           - output.policy.Statement[0].Action == 's3:GetObject'
@@ -155,6 +163,65 @@
           - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ resource_prefix }}-testbucket-ansible-complex/*'
           - output.policy.Statement[0].Sid == 'AddPerm'

+    # ============================================================
+    - name: Pause to help with s3 bucket eventual consistency
+      pause:
+        seconds: 10
+
+    - name: Remove a tag for s3_bucket
+      s3_bucket:
+        name: "{{ resource_prefix }}-testbucket-ansible-complex"
+        state: present
+        policy: "{{ lookup('template','policy.json') }}"
+        requester_pays: no
+        versioning: no
+        tags:
+          example: tag1-udpated
+        <<: *aws_connection_info
+      register: output
+
+    - assert:
+        that:
+          - output.changed
+          - output.tags.example == 'tag1-udpated'
+          - "'another' not in output.tags"
+
+    # ============================================================
+    - name: Pause to help with s3 bucket eventual consistency
+      pause:
+        seconds: 10
+
+    - name: Do not specify any tag to ensure previous tags are not removed
+      s3_bucket:
+        name: "{{ resource_prefix }}-testbucket-ansible-complex"
+        state: present
+        policy: "{{ lookup('template','policy.json') }}"
+        requester_pays: no
+        versioning: no
+        <<: *aws_connection_info
+      register: output
+
+    - assert:
+        that:
+          - not output.changed
+          - output.tags.example == 'tag1-udpated'
+
+    # ============================================================
+    - name: Remove all tags
+      s3_bucket:
+        name: "{{ resource_prefix }}-testbucket-ansible-complex"
+        state: present
+        policy: "{{ lookup('template','policy.json') }}"
+        requester_pays: no
+        versioning: no
+        tags: {}
+        <<: *aws_connection_info
+      register: output
+
+    - assert:
+        that:
+          - output.changed
+          - output.tags == {}
+
     # ============================================================
     - name: Delete s3_bucket
@@ -181,6 +248,8 @@
           - output.changed
          - output.name == '{{ resource_prefix }}.testbucket.ansible'

+
+    # ============================================================
     - name: Delete s3_bucket
       s3_bucket:
         name: "{{ resource_prefix }}.testbucket.ansible"
@@ -192,6 +261,18 @@
         that:
           - output.changed

+    # ============================================================
+    - name: Try to delete a missing bucket (should not fail)
+      s3_bucket:
+        name: "{{ resource_prefix }}.testbucket.ansible.missing"
+        state: absent
+        <<: *aws_connection_info
+      register: output
+
+    - assert:
+        that:
+          - not output.changed
+
     # ============================================================
   always:
     - name: Ensure all buckets are deleted
@@ -199,6 +280,7 @@
         name: "{{item}}"
         state: absent
         <<: *aws_connection_info
+      ignore_errors: yes
       with_items:
         - "{{ resource_prefix }}-testbucket-ansible"
         - "{{ resource_prefix }}-testbucket-ansible-complex"