
s3_sync: removing irrelevant s3.list_buckets() call - fixes #23409 (#23492)

* Removing irrelevant s3 call

Fix exception handling

Make s3_sync pep8 and remove from legacy file
Sloane Hertel 2017-04-18 16:49:10 -04:00 committed by GitHub
parent 3f62dfda92
commit d63af51f90
2 changed files with 44 additions and 38 deletions
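
Why the s3.list_buckets() probe was irrelevant: creating a boto3 client makes no API call and does not validate credentials, so the old probe only forced a NoCredentialsError early while also requiring the s3:ListAllMyBuckets permission; the first real S3 call fails in exactly the same way. A minimal sketch of that behaviour, assuming an illustrative bucket name and region that are not taken from this module:

import boto3
import botocore.exceptions

s3 = boto3.client('s3', region_name='us-east-1')  # no credential check happens here
try:
    s3.head_bucket(Bucket='my-example-bucket')  # the first real call surfaces credential problems
except botocore.exceptions.NoCredentialsError as e:
    print("fail_json would report: %s" % e)
except botocore.exceptions.ClientError as e:
    print(e.response['Error']['Code'])  # e.g. '403' or '404'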


@@ -185,19 +185,20 @@ uploaded:
 '''
 import os
 import stat as osstat  # os.stat constants
 import mimetypes
 import datetime
 from dateutil import tz
 import hashlib
 import fnmatch
+import traceback
 # import module snippets
 from ansible.module_utils.basic import AnsibleModule
 from ansible.module_utils.ec2 import ec2_argument_spec
 # import a class, otherwise we'll use a fully qualified path
-#from ansible.module_utils.ec2 import AWSRetry
+# from ansible.module_utils.ec2 import AWSRetry
 import ansible.module_utils.ec2
@ -207,6 +208,7 @@ try:
except ImportError: except ImportError:
HAS_BOTO3 = False HAS_BOTO3 = False
def boto_exception(err): def boto_exception(err):
'''generic error message handler''' '''generic error message handler'''
if hasattr(err, 'error_message'): if hasattr(err, 'error_message'):
@@ -242,6 +244,8 @@ def boto_exception(err):
 # along with calculate_multipart_etag. If not, see <http://www.gnu.org/licenses/>.
 DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024
+
+
 def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
     """
@@ -255,7 +259,7 @@ def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
     md5s = []
-    with open(source_path,'rb') as fp:
+    with open(source_path, 'rb') as fp:
         while True:
             data = fp.read(chunk_size)
@@ -266,11 +270,11 @@ def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
     if len(md5s) == 1:
         new_etag = '"{}"'.format(md5s[0].hexdigest())
     else:  # > 1
         digests = b"".join(m.digest() for m in md5s)
         new_md5 = hashlib.md5(digests)
-        new_etag = '"{}-{}"'.format(new_md5.hexdigest(),len(md5s))
+        new_etag = '"{}-{}"'.format(new_md5.hexdigest(), len(md5s))
     return new_etag
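
For reference, S3 reports a multipart upload's ETag as the MD5 of the concatenated per-part MD5 digests followed by "-<part count>"; this helper reproduces that locally so the checksum strategy can compare against the remote value. A hedged usage sketch (the path and the sample value are illustrative, not taken from the module):

local_etag = calculate_multipart_etag('/var/www/site/video.bin')
# a file spanning three 5 MiB chunks yields something shaped like
# '"9b2cf535f27731c974343645a3985328-3"'; a file that fits in a single chunk
# yields a plain quoted MD5 with no "-N" suffix, matching what S3 returns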
@@ -304,16 +308,17 @@ def gather_files(fileroot, include=None, exclude=None):
             f_size = fstat[osstat.ST_SIZE]
             f_modified_epoch = fstat[osstat.ST_MTIME]
             ret.append({
-                'fullpath':fullpath,
-                'chopped_path':chopped_path,
+                'fullpath': fullpath,
+                'chopped_path': chopped_path,
                 'modified_epoch': f_modified_epoch,
-                'bytes': f_size
+                'bytes': f_size,
             })
         # dirpath = path *to* the directory
         # dirnames = subdirs *in* our directory
         # filenames
     return ret
+
 def calculate_s3_path(filelist, key_prefix=''):
     ret = []
     for fileentry in filelist:
@ -323,6 +328,7 @@ def calculate_s3_path(filelist, key_prefix=''):
ret.append(retentry) ret.append(retentry)
return ret return ret
def calculate_local_etag(filelist, key_prefix=''): def calculate_local_etag(filelist, key_prefix=''):
'''Really, "calculate md5", but since AWS uses their own format, we'll just call '''Really, "calculate md5", but since AWS uses their own format, we'll just call
it a "local etag". TODO optimization: only calculate if remote key exists.''' it a "local etag". TODO optimization: only calculate if remote key exists.'''
@@ -334,6 +340,7 @@ def calculate_local_etag(filelist, key_prefix=''):
         ret.append(retentry)
     return ret
+
 def determine_mimetypes(filelist, override_map):
     ret = []
     for fileentry in filelist:
@ -357,6 +364,7 @@ def determine_mimetypes(filelist, override_map):
return ret return ret
def head_s3(s3, bucket, s3keys): def head_s3(s3, bucket, s3keys):
retkeys = [] retkeys = []
for entry in s3keys: for entry in s3keys:
@@ -372,11 +380,12 @@ def head_s3(s3, bucket, s3keys):
                 pass
             else:
                 raise Exception(err)
-            #error_msg = boto_exception(err)
-            #return {'error': error_msg}
+            # error_msg = boto_exception(err)
+            # return {'error': error_msg}
         retkeys.append(retentry)
     return retkeys
+
 def filter_list(s3, bucket, s3filelist, strategy):
     keeplist = list(s3filelist)
@@ -398,20 +407,20 @@ def filter_list(s3, bucket, s3filelist, strategy):
                 else:
                     # file etags don't match, keep the entry.
                     pass
             else:  # we don't have an etag, so we'll keep it.
                 pass
     elif strategy == 'date_size':
         for entry in keeplist:
             if entry.get('s3_head'):
-                #fstat = entry['stat']
+                # fstat = entry['stat']
                 local_modified_epoch = entry['modified_epoch']
                 local_size = entry['bytes']
                 # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward.
-                #remote_modified_epoch = entry['s3_head']['LastModified'].timestamp()
+                # remote_modified_epoch = entry['s3_head']['LastModified'].timestamp()
                 remote_modified_datetime = entry['s3_head']['LastModified']
                 delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()))
-                remote_modified_epoch = delta.seconds + (delta.days*86400)
+                remote_modified_epoch = delta.seconds + (delta.days * 86400)
                 remote_size = entry['s3_head']['ContentLength']
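
The delta arithmetic above is the Python 2 safe stand-in for datetime.timestamp(), which only exists on Python 3. A quick equivalence check, with an illustrative timestamp:

import datetime
from dateutil import tz

remote = datetime.datetime(2017, 4, 18, 20, 49, 10, tzinfo=tz.tzutc())
delta = remote - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())
assert delta.seconds + (delta.days * 86400) == int(delta.total_seconds())  # 1492548550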
@@ -429,6 +438,7 @@ def filter_list(s3, bucket, s3filelist, strategy):
     # prune 'please skip' entries, if any.
     return [x for x in keeplist if not x.get('skip_flag')]
+
 def upload_files(s3, bucket, filelist, params):
     ret = []
     for entry in filelist:
@@ -437,6 +447,7 @@ def upload_files(s3, bucket, filelist, params):
         }
         if params.get('permission'):
             args['ACL'] = params['permission']
+        # if this fails exception is caught in main()
         s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None)
         ret.append(entry)
     return ret
@@ -445,17 +456,17 @@ def upload_files(s3, bucket, filelist, params):
 def main():
     argument_spec = ec2_argument_spec()
     argument_spec.update(dict(
-        mode = dict(choices=['push'], default='push'),
-        file_change_strategy = dict(choices=['force','date_size','checksum'], default='date_size'),
-        bucket = dict(required=True),
-        key_prefix = dict(required=False, default=''),
-        file_root = dict(required=True, type='path'),
-        permission = dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read',
-                          'bucket-owner-full-control']),
-        retries = dict(required=False),
-        mime_map = dict(required=False, type='dict'),
-        exclude = dict(required=False, default=".*"),
-        include = dict(required=False, default="*"),
+        mode=dict(choices=['push'], default='push'),
+        file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'),
+        bucket=dict(required=True),
+        key_prefix=dict(required=False, default=''),
+        file_root=dict(required=True, type='path'),
+        permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read',
+                                                 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
+        retries=dict(required=False),
+        mime_map=dict(required=False, type='dict'),
+        exclude=dict(required=False, default=".*"),
+        include=dict(required=False, default="*"),
         # future options: cache_control (string or map, perhaps), encoding, metadata, storage_class, retries
     )
     )
@@ -469,13 +480,10 @@ def main():
     result = {}
     mode = module.params['mode']
 
-    try:
-        region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
-        s3 = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_kwargs)
-        s3.list_buckets()
-    except botocore.exceptions.NoCredentialsError as e:
-        module.fail_json(msg=str(e))
+    region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
+    if not region:
+        module.fail_json(msg="Region must be specified")
+    s3 = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_kwargs)
 
     if mode == 'push':
         try:
@@ -489,11 +497,10 @@ def main():
             # mark changed if we actually upload something.
             if result.get('uploads') and len(result.get('uploads')):
                 result['changed'] = True
-            #result.update(filelist=actionable_filelist)
-        except Exception as err:
+            # result.update(filelist=actionable_filelist)
+        except botocore.exceptions.ClientError as err:
             error_msg = boto_exception(err)
-            import traceback # traces get swallowed by Ansible.
-            module.fail_json(msg=error_msg, traceback=traceback.format_exc().splitlines())
+            module.fail_json(msg=error_msg, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
 
     module.exit_json(**result)
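
The narrowed except clause relies on the structured err.response dict that botocore attaches to ClientError; camel_dict_to_snake_dict() converts its CamelCase keys into snake_case fields that are merged into the failure result. A minimal sketch of that failure path, assuming an illustrative client, bucket, and key:

import boto3
import botocore.exceptions

s3 = boto3.client('s3')
try:
    s3.put_object(Bucket='my-example-bucket', Key='site/index.html', Body=b'hello')
except botocore.exceptions.ClientError as err:
    print(err.response['Error']['Code'])  # e.g. 'AccessDenied'
    print(err.response['ResponseMetadata']['HTTPStatusCode'])  # e.g. 403
    # camel_dict_to_snake_dict(err.response) turns these into roughly
    # {'error': {'code': 'AccessDenied', ...}, 'response_metadata': {'http_status_code': 403, ...}},
    # which module.fail_json merges into the failure result alongside msg and the traceback.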


@@ -213,7 +213,6 @@ lib/ansible/modules/cloud/amazon/s3.py
 lib/ansible/modules/cloud/amazon/s3_bucket.py
 lib/ansible/modules/cloud/amazon/s3_lifecycle.py
 lib/ansible/modules/cloud/amazon/s3_logging.py
-lib/ansible/modules/cloud/amazon/s3_sync.py
 lib/ansible/modules/cloud/amazon/s3_website.py
 lib/ansible/modules/cloud/amazon/sns_topic.py
 lib/ansible/modules/cloud/amazon/sts_assume_role.py