#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: s3
short_description: idempotent S3 module that puts a file into an S3 bucket
description:
    - This module lets the user ensure the presence (or absence) of a given file in an S3 bucket. Once the key (file) exists in the bucket, a time-expired download URL is returned. This module has a dependency on python-boto.
version_added: "1.1"
options:
  bucket:
    description:
      - name of the bucket in which the key (file) should be present or absent.
    required: true
    default: null
    aliases: []
  state:
    description:
      - desired state for both bucket and key (file); either 'present' or 'absent'.
    default: null
    aliases: []
  path:
    description:
      - path to the key (file) which you wish to be present/absent in the bucket.
    required: false
    default: null
    aliases: []
  dest:
    description:
      - the key name to use in S3, if different from the basename of path.
    required: false
    default: null
    aliases: []
  expiry:
    description:
      - expiry period (in seconds) for returned download URL.
    required: false
    default: 600
    aliases: []
  overwrite:
    description:
      - force overwrite if the local and remote (MD5) checksums differ. Not supported for files uploaded to S3 with multipart upload, whose etags are not MD5 checksums.
    required: false
    default: false
    version_added: "1.2"
requirements: [ "boto" ]
author: Lester Wade, Ralph Tice
'''

EXAMPLES = '''
# Simple PUT operation
- s3:
    bucket: mybucket
    path: /path/to/file
    state: present

# Force an overwrite if the local and remote checksums don't match
- s3:
    bucket: mybucket
    path: /path/to/file
    state: present
    overwrite: yes
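
# Illustrative only (names and values below are placeholders): PUT under a
# different key name, with a one-hour expiry on the returned download URL
- s3:
    bucket: mybucket
    path: /path/to/file
    dest: uploads/renamed_file
    expiry: 3600
    state: present

# Illustrative only: remove the bucket and every key inside it. Note that the
# module still validates that 'path' points at an existing local file.
- s3:
    bucket: mybucket
    path: /path/to/file
    state: absent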
'''

import sys
import os
import urlparse
import hashlib

try:
    import boto
except ImportError:
    print "failed=True msg='boto required for this module'"
    sys.exit(1)

def upload_s3file(module, s3, bucket, key_name, path, expiry):
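    """Upload the local file at 'path' to 'key_name' in 'bucket', then exit
    the module run, returning a download URL valid for 'expiry' seconds."""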
    try:
        key = bucket.new_key(key_name)
        key.set_contents_from_filename(path)
        url = key.generate_url(expiry)
        # exit_json() terminates the module run, so no explicit sys.exit() is needed
        module.exit_json(msg="Put operation complete", url=url, changed=True)
    except s3.provider.storage_copy_error, e:
        module.fail_json(msg=str(e))

def main():
    module = AnsibleModule(
        argument_spec = dict(
            bucket         = dict(),
            path           = dict(),
            dest           = dict(),
            state          = dict(choices=['present', 'absent']),
            expiry         = dict(default=600, aliases=['expiration']),
            s3_url         = dict(aliases=['S3_URL']),
            ec2_secret_key = dict(aliases=['EC2_SECRET_KEY']),
            ec2_access_key = dict(aliases=['EC2_ACCESS_KEY']),
            overwrite      = dict(default=False, type='bool'),
        ),
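        # if any one of bucket, path or state is supplied, all three must be supplied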
        required_together=[ ['bucket', 'path', 'state'] ],
    )

    bucket_name = module.params.get('bucket')
    path = module.params.get('path')
    if path:
        path = os.path.expanduser(path)
    dest = module.params.get('dest')
    state = module.params.get('state')
    expiry = int(module.params['expiry'])
    s3_url = module.params.get('s3_url')
    ec2_secret_key = module.params.get('ec2_secret_key')
    ec2_access_key = module.params.get('ec2_access_key')
    overwrite = module.params.get('overwrite')

    # allow eucarc environment variables to be used if ansible vars aren't set
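    # ('eucarc' is the Eucalyptus RC file, which exports S3_URL, EC2_ACCESS_KEY
    # and EC2_SECRET_KEY into the environment)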
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']
    if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ:
        ec2_secret_key = os.environ['EC2_SECRET_KEY']
    if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ:
        ec2_access_key = os.environ['EC2_ACCESS_KEY']

    # If an S3 URL was provided (via parameter or environment), this is likely
    # to be Walrus (the Eucalyptus S3 clone), so change the connection method
    if s3_url:
        try:
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, ec2_access_key, ec2_secret_key)
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg=str(e))
    else:
        try:
            s3 = boto.connect_s3(ec2_access_key, ec2_secret_key)
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg=str(e))
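
    # Note: when no keys are supplied, boto performs its own credential lookup
    # (e.g. the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables
    # or the ~/.boto config file).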
   
    # README - Future features this module should have:
    # enhanced path (contents of a directory)
    # md5sum check of file vs. key in bucket
    # a user-friendly way to fetch the key (maybe a "fetch" parameter option)
    # persistent download URL if desired
    
    # Look up the bucket on the S3 connection; lookup() returns None when the bucket does not exist
    bucket = s3.lookup(bucket_name)
    bucket_exists = bucket is not None

    # Check that the local file exists
    if path:
        if not os.path.exists(path):
            module.fail_json(msg="Source %s cannot be found" % (path))

    # Default to setting the key to the same as the filename if dest is not provided.
    if dest is None:
        key_name = os.path.basename(path)
    else:
        key_name = dest

    # Check to see if the key already exists
    key_exists = False
    if bucket_exists is True:
        try:
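            # get_key() returns None when the key does not exist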
            key_check = bucket.get_key(key_name)
            key_exists = key_check is not None
        except s3.provider.storage_response_error, e:
            module.fail_json(msg=str(e))

    if key_exists is True and overwrite:
        # Retrieve MD5 checksums.
        md5_remote = key_check.etag[1:-1]           # strip quotation marks from etag: https://code.google.com/p/boto/issues/detail?id=391
        etag_multipart = md5_remote.find('-') != -1 # a '-' in the etag marks a multipart upload, whose etag is not an MD5 sum: https://forums.aws.amazon.com/message.jspa?messageID=222158
        if etag_multipart is True:
            module.fail_json(msg="Files uploaded with multipart to s3 are not supported with checksum. They do not contain a valid md5 checksum, use overwrite=no instead.")
        md5_local = hashlib.md5(open(path, 'rb').read()).hexdigest()
        md5_equal = md5_local == md5_remote
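        # md5_equal is defined only on this branch; it is read below only under
        # the same key_exists/overwrite conditions, so it cannot be unbound there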
        
    if state == 'present':
        if bucket_exists is True and key_exists is True:
            if overwrite is False:
                module.exit_json(msg="Bucket and key already exist", changed=False)
            # overwrite was requested: only upload when the checksums differ
            if md5_equal is True:
                module.exit_json(msg="Remote and local file checksums identical.", changed=False)
            else:
                upload_s3file(module, s3, bucket, key_name, path, expiry)

    # If the bucket doesn't exist yet, create it ...
    if state == 'present':
        if bucket_exists is False:
            try:
                bucket = s3.create_bucket(bucket_name)
                bucket_exists = True
                key_exists = False
                changed = True
            except s3.provider.storage_create_error, e:
                module.fail_json(msg = str(e))
    
    # If bucket now exists but key doesn't or overwrite is True, create the key
    if state == 'present':
        if bucket_exists is True and key_exists is False:
            upload_s3file(module, s3, bucket, key_name, path, expiry)

    # If state is absent and the bucket exists, empty it and then remove it.
    # S3 refuses to delete a non-empty bucket, so the keys must be deleted
    # first and delete_bucket() called only once the loop has finished.
    if state == 'absent':
        if bucket_exists is True:
            try:
                for contents in bucket.list():
                    bucket.delete_key(contents.name)
                s3.delete_bucket(bucket_name)
                module.exit_json(msg="Bucket and key removed.", changed=True)
            except s3.provider.storage_response_error, e:
                module.fail_json(msg=str(e))
        else:
            module.exit_json(msg="Bucket and key do not exist", changed=False)
    
    # TO DO - ADD BUCKET DOWNLOAD OPTION
    #    # If download is specified, fetch it
    #    if download:
    #        if bucket_exists is True and key_exists is True:
    #            try:
    #                getkey = bucket.lookup(key_name)
    #                getkey.get_contents_to_filename(path)
    #                url = getkey.generate_url(expiry)
    #                module.exit_json(msg="GET operation complete", url=url, changed=True)
    #                sys.exit(0)
    #            except s3.provider.storage_copy_error, e:
    #                module.fail_json(msg= str(e))

    # Every handled path above exits via exit_json/fail_json; reaching this
    # point means no valid state was supplied.
    module.fail_json(msg="expected state to be 'present' or 'absent'")

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

main()