
Merge pull request #4395 from erjohnso/devel

adding Google Compute Engine modules and inventory plugin
Michael DeHaan committed on 2013-10-17 07:30:55 -07:00
commit baa13a5cc7
7 changed files with 2321 additions and 0 deletions

421
library/cloud/gce Normal file

@@ -0,0 +1,421 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/products/compute-engine) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance
required: false
default: "debian-7"
aliases: []
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
aliases: []
machine_type:
description:
- machine type to use for the instance; 'n1-standard-1' is used by default
required: false
default: "n1-standard-1"
aliases: []
metadata:
description:
- a hash/dictionary of custom data for the instance; '{"key":"value",...}'
required: false
default: null
aliases: []
name:
description:
- identifier when working with a single instance
required: false
aliases: []
network:
description:
- name of the network; 'default' will be used if not specified
required: false
default: "default"
aliases: []
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
aliases: []
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
aliases: []
zone:
description:
- the GCE zone to use
required: false
default: "us-central1-a"
choices: ["us-central1-a", "us-central1-b", "us-central2-a", "europe-west1-a", "europe-west1-b"]
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 7 instance in the
# us-central1-a Zone of n1-standard-1 machine type.
- local_action:
module: gce
name: test-instance
zone: us-central1-a
machine_type: n1-standard-1
image: debian-7
# Example using defaults and with metadata to create a single 'foo' instance
- local_action:
module: gce
name: foo
metadata: '{"db":"postgres", "group":"qa", "id":500}'
# Launch instances from a control node, runs some tasks on the new instances,
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
vars:
names: foo,bar
machine_type: n1-standard-1
image: debian-6
zone: us-central1-a
tasks:
- name: Launch instances
local_action: gce instance_names=${names} machine_type=${machine_type}
image=${image} zone=${zone}
register: gce
- name: Wait for SSH to come up
local_action: wait_for host=${item.public_ip} port=22 delay=10
timeout=60 state=started
with_items: ${gce.instance_data}
- name: Configure instance(s)
hosts: launched
sudo: True
roles:
- my_awesome_role
- my_awesome_tasks
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: gce
state: 'absent'
instance_names: ${gce.instance_names}
'''
import sys
USER_AGENT_PRODUCT="Ansible-gce"
USER_AGENT_VERSION="v1beta15"
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support (0.13.3+) required for this module'")
sys.exit(1)
try:
from ast import literal_eval
except ImportError:
print("failed=True " + \
"msg='GCE module requires python's 'ast' module, python v2.6+'")
sys.exit(1)
# Load in the libcloud secrets file
try:
import secrets
except ImportError:
secrets = None
ARGS = getattr(secrets, 'GCE_PARAMS', ())
KWARGS = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not ARGS or not KWARGS.has_key('project'):
print("failed=True " + \
"msg='Missing GCE connection parametres in libcloud secrets file.'")
sys.exit(1)
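# For reference, the secrets file imported above is expected to define two
# module-level values; a minimal sketch with placeholder values (not real
# credentials, adjust for your own service account):
#   GCE_PARAMS = ('123456789@developer.gserviceaccount.com', '/path/to/pkey.pem')
#   GCE_KEYWORD_PARAMS = {'project': 'your-project-id'}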
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
msg='Unexpected response: HTTP return_code['
msg+='%s], API error code[%s] and message: %s' % (
error.http_code, error.code, str(error.value))
return msg
def get_instance_info(inst):
"""Retrieves instance information from an instance object and returns it
as a dictionary.
"""
metadata = {}
if inst.extra.has_key('metadata') and inst.extra['metadata'].has_key('items'):
for md in inst.extra['metadata']['items']:
metadata[md['key']] = md['value']
try:
netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
except:
netname = None
return({
'image': not inst.image is None and inst.image.split('/')[-1] or None,
'machine_type': inst.size,
'metadata': metadata,
'name': inst.name,
'network': netname,
'private_ip': inst.private_ip[0],
'public_ip': inst.public_ip[0],
'status': inst.extra.has_key('status') and inst.extra['status'] or None,
'tags': inst.extra.has_key('tags') and inst.extra['tags'] or [],
'zone': inst.extra.has_key('zone') and inst.extra['zone'].name or None,
})
def create_instances(module, gce, instance_names):
"""Creates new instances. Attributes other than instance_names are picked
up from 'module'
module : AnsibleModule object
gce: authenticated GCE libcloud driver
instance_names: python list of instance names to create
Returns:
A list of dictionaries with instance information
about the instances that were launched.
"""
image = module.params.get('image')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
network = module.params.get('network')
persistent_boot_disk = module.params.get('persistent_boot_disk')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
new_instances = []
changed = False
lc_image = gce.ex_get_image(image)
lc_network = gce.ex_get_network(network)
lc_machine_type = gce.ex_get_size(machine_type)
lc_zone = gce.ex_get_zone(zone)
# Try to convert the user's metadata value into the format expected
# by GCE. First try to ensure user has proper quoting of a
# dictionary-like syntax using 'literal_eval', then convert the python
# dict into a python list of 'key' / 'value' dicts. Should end up
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
if metadata:
try:
md = literal_eval(metadata)
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError as e:
print("failed=True msg='bad metadata: %s'" % str(e))
sys.exit(1)
except SyntaxError as e:
print("failed=True msg='bad metadata syntax'")
sys.exit(1)
items = []
for k,v in md.items():
items.append({"key": k,"value": v})
metadata = {'items': items}
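# For example, the documented metadata value '{"db":"postgres", "group":"qa", "id":500}'
# would be converted into (item order may vary):
#   {'items': [{'key': 'db', 'value': 'postgres'},
#              {'key': 'group', 'value': 'qa'},
#              {'key': 'id', 'value': 500}]}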
# These variables all have default values but check just in case
if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable',
changed=False)
for name in instance_names:
pd = None
if persistent_boot_disk:
try:
pd = gce.create_volume(None, "%s" % name, image=lc_image)
except ResourceExistsError:
pd = gce.ex_get_volume("%s" % name, lc_zone)
inst = None
try:
inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd)
changed = True
except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone)
except GoogleBaseError as e:
module.fail_json(msg='Unexpected error attempting to create ' + \
'instance %s, error: %s' % (name, e.value))
if inst:
new_instances.append(inst)
instance_names = []
instance_json_data = []
for inst in new_instances:
d = get_instance_info(inst)
instance_names.append(d['name'])
instance_json_data.append(d)
return (changed, instance_json_data, instance_names)
def terminate_instances(module, gce, instance_names, zone_name):
"""Terminates a list of instances.
module: Ansible module object
gce: authenticated GCE connection object
instance_names: a list of instance names to terminate
zone_name: the zone where the instances reside prior to termination
Returns a dictionary of instance names that were terminated.
"""
changed = False
terminated_instance_names = []
for name in instance_names:
inst = None
try:
inst = gce.ex_get_node(name, zone_name)
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if inst:
gce.destroy_node(inst)
terminated_instance_names.append(inst.name)
changed = True
return (changed, terminated_instance_names)
def main():
module = AnsibleModule(
argument_spec = dict(
image = dict(default='debian-7'),
instance_names = dict(),
machine_type = dict(default='n1-standard-1'),
metadata = dict(),
name = dict(default='gce'),
network = dict(default='default'),
persistent_boot_disk = dict(type='bool', choices=BOOLEANS, default=False),
state = dict(choices=['active', 'present', 'absent', 'deleted'],
default='present'),
tags = dict(type='list'),
zone = dict(choices=['us-central1-a', 'us-central1-b',
'us-central2-a', 'europe-west1-a', 'europe-west1-b'],
default='us-central1-a'),
)
)
image = module.params.get('image')
instance_names = module.params.get('instance_names')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
name = module.params.get('name')
network = module.params.get('network')
persistent_boot_disk = module.params.get('persistent_boot_disk')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
changed = False
try:
gce = get_driver(Provider.GCE)(*ARGS, datacenter=zone, **KWARGS)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
inames = []
if isinstance(instance_names, list):
inames = instance_names
elif isinstance(instance_names, str):
inames = instance_names.split(',')
if name:
inames.append(name)
if not inames:
module.fail_json(msg='Must specify a "name" or "instance_names"',
changed=False)
if not zone:
module.fail_json(msg='Must specify a "zone"', changed=False)
json_output = {'zone': zone}
if state in ['absent', 'deleted']:
json_output['state'] = 'absent'
(changed, terminated_instance_names) = terminate_instances(module,
gce, inames, zone)
# based on what user specified, return the same variable, although
# value could be different if an instance could not be destroyed
if instance_names:
json_output['instance_names'] = terminated_instance_names
elif name:
json_output['name'] = name
elif state in ['active', 'present']:
json_output['state'] = 'present'
(changed, instance_data,instance_name_list) = create_instances(
module, gce, inames)
json_output['instance_data'] = instance_data
if instance_names:
json_output['instance_names'] = instance_name_list
elif name:
json_output['name'] = name
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()

332
library/cloud/gce_lb Normal file

@@ -0,0 +1,332 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_lb
short_description: create/destroy GCE load-balancer resources
description:
- This module can create and destroy Google Compute Engine C(loadbalancer)
and C(httphealthcheck) resources. The primary LB resource is the
C(load_balancer) resource and the health check parameters are all
prefixed with I(httphealthcheck).
The full documentation for Google Compute Engine load balancing is at
U(https://developers.google.com/compute/docs/load-balancing/). However,
the ansible module simplifies the configuration by following the
libcloud model.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
httphealthcheck_name:
description:
- the name identifier for the HTTP health check
required: false
default: null
httphealthcheck_port:
description:
- the TCP port to use for HTTP health checking
required: false
default: 80
httphealthcheck_path:
description:
- the url path to use for HTTP health checking
required: false
default: "/"
httphealthcheck_interval:
description:
- the duration in seconds between each health check request
required: false
default: 5
httphealthcheck_timeout:
description:
- the timeout in seconds before a request is considered a failed check
required: false
default: 5
httphealthcheck_unhealthy_count:
description:
- number of consecutive failed checks before marking a node unhealthy
required: false
default: 2
httphealthcheck_healthy_count:
description:
- number of consecutive successful checks before marking a node healthy
required: false
default: 2
httphealthcheck_host:
description:
- host header to pass through on HTTP check requests
required: false
default: null
name:
description:
- name of the load-balancer resource
required: false
default: null
protocol:
description:
- the protocol used for the load-balancer packet forwarding, tcp or udp
required: false
default: "tcp"
choices: ['tcp', 'udp']
region:
description:
- the GCE region where the load-balancer is defined
required: false
choices: ["us-central1", "us-central2", "europe-west1"]
external_ip:
description:
- the external static IPv4 (or auto-assigned) address for the LB
required: false
default: null
port_range:
description:
- the port (range) to forward, e.g. 80 or 8000-8888; defaults to all ports
required: false
default: null
members:
description:
- a list of zone/nodename pairs, e.g. ['us-central1-a/www-a', ...]
required: false
aliases: ['nodes']
state:
description:
- desired state of the LB
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple example of creating a new LB, adding members, and a health check
- local_action:
module: gce_lb
name: testlb
region: us-central1
members: ["us-central1-a/www-a", "us-central1-b/www-b"]
httphealthcheck_name: hc
httphealthcheck_port: 80
httphealthcheck_path: "/up"
'''
import sys
USER_AGENT_PRODUCT="Ansible-gce_lb"
USER_AGENT_VERSION="v1beta15"
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
# Load in the libcloud secrets file
try:
import secrets
except ImportError:
secrets = None
ARGS = getattr(secrets, 'GCE_PARAMS', ())
KWARGS = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not ARGS or not KWARGS.has_key('project'):
print("failed=True msg='Missing GCE connection " + \
"parameters in libcloud secrets file.'")
sys.exit(1)
def unexpected_error_msg(error):
"""Format error string based on passed in error."""
msg='Unexpected response: HTTP return_code['
msg+='%s], API error code[%s] and message: %s' % (
error.http_code, error.code, str(error.value))
return msg
def main():
module = AnsibleModule(
argument_spec = dict(
httphealthcheck_name = dict(),
httphealthcheck_port = dict(default=80),
httphealthcheck_path = dict(default='/'),
httphealthcheck_interval = dict(default=5),
httphealthcheck_timeout = dict(default=5),
httphealthcheck_unhealthy_count = dict(default=2),
httphealthcheck_healthy_count = dict(default=2),
httphealthcheck_host = dict(),
name = dict(),
protocol = dict(default='tcp'),
region = dict(),
external_ip = dict(),
port_range = dict(),
members = dict(type='list'),
state = dict(default='present'),
)
)
httphealthcheck_name = module.params.get('httphealthcheck_name')
httphealthcheck_port = module.params.get('httphealthcheck_port')
httphealthcheck_path = module.params.get('httphealthcheck_path')
httphealthcheck_interval = module.params.get('httphealthcheck_interval')
httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
httphealthcheck_unhealthy_count = \
module.params.get('httphealthcheck_unhealthy_count')
httphealthcheck_healthy_count = \
module.params.get('httphealthcheck_healthy_count')
httphealthcheck_host = module.params.get('httphealthcheck_host')
name = module.params.get('name')
protocol = module.params.get('protocol')
region = module.params.get('region')
external_ip = module.params.get('external_ip')
port_range = module.params.get('port_range')
members = module.params.get('members')
state = module.params.get('state')
try:
gce = get_driver(Provider.GCE)(*ARGS, **KWARGS)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
gcelb.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = False
json_output = {'name': name, 'state': state}
if not name and not httphealthcheck_name:
module.fail_json(msg='Nothing to do, please specify a "name" ' + \
'or "httphealthcheck_name" parameter', changed=False)
if state in ['active', 'present']:
# first, create the httphealthcheck if requested
hc = None
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
host=httphealthcheck_host, path=httphealthcheck_path,
port=httphealthcheck_port,
interval=httphealthcheck_interval,
timeout=httphealthcheck_timeout,
unhealthy_threshold=httphealthcheck_unhealthy_count,
healthy_threshold=httphealthcheck_healthy_count)
changed = True
except ResourceExistsError:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if hc is not None:
json_output['httphealthcheck_host'] = hc.extra['host']
json_output['httphealthcheck_path'] = hc.path
json_output['httphealthcheck_port'] = hc.port
json_output['httphealthcheck_interval'] = hc.interval
json_output['httphealthcheck_timeout'] = hc.timeout
json_output['httphealthcheck_unhealthy_count'] = \
hc.unhealthy_threshold
json_output['httphealthcheck_healthy_count'] = \
hc.healthy_threshold
# create the forwarding rule (and target pool under the hood)
lb = None
if name:
if not region:
module.fail_json(msg='Missing required region name',
changed=False)
nodes = []
output_nodes = []
json_output['name'] = name
# members is a python list of 'zone/inst' strings
if members:
for node in members:
try:
zone, node_name = node.split('/')
nodes.append(gce.ex_get_node(node_name, zone))
output_nodes.append(node)
except:
# skip nodes that are badly formatted or don't exist
pass
try:
if hc is not None:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_healthchecks=[hc],
ex_address=external_ip)
else:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_address=external_ip)
changed = True
except ResourceExistsError:
lb = gcelb.get_balancer(name)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if lb is not None:
json_output['members'] = output_nodes
json_output['protocol'] = protocol
json_output['region'] = region
json_output['external_ip'] = lb.ip
json_output['port_range'] = lb.port
hc_names = []
if lb.extra.has_key('healthchecks'):
for hc in lb.extra['healthchecks']:
hc_names.append(hc.name)
json_output['httphealthchecks'] = hc_names
if state in ['absent', 'deleted']:
# first, delete the load balancer (forwarding rule and target pool)
# if specified.
if name:
json_output['name'] = name
try:
lb = gcelb.get_balancer(name)
gcelb.destroy_balancer(lb)
changed = True
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# destroy the health check if specified
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
gce.ex_destroy_healthcheck(hc)
changed = True
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()

272
library/cloud/gce_net Normal file

@@ -0,0 +1,272 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_net
short_description: create/destroy GCE networks and firewall rules
description:
- This module can create and destroy Google Compute Engine networks and
firewall rules U(https://developers.google.com/compute/docs/networking).
The I(name) parameter is reserved for referencing a network while the
I(fwname) parameter is used to reference firewall rules.
IPv4 Address ranges must be specified using the CIDR
U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
allowed:
description:
- the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800')
required: false
default: null
aliases: []
ipv4_range:
description:
- the IPv4 address range in CIDR notation for the network
required: false
aliases: ['cidr']
fwname:
description:
- name of the firewall rule
required: false
default: null
aliases: ['fwrule']
name:
description:
- name of the network
required: false
default: null
aliases: []
src_range:
description:
- the source IPv4 address range in CIDR notation
required: false
default: null
aliases: ['src_cidr']
src_tags:
description:
- the source instance tags for creating a firewall rule
required: false
default: null
aliases: []
state:
description:
- desired state of the network or firewall rule
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple example of creating a new network
- local_action:
module: gce_net
name: privatenet
ipv4_range: '10.240.16.0/24'
# Simple example of creating a new firewall rule
- local_action:
module: gce_net
name: privatenet
allowed: tcp:80,8080
src_tags: ["web", "proxy"]
'''
import sys
USER_AGENT_PRODUCT="Ansible-gce_net"
USER_AGENT_VERSION="v1beta15"
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
# Load in the libcloud secrets file
try:
import secrets
except ImportError:
secrets = None
ARGS = getattr(secrets, 'GCE_PARAMS', ())
KWARGS = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not ARGS or not KWARGS.has_key('project'):
print("failed=True msg='Missing GCE connection " + \
"parameters in libcloud secrets file.'")
sys.exit(1)
def unexpected_error_msg(error):
"""Format error string based on passed in error."""
msg='Unexpected response: HTTP return_code['
msg+='%s], API error code[%s] and message: %s' % (
error.http_code, error.code, str(error.value))
return msg
def format_allowed(allowed):
"""Format the 'allowed' value so that it is GCE compatible."""
if allowed.count(":") == 0:
protocol = allowed
ports = []
elif allowed.count(":") == 1:
protocol, ports = allowed.split(":")
else:
return []
if ports.count(","):
ports = ports.split(",")
else:
ports = [ports]
return_val = {"IPProtocol": protocol}
if ports:
return_val["ports"] = ports
return [return_val]
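# Illustrative results of the conversion above:
#   format_allowed('tcp')        -> [{'IPProtocol': 'tcp'}]
#   format_allowed('tcp:80')     -> [{'IPProtocol': 'tcp', 'ports': ['80']}]
#   format_allowed('tcp:80,443') -> [{'IPProtocol': 'tcp', 'ports': ['80', '443']}]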
def main():
module = AnsibleModule(
argument_spec = dict(
allowed = dict(),
ipv4_range = dict(),
fwname = dict(),
name = dict(),
src_range = dict(),
src_tags = dict(type='list'),
state = dict(default='present'),
)
)
allowed = module.params.get('allowed')
ipv4_range = module.params.get('ipv4_range')
fwname = module.params.get('fwname')
name = module.params.get('name')
src_range = module.params.get('src_range')
src_tags = module.params.get('src_tags')
state = module.params.get('state')
try:
gce = get_driver(Provider.GCE)(*ARGS, **KWARGS)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = False
json_output = {'state': state}
if state in ['active', 'present']:
network = None
try:
network = gce.ex_get_network(name)
json_output['name'] = name
json_output['ipv4_range'] = network.cidr
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants to create a new network that doesn't yet exist
if name and not network:
if not ipv4_range:
module.fail_json(msg="Missing required 'ipv4_range' parameter",
changed=False)
try:
network = gce.ex_create_network(name, ipv4_range)
json_output['name'] = name
json_output['ipv4_range'] = ipv4_range
changed = True
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if fwname:
# user creating a firewall rule
if not allowed and not src_range and not src_tags:
if changed and network:
module.fail_json(
msg="Network created, but missing required " + \
"firewall rule parameter(s)", changed=True)
module.fail_json(
msg="Missing required firewall rule parameter(s)",
changed=False)
allowed_list = format_allowed(allowed)
try:
gce.ex_create_firewall(fwname, allowed_list, network=name,
source_ranges=src_range, source_tags=src_tags)
changed = True
except ResourceExistsError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['fwname'] = fwname
json_output['allowed'] = allowed
json_output['src_range'] = src_range
json_output['src_tags'] = src_tags
if state in ['absent', 'deleted']:
if fwname:
json_output['fwname'] = fwname
fw = None
try:
fw = gce.ex_get_firewall(fwname)
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if fw:
gce.ex_destroy_firewall(fw)
changed = True
if name:
json_output['name'] = name
network = None
try:
network = gce.ex_get_network(name)
# json_output['d1'] = 'found network name %s' % name
except ResourceNotFoundError:
# json_output['d2'] = 'not found network name %s' % name
pass
except Exception as e:
# json_output['d3'] = 'error with %s' % name
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if network:
# json_output['d4'] = 'deleting %s' % name
gce.ex_destroy_network(network)
# json_output['d5'] = 'deleted %s' % name
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()

253
library/cloud/gce_pd Normal file

@@ -0,0 +1,253 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances
but does not support creating boot disks from images or snapshots. The
'gce' module supports creating instances with boot disks.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
import sys
USER_AGENT_PRODUCT="Ansible-gce_pd"
USER_AGENT_VERSION="v1beta15"
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support is required for this module.'")
sys.exit(1)
# Load in the libcloud secrets file
try:
import secrets
except ImportError:
secrets = None
ARGS = getattr(secrets, 'GCE_PARAMS', ())
KWARGS = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not ARGS or not KWARGS.has_key('project'):
print("failed=True " + \
"msg='Missing GCE connection parameters in libcloud secrets file.'")
sys.exit(1)
def unexpected_error_msg(error):
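"""Format error string based on passed in error."""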
msg='Unexpected response: HTTP return_code['
msg+='%s], API error code[%s] and message: %s' % (
error.http_code, error.code, str(error.value))
return msg
def main():
module = AnsibleModule(
argument_spec = dict(
detach_only = dict(type='bool', choices=BOOLEANS),
instance_name = dict(),
mode = dict(default='READ_ONLY',
choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
)
)
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
state = module.params.get('state')
zone = module.params.get('zone')
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
try:
gce = get_driver(Provider.GCE)(*ARGS, datacenter=zone, **KWARGS)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1: raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
try:
disk = gce.create_volume(size_gb, name, location=zone)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError as e:
module.fail_json(msg=str(e.value), changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()

47
plugins/inventory/gce.ini Normal file

@@ -0,0 +1,47 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# The GCE inventory script has the following dependencies:
# 1. A valid Google Cloud Platform account with Google Compute Engine
# enabled. See https://cloud.google.com
# 2. An OAuth2 Service Account flow should be enabled. This will generate
# a private key file that the inventory script will use for API request
# authorization. See https://developers.google.com/accounts/docs/OAuth2
# 3. Convert the private key from PKCS12 to PEM format
# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
# > -nodes -nocerts | openssl rsa -out pkey.pem
# 4. The libcloud (>=0.13.3) python library. See http://libcloud.apache.org
#
# (See ansible/test/gce_tests.py comments for full install instructions)
#
# Author: Eric Johnson <erjohnso@google.com>
[gce]
# GCE Service Account configuration information can be stored in the
# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
# exist in your PYTHONPATH and be picked up automatically with an import
# statement in the inventory script. However, you can specify an absolute
# path to the secrets.py file with the 'libcloud_secrets' parameter.
libcloud_secrets =
# If you are not going to use a 'secrets.py' file, you can set the necessary
# authorization parameters here.
gce_service_account_email_address =
gce_service_account_pem_file_path =
gce_project_id =
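# A hypothetical, filled-in example (placeholder values only, not real credentials):
# gce_service_account_email_address = 123456789@developer.gserviceaccount.com
# gce_service_account_pem_file_path = /path/to/pkey.pem
# gce_project_id = my-gce-project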

244
plugins/inventory/gce.py Executable file

@@ -0,0 +1,244 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). When
your instance was created with a root persistent disk, it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ plugins/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
'''
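# A rough sketch of the --list output implied by the grouping rules above,
# assuming a single hypothetical instance 'www-a' tagged 'foo', on the
# 'default' network, booted from a persistent disk (trimmed for brevity):
#   {
#     "us-central1-a": ["www-a"],
#     "tag_foo": ["www-a"],
#     "network_default": ["www-a"],
#     "n1-standard-1": ["www-a"],
#     "persistent_disk": ["www-a"],
#     "status_running": ["www-a"]
#   }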
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1beta15"
import sys
import os
import argparse
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
print("GCE inventory script requires libcloud >= 0.13")
sys.exit(1)
class GceInventory(object):
def __init__(self):
# Read settings and parse CLI arguments
self.parse_cli_args()
self.driver = self.get_gce_driver()
# Just display data for specific host
if self.args.host:
print self.json_format_dict(self.node_to_dict(
self.get_instance(self.args.host)))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
print self.json_format_dict(self.group_instances())
sys.exit(0)
def get_gce_driver(self):
'''Determine GCE authorization settings and return libcloud driver.'''
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/gce.ini')
# the GCE params in 'secrets.py' will override these
secrets_path = config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
args = getattr(secrets, 'GCE_PARAMS', ())
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
print(err)
sys.exit(1)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
args = getattr(secrets, 'GCE_PARAMS', ())
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found:
args = (
config.get('gce','gce_service_account_email_address'),
config.get('gce','gce_service_account_pem_file_path')
)
kwargs = {'project': config.get('gce','gce_project_id')}
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
return gce
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
self.args = parser.parse_args()
def node_to_dict(self, inst):
md = {}
if inst.extra['metadata'].has_key('items'):
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ip[0],
'gce_public_ip': inst.public_ip[0],
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
'gce_zone': inst.extra['zone'].name,
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net
}
def get_instance(self, instance_name):
'''Gets details about a specific instance '''
return self.driver.ex_get_node(instance_name)
def group_instances(self):
'''Group all instances'''
groups = {}
for node in self.driver.list_nodes():
name = node.name
zone = node.extra['zone'].name
if groups.has_key(zone): groups[zone].append(name)
else: groups[zone] = [name]
tags = node.extra['tags']
for t in tags:
tag = 'tag_%s' % t
if groups.has_key(tag): groups[tag].append(name)
else: groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
if groups.has_key(net): groups[net].append(name)
else: groups[net] = [name]
machine_type = node.size
if groups.has_key(machine_type): groups[machine_type].append(name)
else: groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
if groups.has_key(image): groups[image].append(name)
else: groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
if groups.has_key(stat): groups[stat].append(name)
else: groups[stat] = [name]
return groups
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
GceInventory()

752
test/gce_tests.py Normal file

@@ -0,0 +1,752 @@
#!/usr/bin/env python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a custom functional test script for the Google Compute Engine
# ansible modules. In order to run these tests, you must:
# 1) Create a Google Cloud Platform account and enable the Google
# Compute Engine service and billing
# 2) Download, install, and configure 'gcutil'
# see [https://developers.google.com/compute/docs/gcutil/]
# 3) Convert your GCE Service Account private key from PKCS12 to PEM format
# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
# > -nodes -nocerts | openssl rsa -out pkey.pem
# 4) Make sure you have libcloud 0.13.3 or later installed.
# 5) Make sure you have a libcloud 'secrets.py' file in your PYTHONPATH
# 6) Set GCE_PARAMS and GCE_KEYWORD_PARAMS in your 'secrets.py' file.
# 7) Set up a simple hosts file
# $ echo 127.0.0.1 > ~/ansible_hosts
# $ echo "export ANSIBLE_HOSTS='~/ansible_hosts'" >> ~/.bashrc
# $ . ~/.bashrc
# 8) Set up your ansible 'hacking' environment
# $ cd ~/ansible
# $ . hacking/env-setup
# $ export ANSIBLE_HOST_KEY_CHECKING=no
# $ ansible all -m ping
# 9) Set your PROJECT variable below
# 10) Run and time the tests and log the output; they take ~30 minutes to run
# $ time stdbuf -oL python test/gce_tests.py 2>&1 | tee log
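# Each entry in 'test_cases' below pairs an ansible module name ('m') and its
# argument string ('a') with the expected ad-hoc output ('r'); conceptually,
# a test amounts to running something like (sketch only, not the exact runner):
#   ansible all -m gce_pd -a "name=somedisk zone=us-central1-a state=absent"
# and comparing the captured output against the expected 'r' string.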
# Set this to your test Project ID
PROJECT="google.com:erjohnso"
# debugging
DEBUG=False # lots of debugging output
VERBOSE=True # on failure, display ansible command and expected/actual result
# location - note that some tests rely on the module's 'default'
# region/zone, which should match the settings below.
REGION="us-central1"
ZONE="%s-a" % REGION
# Peeking is a way to trigger looking at a specified set of resources
# before and/or after a test run. The 'test_cases' data structure below
# has a few tests with 'peek_before' and 'peek_after'. When those keys
# are set and PEEKING_ENABLED is True, then these steps will be executed
# to aid in debugging tests. Normally, this is not needed.
PEEKING_ENABLED=False
# disks
DNAME="aaaaa-ansible-disk"
DNAME2="aaaaa-ansible-disk2"
DNAME6="aaaaa-ansible-inst6"
DNAME7="aaaaa-ansible-inst7"
USE_PD="true"
KERNEL="https://www.googleapis.com/compute/v1beta15/projects/google/global/kernels/gce-v20130813"
# instances
INAME="aaaaa-ansible-inst"
INAME2="aaaaa-ansible-inst2"
INAME3="aaaaa-ansible-inst3"
INAME4="aaaaa-ansible-inst4"
INAME5="aaaaa-ansible-inst5"
INAME6="aaaaa-ansible-inst6"
INAME7="aaaaa-ansible-inst7"
TYPE="n1-standard-1"
IMAGE="https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130816"
NETWORK="default"
SCOPES="https://www.googleapis.com/auth/userinfo.email,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.full_control"
# networks / firewalls
NETWK1="ansible-network1"
NETWK2="ansible-network2"
NETWK3="ansible-network3"
CIDR1="10.240.16.0/24"
CIDR2="10.240.32.0/24"
CIDR3="10.240.64.0/24"
GW1="10.240.16.1"
GW2="10.240.32.1"
FW1="ansible-fwrule1"
FW2="ansible-fwrule2"
FW3="ansible-fwrule3"
FW4="ansible-fwrule4"
# load-balancer tests
HC1="ansible-hc1"
HC2="ansible-hc2"
HC3="ansible-hc3"
LB1="ansible-lb1"
LB2="ansible-lb2"
from commands import getstatusoutput as run
import sys
test_cases = [
{'id': '01', 'desc': 'Detach / Delete disk tests',
'setup': ['gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME, ZONE, TYPE, NETWORK, SCOPES, IMAGE, USE_PD),
'gcutil adddisk "%s" --size_gb=2 --zone=%s' % (DNAME, ZONE)],
'tests': [
{'desc': 'DETACH_ONLY but disk not found [success]',
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % ("missing-disk", INAME, ZONE),
'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "%s", "name": "missing-disk", "state": "absent", "zone": "%s"}' % (INAME, ZONE),
},
{'desc': 'DETACH_ONLY but instance not found [success]',
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, "missing-instance", ZONE),
'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "missing-instance", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (DNAME, ZONE),
},
{'desc': 'DETACH_ONLY but neither disk nor instance exists [success]',
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % ("missing-disk", "missing-instance", ZONE),
'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "missing-instance", "name": "missing-disk", "state": "absent", "zone": "%s"}' % (ZONE),
},
{'desc': 'DETACH_ONLY but disk is not currently attached [success]',
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, INAME, ZONE),
'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "%s", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (INAME, DNAME, ZONE),
},
{'desc': 'DETACH_ONLY disk is attached and should be detached [success]',
'setup': ['gcutil attachdisk --disk="%s,mode=READ_ONLY" --zone=%s %s' % (DNAME, ZONE, INAME), 'sleep 10'],
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, INAME, ZONE),
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "detach_only": true, "detached_from_instance": "%s", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (INAME, INAME, DNAME, ZONE),
'teardown': ['gcutil detachdisk --zone=%s --device_name=%s %s' % (ZONE, DNAME, INAME)],
},
{'desc': 'DETACH_ONLY but not instance specified [FAIL]',
'm': 'gce_pd',
'a': 'name=%s zone=%s detach_only=yes state=absent' % (DNAME, ZONE),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must specify an instance name when detaching a disk"}',
},
{'desc': 'DELETE but disk not found [success]',
'm': 'gce_pd',
'a': 'name=%s zone=%s state=absent' % ("missing-disk", ZONE),
'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-disk", "state": "absent", "zone": "%s"}' % (ZONE),
},
{'desc': 'DELETE but disk is attached [FAIL]',
'setup': ['gcutil attachdisk --disk="%s,mode=READ_ONLY" --zone=%s %s' % (DNAME, ZONE, INAME), 'sleep 10'],
'm': 'gce_pd',
'a': 'name=%s zone=%s state=absent' % (DNAME, ZONE),
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"The disk resource 'projects/%s/zones/%s/disks/%s' is already being used by 'projects/%s/zones/%s/instances/%s'\"}" % (PROJECT, ZONE, DNAME, PROJECT, ZONE, INAME),
'teardown': ['gcutil detachdisk --zone=%s --device_name=%s %s' % (ZONE, DNAME, INAME)],
},
{'desc': 'DELETE disk [success]',
'm': 'gce_pd',
'a': 'name=%s zone=%s state=absent' % (DNAME, ZONE),
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (DNAME, ZONE),
},
],
'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE),
'sleep 15',
'gcutil deletedisk -f "%s" --zone=%s' % (INAME, ZONE),
'sleep 10',
'gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE),
'sleep 10'],
},
{'id': '02', 'desc': 'Create disk but do not attach (e.g. no instance_name param)',
'setup': [],
'tests': [
{'desc': 'CREATE_NO_ATTACH "string" for size_gb [FAIL]',
'm': 'gce_pd',
'a': 'name=%s size_gb="foo" zone=%s' % (DNAME, ZONE),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}',
},
{'desc': 'CREATE_NO_ATTACH negative size_gb [FAIL]',
'm': 'gce_pd',
'a': 'name=%s size_gb=-2 zone=%s' % (DNAME, ZONE),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}',
},
{'desc': 'CREATE_NO_ATTACH size_gb exceeds quota [FAIL]',
'm': 'gce_pd',
'a': 'name=%s size_gb=9999 zone=%s' % ("big-disk", ZONE),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Requested disk size exceeds quota"}',
},
{'desc': 'CREATE_NO_ATTACH create the disk [success]',
'm': 'gce_pd',
'a': 'name=%s zone=%s' % (DNAME, ZONE),
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "size_gb": 10, "state": "present", "zone": "%s"}' % (DNAME, ZONE),
},
{'desc': 'CREATE_NO_ATTACH but disk already exists [success]',
'm': 'gce_pd',
'a': 'name=%s zone=%s' % (DNAME, ZONE),
'r': '127.0.0.1 | success >> {"changed": false, "name": "%s", "size_gb": 10, "state": "present", "zone": "%s"}' % (DNAME, ZONE),
},
],
'teardown': ['gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE),
'sleep 10'],
},
{'id': '03', 'desc': 'Create and attach disk',
'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME2, ZONE, TYPE, NETWORK, SCOPES, IMAGE, USE_PD),
'gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME, ZONE, "g1-small", NETWORK, SCOPES, IMAGE, USE_PD),
'gcutil adddisk "%s" --size_gb=2 --zone=%s' % (DNAME, ZONE),
'gcutil adddisk "%s" --size_gb=2 --zone=%s' % (DNAME2, ZONE),
'sleep 10'],
'tests': [
{'desc': 'CREATE_AND_ATTACH "string" for size_gb [FAIL]',
'm': 'gce_pd',
'a': 'name=%s size_gb="foo" instance_name=%s zone=%s' % (DNAME, INAME, ZONE),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}',
},
{'desc': 'CREATE_AND_ATTACH negative size_gb [FAIL]',
'm': 'gce_pd',
'a': 'name=%s size_gb=-2 instance_name=%s zone=%s' % (DNAME, INAME, ZONE),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}',
},
{'desc': 'CREATE_AND_ATTACH size_gb exceeds quota [FAIL]',
'm': 'gce_pd',
'a': 'name=%s size_gb=9999 instance_name=%s zone=%s' % ("big-disk", INAME, ZONE),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Requested disk size exceeds quota"}',
},
{'desc': 'CREATE_AND_ATTACH missing instance [FAIL]',
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s' % (DNAME, "missing-instance", ZONE),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Instance %s does not exist in zone %s"}' % ("missing-instance", ZONE),
},
{'desc': 'CREATE_AND_ATTACH disk exists but not attached [success]',
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME, ZONE),
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE),
'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
},
{'desc': 'CREATE_AND_ATTACH disk exists already attached [success]',
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME, ZONE),
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": false, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE),
},
{'desc': 'CREATE_AND_ATTACH attached RO, attempt RO to 2nd inst [success]',
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME2, ZONE),
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME2, DNAME, ZONE),
'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
},
     {'desc': 'CREATE_AND_ATTACH attached RO, attach RW to self is a no-op [success]',
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s mode=READ_WRITE' % (DNAME, INAME, ZONE),
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": false, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE),
},
{'desc': 'CREATE_AND_ATTACH attached RW, attach RW to other [FAIL]',
'setup': ['gcutil attachdisk --disk=%s,mode=READ_WRITE --zone=%s %s' % (DNAME2, ZONE, INAME), 'sleep 10'],
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s mode=READ_WRITE' % (DNAME2, INAME2, ZONE),
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[200], API error code[RESOURCE_IN_USE] and message: The disk resource 'projects/%s/zones/%s/disks/%s' is already being used in read-write mode\"}" % (PROJECT, ZONE, DNAME2),
'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
},
{'desc': 'CREATE_AND_ATTACH attach too many disks to inst [FAIL]',
'setup': ['gcutil adddisk aa-disk-dummy --size_gb=2 --zone=%s' % (ZONE),
'sleep 10',
'gcutil adddisk aa-disk-dummy2 --size_gb=2 --zone=%s' % (ZONE),
'sleep 10',
'gcutil attachdisk --disk=aa-disk-dummy --zone=%s %s' % (ZONE, INAME),
'sleep 5'],
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
'm': 'gce_pd',
'a': 'name=%s instance_name=%s zone=%s' % ("aa-disk-dummy2", INAME, ZONE),
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[200], API error code[LIMIT_EXCEEDED] and message: Exceeded limit 'maximum_persistent_disks' on resource 'projects/%s/zones/%s/instances/%s'. Limit: 4\"}" % (PROJECT, ZONE, INAME),
'teardown': ['gcutil detachdisk --device_name=aa-disk-dummy --zone=%s %s' % (ZONE, INAME),
'sleep 3',
'gcutil deletedisk -f aa-disk-dummy --zone=%s' % (ZONE),
'sleep 10',
'gcutil deletedisk -f aa-disk-dummy2 --zone=%s' % (ZONE),
'sleep 10'],
},
],
'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE),
'sleep 15',
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE),
'sleep 15',
'gcutil deletedisk -f "%s" --zone=%s' % (INAME, ZONE),
'sleep 10',
'gcutil deletedisk -f "%s" --zone=%s' % (INAME2, ZONE),
'sleep 10',
'gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE),
'sleep 10',
'gcutil deletedisk -f "%s" --zone=%s' % (DNAME2, ZONE),
'sleep 10'],
},
{'id': '04', 'desc': 'Delete / destroy instances',
'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME, ZONE, TYPE, IMAGE),
'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME2, ZONE, TYPE, IMAGE),
'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME3, ZONE, TYPE, IMAGE),
'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME4, ZONE, TYPE, IMAGE),
'gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME5, ZONE, TYPE, IMAGE)],
'tests': [
{'desc': 'DELETE instance, bad zone param [FAIL]',
'm': 'gce',
'a': 'name=missing-inst zone=bogus state=absent',
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "value of zone must be one of: us-central1-a,us-central1-b,us-central2-a,europe-west1-a,europe-west1-b, got: bogus"}',
},
{'desc': 'DELETE non-existent instance, no-op [success]',
'm': 'gce',
'a': 'name=missing-inst zone=%s state=absent' % (ZONE),
'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-inst", "state": "absent", "zone": "%s"}' % (ZONE),
},
{'desc': 'DELETE an existing named instance [success]',
'm': 'gce',
'a': 'name=%s zone=%s state=absent' % (INAME, ZONE),
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent", "zone": "%s"}' % (INAME, ZONE),
},
{'desc': 'DELETE list of instances with a non-existent one [success]',
'm': 'gce',
'a': 'instance_names=%s,missing,%s zone=%s state=absent' % (INAME2,INAME3, ZONE),
'r': '127.0.0.1 | success >> {"changed": true, "instance_names": ["%s", "%s"], "state": "absent", "zone": "%s"}' % (INAME2, INAME3, ZONE),
},
{'desc': 'DELETE list of instances all pre-exist [success]',
'm': 'gce',
'a': 'instance_names=%s,%s zone=%s state=absent' % (INAME4,INAME5, ZONE),
'r': '127.0.0.1 | success >> {"changed": true, "instance_names": ["%s", "%s"], "state": "absent", "zone": "%s"}' % (INAME4, INAME5, ZONE),
},
],
'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME3, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME4, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME5, ZONE),
'sleep 10'],
},
{'id': '05', 'desc': 'Create instances',
'setup': [],
'tests': [
{'desc': 'CREATE_INSTANCE invalid image arg [FAIL]',
'm': 'gce',
'a': 'name=foo image=foo',
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required create instance variable"}',
},
{'desc': 'CREATE_INSTANCE metadata a list [FAIL]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s zone=%s metadata=\'[\\"foo\\":\\"bar\\",\\"baz\\":1]\'' % (INAME,ZONE),
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}',
},
{'desc': 'CREATE_INSTANCE metadata not a dict [FAIL]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s zone=%s metadata=\\"foo\\":\\"bar\\",\\"baz\\":1' % (INAME,ZONE),
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}',
},
{'desc': 'CREATE_INSTANCE with metadata form1 [FAIL]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s zone=%s metadata=\'{"foo":"bar","baz":1}\'' % (INAME,ZONE),
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}',
},
{'desc': 'CREATE_INSTANCE with metadata form2 [FAIL]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s zone=%s metadata={\'foo\':\'bar\',\'baz\':1}' % (INAME,ZONE),
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}',
},
{'desc': 'CREATE_INSTANCE with metadata form3 [FAIL]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s zone=%s metadata="foo:bar" '% (INAME,ZONE),
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}',
},
{'desc': 'CREATE_INSTANCE with metadata form4 [FAIL]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s zone=%s metadata="{\'foo\':\'bar\'}"'% (INAME,ZONE),
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}',
},
{'desc': 'CREATE_INSTANCE invalid image arg [FAIL]',
'm': 'gce',
'a': 'instance_names=foo,bar image=foo',
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required create instance variable"}',
},
{'desc': 'CREATE_INSTANCE single inst, using defaults [success]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s' % (INAME),
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.175.15", "public_ip": "173.255.120.190", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME, ZONE, INAME, ZONE),
},
{'desc': 'CREATE_INSTANCE the same instance again, no-op [success]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s' % (INAME),
'r': '127.0.0.1 | success >> {"changed": false, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.175.15", "public_ip": "173.255.120.190", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME, ZONE, INAME, ZONE),
},
{'desc': 'CREATE_INSTANCE instance with alt type [success]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s machine_type=n1-standard-2' % (INAME2),
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-2", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.192.227", "public_ip": "173.255.121.233", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME2, ZONE, INAME2, ZONE),
},
{'desc': 'CREATE_INSTANCE instance with root pd [success]',
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s persistent_boot_disk=yes' % (INAME3),
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.178.140", "public_ip": "173.255.121.176", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME3, ZONE, INAME3, ZONE),
},
     {'desc': 'CREATE_INSTANCE instance with a root pd that already exists [success]',
'setup': ['gcutil adddisk --source_image=%s --zone=%s %s' % (IMAGE, ZONE, DNAME6),
'sleep 10'],
'strip_numbers': True,
'm': 'gce',
'a': 'name=%s zone=%s persistent_boot_disk=yes' % (INAME6, ZONE),
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.178.140", "public_ip": "173.255.121.176", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME6, ZONE, INAME6, ZONE),
},
{'desc': 'CREATE_INSTANCE instance with root pd attached to other inst [FAIL]',
'setup': ['gcutil adddisk --source_image=%s --zone=%s %s' % (IMAGE, ZONE, DNAME7),
'sleep 10',
'gcutil addinstance boo --wait_until_running --zone=%s --machine_type=%s --network=%s --disk=%s,mode=READ_WRITE,boot --kernel=%s' % (ZONE,TYPE,NETWORK,DNAME7,KERNEL),
],
'm': 'gce',
'a': 'name=%s zone=%s persistent_boot_disk=yes' % (INAME7, ZONE),
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "Unexpected error attempting to create instance %s, error: The disk resource \'projects/%s/zones/%s/disks/%s\' is already being used in read-write mode"}' % (INAME7,PROJECT,ZONE,DNAME7),
},
{'desc': 'CREATE_INSTANCE use *all* the options! [success]',
'strip_numbers': True,
'm': 'gce',
'a': 'instance_names=%s,%s metadata=\'{\\"foo\\":\\"bar\\", \\"baz\\":1}\' tags=t1,t2,t3 zone=%s image=centos-6-v20130731 persistent_boot_disk=yes' % (INAME4,INAME5,ZONE),
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {"baz": "1", "foo": "bar"}, "name": "%s", "network": "default", "private_ip": "10.240.130.4", "public_ip": "173.255.121.97", "status": "RUNNING", "tags": ["t1", "t2", "t3"], "zone": "%s"}, {"image": null, "machine_type": "n1-standard-1", "metadata": {"baz": "1", "foo": "bar"}, "name": "%s", "network": "default", "private_ip": "10.240.207.226", "public_ip": "173.255.121.85", "status": "RUNNING", "tags": ["t1", "t2", "t3"], "zone": "%s"}], "instance_names": ["%s", "%s"], "state": "present", "zone": "%s"}' % (INAME4, ZONE, INAME5, ZONE, INAME4, INAME5, ZONE),
},
],
'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME3, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME4, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME5, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME6, ZONE),
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME7, ZONE),
'gcutil deleteinstance -f boo --zone=%s' % (ZONE),
'sleep 10',
'gcutil deletedisk -f "%s" --zone=%s' % (INAME3, ZONE),
'gcutil deletedisk -f "%s" --zone=%s' % (INAME4, ZONE),
'gcutil deletedisk -f "%s" --zone=%s' % (INAME5, ZONE),
'gcutil deletedisk -f "%s" --zone=%s' % (INAME6, ZONE),
'gcutil deletedisk -f "%s" --zone=%s' % (INAME7, ZONE),
'sleep 10'],
},
{'id': '06', 'desc': 'Delete / destroy networks and firewall rules',
'setup': ['gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR1, GW1, NETWK1),
'gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR2, GW2, NETWK2),
'sleep 5',
'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK1, FW1),
'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK2, FW2),
'sleep 5'],
'tests': [
{'desc': 'DELETE bogus named firewall [success]',
'm': 'gce_net',
'a': 'fwname=missing-fwrule state=absent',
'r': '127.0.0.1 | success >> {"changed": false, "fwname": "missing-fwrule", "state": "absent"}',
},
{'desc': 'DELETE bogus named network [success]',
'm': 'gce_net',
'a': 'name=missing-network state=absent',
'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-network", "state": "absent"}',
},
{'desc': 'DELETE named firewall rule [success]',
'm': 'gce_net',
'a': 'fwname=%s state=absent' % (FW1),
'r': '127.0.0.1 | success >> {"changed": true, "fwname": "%s", "state": "absent"}' % (FW1),
'teardown': ['sleep 5'], # pause to give GCE time to delete fwrule
},
{'desc': 'DELETE unused named network [success]',
'm': 'gce_net',
'a': 'name=%s state=absent' % (NETWK1),
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent"}' % (NETWK1),
},
{'desc': 'DELETE named network *and* fwrule [success]',
'm': 'gce_net',
'a': 'name=%s fwname=%s state=absent' % (NETWK2, FW2),
'r': '127.0.0.1 | success >> {"changed": true, "fwname": "%s", "name": "%s", "state": "absent"}' % (FW2, NETWK2),
},
],
'teardown': ['gcutil deletenetwork -f %s' % (NETWK1),
'gcutil deletenetwork -f %s' % (NETWK2),
'sleep 5',
'gcutil deletefirewall -f %s' % (FW1),
'gcutil deletefirewall -f %s' % (FW2)],
},
{'id': '07', 'desc': 'Create networks and firewall rules',
'setup': ['gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR1, GW1, NETWK1),
'sleep 5',
'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK1, FW1),
'sleep 5'],
'tests': [
{'desc': 'CREATE network without specifying ipv4_range [FAIL]',
'm': 'gce_net',
'a': 'name=fail',
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Missing required 'ipv4_range' parameter\"}",
},
     {'desc': 'CREATE network specifying a bad ipv4_range [FAIL]',
'm': 'gce_net',
'a': 'name=fail ipv4_range=bad_value',
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for field 'resource.IPv4Range': 'bad_value'. Must be a CIDR address range that is contained in the RFC1918 private address blocks: [10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16]\"}",
},
{'desc': 'CREATE existing network, not changed [success]',
'm': 'gce_net',
'a': 'name=%s ipv4_range=%s' % (NETWK1, CIDR1),
'r': '127.0.0.1 | success >> {"changed": false, "ipv4_range": "%s", "name": "%s", "state": "present"}' % (CIDR1, NETWK1),
},
{'desc': 'CREATE new network, changed [success]',
'm': 'gce_net',
'a': 'name=%s ipv4_range=%s' % (NETWK2, CIDR2),
'r': '127.0.0.1 | success >> {"changed": true, "ipv4_range": "10.240.32.0/24", "name": "%s", "state": "present"}' % (NETWK2),
},
{'desc': 'CREATE new fw rule missing params [FAIL]',
'm': 'gce_net',
'a': 'name=%s fwname=%s' % (NETWK1, FW1),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required firewall rule parameter(s)"}',
},
{'desc': 'CREATE new fw rule bad params [FAIL]',
'm': 'gce_net',
'a': 'name=%s fwname=broken allowed=blah src_tags="one,two"' % (NETWK1),
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for field 'resource.allowed[0].IPProtocol': 'blah'. Must be one of [\\\"tcp\\\", \\\"udp\\\", \\\"icmp\\\"] or an IP protocol number between 0 and 255\"}",
},
{'desc': 'CREATE existing fw rule [success]',
'm': 'gce_net',
'a': 'name=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK1, FW1),
'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": false, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW1, CIDR1, NETWK1),
},
{'desc': 'CREATE new fw rule [success]',
'm': 'gce_net',
'a': 'name=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK1, FW3),
'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": true, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW3, CIDR1, NETWK1),
},
{'desc': 'CREATE new network *and* fw rule [success]',
'm': 'gce_net',
'a': 'name=%s ipv4_range=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK3, CIDR3, FW4),
'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": true, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW4, CIDR3, NETWK3),
},
],
'teardown': ['gcutil deletefirewall -f %s' % (FW1),
'gcutil deletefirewall -f %s' % (FW2),
'gcutil deletefirewall -f %s' % (FW3),
'gcutil deletefirewall -f %s' % (FW4),
'sleep 5',
'gcutil deletenetwork -f %s' % (NETWK1),
'gcutil deletenetwork -f %s' % (NETWK2),
'gcutil deletenetwork -f %s' % (NETWK3),
'sleep 5'],
},
{'id': '08', 'desc': 'Create load-balancer resources',
'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --nopersistent_boot_disk' % (INAME, ZONE, TYPE, NETWORK, SCOPES, IMAGE),
'gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --nopersistent_boot_disk' % (INAME2, ZONE, TYPE, NETWORK, SCOPES, IMAGE),
],
'tests': [
{'desc': 'Do nothing [FAIL]',
'm': 'gce_lb',
'a': 'httphealthcheck_port=7',
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Nothing to do, please specify a \\\"name\\\" or \\\"httphealthcheck_name\\\" parameter"}',
},
{'desc': 'CREATE_HC create basic http healthcheck [success]',
'm': 'gce_lb',
'a': 'httphealthcheck_name=%s' % (HC1),
'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/", "httphealthcheck_port": 80, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC1),
},
{'desc': 'CREATE_HC (repeat, no-op) create basic http healthcheck [success]',
'm': 'gce_lb',
'a': 'httphealthcheck_name=%s' % (HC1),
'r': '127.0.0.1 | success >> {"changed": false, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/", "httphealthcheck_port": 80, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC1),
},
{'desc': 'CREATE_HC create custom http healthcheck [success]',
'm': 'gce_lb',
'a': 'httphealthcheck_name=%s httphealthcheck_port=1234 httphealthcheck_path="/whatup" httphealthcheck_host="foo" httphealthcheck_interval=300' % (HC2),
'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": "foo", "httphealthcheck_interval": 300, "httphealthcheck_name": "%s", "httphealthcheck_path": "/whatup", "httphealthcheck_port": 1234, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC2),
},
{'desc': 'CREATE_HC create (broken) custom http healthcheck [FAIL]',
'm': 'gce_lb',
'a': 'httphealthcheck_name=%s httphealthcheck_port="string" httphealthcheck_path=7' % (HC3),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for: Expected a signed integer, got \'string\' (class java.lang.String)"}',
},
{'desc': 'CREATE_LB create lb, missing region [FAIL]',
'm': 'gce_lb',
'a': 'name=%s' % (LB1),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required region name"}',
},
{'desc': 'CREATE_LB create lb, bogus region [FAIL]',
'm': 'gce_lb',
'a': 'name=%s region=bogus' % (LB1),
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Unexpected response: HTTP return_code[404], API error code[None] and message: The resource \'projects/%s/regions/bogus\' was not found"}' % (PROJECT),
},
{'desc': 'CREATE_LB create lb, minimal params [success]',
'strip_numbers': True,
'm': 'gce_lb',
'a': 'name=%s region=%s' % (LB1, REGION),
'r': '127.0.0.1 | success >> {"changed": true, "external_ip": "173.255.123.245", "httphealthchecks": [], "members": [], "name": "%s", "port_range": "1-65535", "protocol": "tcp", "region": "%s", "state": "present"}' % (LB1, REGION),
},
{'desc': 'CREATE_LB create lb full params [success]',
'strip_numbers': True,
'm': 'gce_lb',
'a': 'httphealthcheck_name=%s httphealthcheck_port=5055 httphealthcheck_path="/howami" name=%s port_range=8000-8888 region=%s members=%s/%s,%s/%s' % (HC3,LB2,REGION,ZONE,INAME,ZONE,INAME2),
'r': '127.0.0.1 | success >> {"changed": true, "external_ip": "173.255.126.81", "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/howami", "httphealthcheck_port": 5055, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "httphealthchecks": ["%s"], "members": ["%s/%s", "%s/%s"], "name": "%s", "port_range": "8000-8888", "protocol": "tcp", "region": "%s", "state": "present"}' % (HC3,HC3,ZONE,INAME,ZONE,INAME2,LB2,REGION),
},
],
'teardown': [
'gcutil deleteinstance --zone=%s -f %s %s' % (ZONE, INAME, INAME2),
'gcutil deleteforwardingrule --region=%s -f %s %s' % (REGION, LB1, LB2),
'sleep 10',
'gcutil deletetargetpool --region=%s -f %s-tp %s-tp' % (REGION, LB1, LB2),
'sleep 10',
'gcutil deletehttphealthcheck -f %s %s %s' % (HC1, HC2, HC3),
],
},
{'id': '09', 'desc': 'Destroy load-balancer resources',
'setup': ['gcutil addhttphealthcheck %s' % (HC1),
'sleep 5',
'gcutil addhttphealthcheck %s' % (HC2),
'sleep 5',
'gcutil addtargetpool --health_checks=%s --region=%s %s-tp' % (HC1, REGION, LB1),
'sleep 5',
'gcutil addforwardingrule --target=%s-tp --region=%s %s' % (LB1, REGION, LB1),
'sleep 5',
'gcutil addtargetpool --region=%s %s-tp' % (REGION, LB2),
'sleep 5',
'gcutil addforwardingrule --target=%s-tp --region=%s %s' % (LB2, REGION, LB2),
'sleep 5',
],
'tests': [
{'desc': 'DELETE_LB: delete a non-existent LB [success]',
'm': 'gce_lb',
'a': 'name=missing state=absent',
'r': '127.0.0.1 | success >> {"changed": false, "name": "missing", "state": "absent"}',
},
{'desc': 'DELETE_LB: delete a non-existent LB+HC [success]',
'm': 'gce_lb',
'a': 'name=missing httphealthcheck_name=alsomissing state=absent',
'r': '127.0.0.1 | success >> {"changed": false, "httphealthcheck_name": "alsomissing", "name": "missing", "state": "absent"}',
},
{'desc': 'DELETE_LB: destroy standalone healthcheck [success]',
'm': 'gce_lb',
'a': 'httphealthcheck_name=%s state=absent' % (HC2),
'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_name": "%s", "name": null, "state": "absent"}' % (HC2),
},
{'desc': 'DELETE_LB: destroy standalone balancer [success]',
'm': 'gce_lb',
'a': 'name=%s state=absent' % (LB2),
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent"}' % (LB2),
},
{'desc': 'DELETE_LB: destroy LB+HC [success]',
'm': 'gce_lb',
'a': 'name=%s httphealthcheck_name=%s state=absent' % (LB1, HC1),
'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_name": "%s", "name": "%s", "state": "absent"}' % (HC1,LB1),
},
],
'teardown': [
'gcutil deleteforwardingrule --region=%s -f %s %s' % (REGION, LB1, LB2),
'sleep 10',
'gcutil deletetargetpool --region=%s -f %s-tp %s-tp' % (REGION, LB1, LB2),
'sleep 10',
'gcutil deletehttphealthcheck -f %s %s' % (HC1, HC2),
],
},
]
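
# Each entry in test_cases above is a test category consumed by main():
#   'id'       - category id, selectable from the command line
#   'desc'     - human-readable description of the category
#   'setup'    - shell commands (gcutil calls) run before the category's tests
#   'tests'    - list of tests; each one carries 'desc', 'm' (ansible module),
#                'a' (module arguments), 'r' (expected one-line ansible output),
#                plus optional per-test 'setup'/'teardown'/'peek_before'/
#                'peek_after' command lists and a 'strip_numbers' flag that
#                makes the output comparison ignore digits (IP addresses, etc.)
#   'teardown' - shell commands run after the category's tests complete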
def main(tests_to_run=None):
    tests_to_run = tests_to_run or []
for test in test_cases:
if tests_to_run and test['id'] not in tests_to_run:
continue
print "=> starting/setup '%s:%s'"% (test['id'], test['desc'])
if DEBUG: print "=debug>", test['setup']
for c in test['setup']:
(s,o) = run(c)
test_i = 1
for t in test['tests']:
if DEBUG: print "=>debug>", test_i, t['desc']
# run any test-specific setup commands
if t.has_key('setup'):
for setup in t['setup']:
(status, output) = run(setup)
# run any 'peek_before' commands
if t.has_key('peek_before') and PEEKING_ENABLED:
for setup in t['peek_before']:
(status, output) = run(setup)
            # run the ansible test only if 'a' is set; an entry with an
            # empty 'a' still runs its setup/teardown, which can prepare
            # the environment for a subsequent test
if t['a']:
if DEBUG: print "=>debug>", t['m'], t['a']
acmd = "ansible all -o -m %s -a \"%s\"" % (t['m'],t['a'])
#acmd = "ANSIBLE_KEEP_REMOTE_FILES=1 ansible all -vvv -m %s -a \"%s\"" % (t['m'],t['a'])
(s,o) = run(acmd)
# check expected output
if DEBUG: print "=debug>", o.strip(), "!=", t['r']
print "=> %s.%02d '%s':" % (test['id'], test_i, t['desc']),
if t.has_key('strip_numbers'):
# strip out all numbers so we don't trip over different
# IP addresses
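                    # (every digit is stripped, so sizes, ids and ports are
                    # also ignored by the comparison, not just IPs)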
is_good = (o.strip().translate(None, "0123456789") == t['r'].translate(None, "0123456789"))
else:
is_good = (o.strip() == t['r'])
if is_good:
print "PASS"
else:
print "FAIL"
if VERBOSE:
print "=>", acmd
print "=> Expected:", t['r']
print "=> Got:", o.strip()
# run any 'peek_after' commands
if t.has_key('peek_after') and PEEKING_ENABLED:
for setup in t['peek_after']:
(status, output) = run(setup)
# run any test-specific teardown commands
if t.has_key('teardown'):
for td in t['teardown']:
(status, output) = run(td)
test_i += 1
print "=> completing/teardown '%s:%s'" % (test['id'], test['desc'])
if DEBUG: print "=debug>", test['teardown']
for c in test['teardown']:
(s,o) = run(c)
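
# Command-line usage: run with no arguments to execute every test category,
# or pass a comma-separated list of category ids (e.g. "02,05") to run a
# subset; --list or --help prints the available categories.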
if __name__ == '__main__':
tests_to_run = []
if len(sys.argv) == 2:
if sys.argv[1] in ["--help", "--list"]:
print "usage: %s [id1,id2,...,idN]" % sys.argv[0]
print " * An empty argument list will execute all tests"
print " * Do not need to specify tests in numerical order"
print " * List test categories with --list or --help"
print ""
for test in test_cases:
print "\t%s:%s" % (test['id'], test['desc'])
sys.exit(0)
else:
tests_to_run = sys.argv[1].split(',')
main(tests_to_run)