
Initial commit of Ansible support for the Consul clustering framework (http://consul.io).

Submission includes support for
 - creating and registering services and checks
 - reading, writing and lookup for values in consul's kv store
 - creating and manipulating sessions for distributed locking on values in the kv
 - creating and manipulating ACLs for restricting access to the kv store
 - inventory support that reads the Consul catalog and groups nodes according to
     - datacenters
     - exposed services
     - service availability
     - arbitrary groupings from the kv store

This submission makes extensive use of the python-consul library, which is required
as a dependency and can be installed via pip.

The tests were written to target a vagrant cluster, which can be set up by following the
instructions at http://github.com/sgargan/consul-vagrant
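As a quick taste of the modules in action, a playbook using them might look like the following (an illustrative sketch only; the parameters mirror the module usage in the integration tests below):

```yaml
- hosts: localhost
  connection: local
  tasks:
    # register a service with a periodic health-check script
    - name: register nginx with a check
      consul:
        service_name: nginx
        service_port: 80
        script: "sh -c true"
        interval: 60

    # write a value to the kv store, then read it back via the lookup plugin
    - name: store a value
      consul_kv: key=ansible/somedata value=some-value

    - name: read it back
      debug: msg='key contains {{item}}'
      with_consul_kv:
        - 'ansible/somedata'
```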
Steve Gargan 2015-01-24 01:09:03 +00:00
parent 53a3644ecd
commit c02f114967
12 changed files with 1121 additions and 1 deletion


@@ -0,0 +1,128 @@
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
Lookup plugin to grab metadata from a consul key value store.
============================================================
Plugin will look up metadata for a playbook from the key value store in a
consul cluster. Values can be easily set in the kv store with simple rest
commands e.g.
curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata
this can then be looked up in a playbook as follows
- debug: msg='key contains {{item}}'
with_consul_kv:
- 'key/to/retrieve'
Parameters can be provided after the key to be more specific about what to retrieve, e.g.
- debug: msg='key contains {{item}}'
with_consul_kv:
- 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
recurse: if true, will retrieve all the values that have the given key as prefix
index: if the key has a value with the specified index then this is returned
allowing access to historical values.
token: acl token to allow access to restricted values.
By default this will look up keys via the consul agent running on http://localhost:8500;
this can be changed by setting the env variable 'ANSIBLE_CONSUL_URL' to point to the url
of the kv store you'd like to use.
'''
######################################################################
import os
import sys
from urlparse import urlparse
from ansible import utils, errors
try:
import json
except ImportError:
import simplejson as json
try:
import consul
except ImportError, e:
print "failed=True msg='python-consul required for this module. "\
"see http://python-consul.readthedocs.org/en/latest/#installation'"
sys.exit(1)
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
self.agent_url = 'http://localhost:8500'
if os.getenv('ANSIBLE_CONSUL_URL') is not None:
self.agent_url = os.environ['ANSIBLE_CONSUL_URL']
def run(self, terms, inject=None, **kwargs):
u = urlparse(self.agent_url)
consul_api = consul.Consul(host=u.hostname, port=u.port)
values = []
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
try:
for term in terms:
params = self.parse_params(term)
results = consul_api.kv.get(params['key'],
token=params['token'],
index=params['index'],
recurse=params['recurse'])
if results[1]:
# responds with a single or list of result maps
if isinstance(results[1], list):
for r in results[1]:
values.append(r['Value'])
else:
values.append(results[1]['Value'])
except Exception, e:
raise errors.AnsibleError(
"Error locating '%s' in kv store. Error was %s" % (term, e))
return values
def parse_params(self, term):
params = term.split(' ')
paramvals = {
'key': params[0],
'token': None,
'recurse': False,
'index': None
}
# parameters specified?
try:
for param in params[1:]:
if param and len(param) > 0:
name, value = param.split('=')
assert name in paramvals, "%s is not a valid consul lookup parameter" % name
paramvals[name] = value
except (ValueError, AssertionError), e:
raise errors.AnsibleError(e)
return paramvals
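Combining the options above, a task that performs a recursive, token-authenticated lookup against a non-default agent might look like this (an illustrative sketch; the token is a placeholder and the keys are assumed to exist):

```yaml
# point the plugin at a non-default agent first, e.g.
#   export ANSIBLE_CONSUL_URL=http://consul.example.com:8500
- debug: msg='found {{item}}'
  with_consul_kv:
    - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
```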


@@ -0,0 +1,37 @@
# Ansible Consul external inventory script settings.
[consul]
# restrict included nodes to those from this datacenter
#datacenter = nyc1
# url of the consul cluster to query
#url = http://demo.consul.io
url = http://localhost:8500
# suffix added to each service to create a group name, e.g. a service of 'redis' and
# a suffix of '_servers' will add each address to the group name 'redis_servers'
servers_suffix = _servers
# if specified then the inventory will generate domain names that will resolve
# via Consul's inbuilt DNS.
#domain=consul
# make groups from service tags. the name of the group is derived from the
# suffixed service name and the tag name e.g. a service named nginx with tags
# ['master', 'v1'] and servers_suffix '_servers' will create groups
# nginx_servers_master and nginx_servers_v1
tags = true
# looks up the node name at the given path for a list of groups to which the
# node should be added.
kv_groups=ansible/groups
# looks up the node name at the given path for a json dictionary of metadata that
# should be attached as metadata for the node
kv_metadata=ansible/metadata
# looks up the health of each service and adds the node to 'up' and 'down' groups
# based on the service availability
availability = true
available_suffix = _up
unavailable_suffix = _down
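With the settings above, the generated inventory would contain groups along these lines (an illustrative sketch; the addresses and services depend on what is registered in the cluster):

```yaml
# with servers_suffix = _servers, tags = true and availability = true
redis_servers: ['10.0.0.1', '10.0.0.2']   # nodes with a registered redis service
nginx_servers: ['10.0.0.3']
nginx_servers_master: ['10.0.0.3']        # from the 'master' service tag
nginx_up: ['10.0.0.3']                    # nginx health check is 'passing'
```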

plugins/inventory/consul_io.py Executable file

@@ -0,0 +1,427 @@
#!/usr/bin/env python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Consul.io inventory script (http://consul.io)
=============================================
Generates Ansible inventory from nodes in a Consul cluster. This script will
group nodes by:
- datacenter,
- registered service
- service tags
- service status
- values from the k/v store
This script can be run with the following switches:
--list as expected, groups all the nodes in all datacenters
--datacenter, to restrict the nodes to a single datacenter
--host to restrict the inventory to a single named node (requires datacenter config)
The configuration for this plugin is read from a consul.ini file located in the
same directory as this inventory script. All config options in the config file
are optional except the host and port, which must point to a valid agent or
server running the http api. For more information on enabling the endpoint see:
http://www.consul.io/docs/agent/options.html
Other options include:
'datacenter':
which restricts the included nodes to those from the given datacenter
'domain':
if specified then the inventory will generate domain names that will resolve
via Consul's inbuilt DNS. The name is derived from the node name, datacenter
and domain <node_name>.node.<datacenter>.<domain>. Note that you will need to
have consul hooked into your DNS server for these to resolve. See the consul
DNS docs for more info.
'servers_suffix':
defines a suffix to add to the service name when creating the service
group. e.g. a service name of 'redis' and a suffix of '_servers' will add
each node's address to the group name 'redis_servers'. No suffix is added
if this is not set
'tags':
boolean flag defining if service tags should be used to create Inventory
groups e.g. an nginx service with the tags ['master', 'v1'] will create
groups nginx_master and nginx_v1 to which the node running the service
will be added (note that if servers_suffix is set it is included in these
group names, e.g. nginx_servers_master). No tag groups are created if this is missing.
'token':
ACL token to use to authorize access to the key value store. May be required
to retrieve the kv_groups and kv_metadata based on your consul configuration.
'kv_groups':
This is used to lookup groups for a node in the key value store. It specifies a
path to which each discovered node's name will be added to create a key to query
the key/value store. There it expects to find a comma separated list of group
names to which the node should be added e.g. if the inventory contains
'nyc-web-1' in datacenter 'dc1' and kv_groups = 'ansible/groups', then the key
'ansible/groups/dc1/nyc-web-1' will be queried for a group list. If this query
returned 'test,honeypot' then the node's address is added to both groups.
'kv_metadata':
kv_metadata is used to lookup metadata for each discovered node. Like kv_groups
above it is used to build a path to lookup in the kv store where it expects to
find a json dictionary of metadata entries. If found, each key/value pair in the
dictionary is added to the metadata for the node.
'availability':
if true then availability groups will be created for each service. The node will
be added to one of the groups based on the health status of the service. The
group name is derived from the service name and the configurable availability
suffixes
'available_suffix':
suffix that should be appended to the service availability groups for available
services e.g. if the suffix is '_up' and the service is nginx, then nodes with
healthy nginx services will be added to the nginx_up group. Defaults to
'_available'
'unavailable_suffix':
as above but for unhealthy services, defaults to '_unavailable'
Note that if the inventory discovers an 'ssh' service running on a node it will
register the port as ansible_ssh_port in the node's metadata and this port will
be used to access the machine.
'''
import os
import sys
import re
import argparse
from time import time
import ConfigParser
import urllib, urllib2, base64
try:
import json
except ImportError:
import simplejson as json
try:
import consul
except ImportError, e:
print """failed=True msg='python-consul required for this module. see
http://python-consul.readthedocs.org/en/latest/#installation'"""
sys.exit(1)
class ConsulInventory(object):
def __init__(self):
''' Create an inventory based on the catalog of nodes and services
registered in a consul cluster'''
self.node_metadata = {}
self.nodes = {}
self.nodes_by_service = {}
self.nodes_by_tag = {}
self.nodes_by_datacenter = {}
self.nodes_by_kv = {}
self.nodes_by_availability = {}
self.current_dc = None
config = ConsulConfig()
self.config = config
self.consul_api = config.get_consul_api()
if config.has_config('datacenter'):
if config.has_config('host'):
self.load_data_for_node(config.host, config.datacenter)
else:
self.load_data_for_datacenter(config.datacenter)
else:
self.load_all_data_consul()
self.combine_all_results()
print json.dumps(self.inventory, sort_keys=True, indent=2)
def load_all_data_consul(self):
''' cycle through each of the datacenters in the consul catalog and process
the nodes in each '''
self.datacenters = self.consul_api.catalog.datacenters()
for datacenter in self.datacenters:
self.current_dc = datacenter
self.load_data_for_datacenter(datacenter)
def load_availability_groups(self, node, datacenter):
'''check the health of each service on a node and add the node to either
an 'available' or 'unavailable' grouping. The suffix for each group can be
controlled from the config'''
if self.config.has_config('availability'):
for service_name, service in node['Services'].iteritems():
for service_node in self.consul_api.health.service(service_name)[1]:
for check in service_node['Checks']:
if check['ServiceName'] == service_name:
ok = 'passing' == check['Status']
if ok:
suffix = self.config.get_availability_suffix(
'available_suffix', '_available')
else:
suffix = self.config.get_availability_suffix(
'unavailable_suffix', '_unavailable')
self.add_node_to_map(self.nodes_by_availability,
service_name + suffix, service_node['Node'])
def load_data_for_datacenter(self, datacenter):
'''processes all the nodes in a particular datacenter'''
index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
for node in nodes:
self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
self.load_data_for_node(node['Node'], datacenter)
def load_data_for_node(self, node, datacenter):
'''loads the data for a single node, adding it to various groups based on
metadata retrieved from the kv store and service availability'''
index, node_data = self.consul_api.catalog.node(node, datacenter)
node = node_data['Node']
self.add_node_to_map(self.nodes, 'all', node)
self.add_metadata(node_data, "consul_datacenter", datacenter)
self.add_metadata(node_data, "consul_nodename", node['Node'])
self.load_groups_from_kv(node_data)
self.load_node_metadata_from_kv(node_data)
self.load_availability_groups(node_data, datacenter)
for name, service in node_data['Services'].items():
self.load_data_from_service(name, service, node_data)
def load_node_metadata_from_kv(self, node_data):
''' load the json dict at the metadata path defined by the kv_metadata value
and the node name, adding each entry in the dictionary to the node's
metadata '''
node = node_data['Node']
if self.config.has_config('kv_metadata'):
key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
index, metadata = self.consul_api.kv.get(key)
if metadata and metadata['Value']:
try:
metadata = json.loads(metadata['Value'])
for k,v in metadata.items():
self.add_metadata(node_data, k, v)
except ValueError:
pass  # ignore metadata that is not valid json
def load_groups_from_kv(self, node_data):
''' load the comma separated list of groups at the path defined by the
kv_groups config value and the node name, adding the node address to each
group found '''
node = node_data['Node']
if self.config.has_config('kv_groups'):
key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
index, groups = self.consul_api.kv.get(key)
if groups and groups['Value']:
for group in groups['Value'].split(','):
self.add_node_to_map(self.nodes_by_kv, group.strip(), node)
def load_data_from_service(self, service_name, service, node_data):
'''process a service registered on a node, adding the node to a group with
the service name. Each service tag is extracted and the node is added to a
tag grouping also'''
self.add_metadata(node_data, "consul_services", service_name, True)
if self.is_service("ssh", service_name):
self.add_metadata(node_data, "ansible_ssh_port", service['Port'])
if self.config.has_config('servers_suffix'):
service_name = service_name + self.config.servers_suffix
self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node'])
self.extract_groups_from_tags(service_name, service, node_data)
def is_service(self, target, name):
return name and (name.lower() == target.lower())
def extract_groups_from_tags(self, service_name, service, node_data):
'''iterates each service tag and adds the node to groups derived from the
service and tag names e.g. nginx_master'''
if self.config.has_config('tags') and service['Tags']:
tags = service['Tags']
self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
for tag in service['Tags']:
tagname = service_name +'_'+tag
self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
def combine_all_results(self):
'''prunes and sorts all groupings for combination into the final map'''
self.inventory = {"_meta": { "hostvars" : self.node_metadata}}
groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
for grouping in groupings:
for name, addresses in grouping.items():
self.inventory[name] = sorted(list(set(addresses)))
def add_metadata(self, node_data, key, value, is_list = False):
''' Pushes an element into the metadata dict for the node, creating
the dict if it doesn't exist '''
key = self.to_safe(key)
node = self.get_inventory_name(node_data['Node'])
if node in self.node_metadata:
metadata = self.node_metadata[node]
else:
metadata = {}
self.node_metadata[node] = metadata
if is_list:
self.push(metadata, key, value)
else:
metadata[key] = value
def get_inventory_name(self, node_data):
'''return the ip or a node name that can be looked up in consul's dns'''
domain = self.config.domain
if domain:
node_name = node_data['Node']
if self.current_dc:
return '%s.node.%s.%s' % ( node_name, self.current_dc, domain)
else:
return '%s.node.%s' % ( node_name, domain)
else:
return node_data['Address']
def add_node_to_map(self, map, name, node):
self.push(map, name, self.get_inventory_name(node))
def push(self, my_dict, key, element):
''' Pushes an element onto an array that may not have been defined in the
dict '''
key = self.to_safe(key)
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used
as Ansible groups '''
return re.sub('[^A-Za-z0-9\-\.]', '_', word)
def sanitize_dict(self, d):
new_dict = {}
for k, v in d.items():
if v is not None:
new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
return new_dict
def sanitize_list(self, seq):
new_seq = []
for d in seq:
new_seq.append(self.sanitize_dict(d))
return new_seq
class ConsulConfig(dict):
def __init__(self):
self.read_settings()
self.read_cli_args()
def has_config(self, name):
if hasattr(self, name):
return getattr(self, name)
else:
return False
def read_settings(self):
''' Reads the settings from the consul.ini file '''
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini')
config_options = ['host', 'token', 'datacenter', 'servers_suffix',
'tags', 'kv_metadata', 'kv_groups', 'availability',
'unavailable_suffix', 'available_suffix', 'url',
'domain']
for option in config_options:
value = None
if config.has_option('consul', option):
value = config.get('consul', option)
setattr(self, option, value)
def read_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description=
'Produce an Ansible Inventory file based on nodes in a Consul cluster')
parser.add_argument('--list', action='store_true',
help='Get all inventory variables from all nodes in the consul cluster')
parser.add_argument('--host', action='store',
help='Get all inventory variables about a specific consul node, \
requires datacenter set in consul.ini.')
parser.add_argument('--datacenter', action='store',
help='Get all inventory about a specific consul datacenter')
args = parser.parse_args()
arg_names = ['host', 'datacenter']
for arg in arg_names:
if getattr(args, arg):
setattr(self, arg, getattr(args, arg))
def get_availability_suffix(self, suffix, default):
if self.has_config(suffix):
return self.has_config(suffix)
return default
def get_consul_api(self):
'''get an instance of the api based on the supplied configuration'''
host = 'localhost'
port = 8500
token = None
if hasattr(self, 'url'):
from urlparse import urlparse
o = urlparse(self.url)
if o.hostname:
host = o.hostname
if o.port:
port = o.port
if hasattr(self, 'token'):
token = self.token
if not token:
token = 'anonymous'
return consul.Consul(host=host, port=port, token=token)
ConsulInventory()
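To drive the kv_groups and kv_metadata lookups above, seed the kv store with entries keyed by path, datacenter and node name. A minimal sketch using the consul_kv module (the 'dc1'/'consul-1' names are illustrative, mirroring the integration tests later in this commit):

```yaml
- name: extra groups for node consul-1, read when kv_groups=ansible/groups
  consul_kv: key=ansible/groups/dc1/consul-1 value='a_group, another_group'

- name: json metadata for node consul-1, read when kv_metadata=ansible/metadata
  consul_kv: key=ansible/metadata/dc1/consul-1 value='{"clearance": "top_secret"}'
```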


@@ -19,6 +19,8 @@ TMPDIR = $(shell mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')
VAULT_PASSWORD_FILE = vault-password
CONSUL_RUNNING := $(shell python consul_running.py)
all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault
parsing:
@@ -30,7 +32,7 @@ parsing:
ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS)
includes:
ansible-playbook test_includes.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS)
unicode:
ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS)
@@ -119,6 +121,16 @@ rackspace: $(CREDENTIALS_FILE)
CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make rackspace_cleanup ; \
exit $$RC;
consul:
ifeq ($(CONSUL_RUNNING), True)
ansible-playbook -i $(INVENTORY) consul.yml ; \
ansible-playbook -i ../../plugins/inventory/consul_io.py consul_inventory.yml
else
@echo "Consul agent is not running locally. To run a cluster locally see http://github.com/sgargan/consul-vagrant"
endif
test_galaxy: test_galaxy_spec test_galaxy_yaml
test_galaxy_spec:


@@ -0,0 +1,82 @@
- hosts: localhost
connection: local
gather_facts: false
vars:
# these are the defaults from the consul-vagrant cluster setup
- mgmt_token: '4791402A-D875-4C18-8316-E652DBA53B18'
- acl_host: '11.0.0.2'
- metadata_json: '{"clearance": "top_secret"}'
pre_tasks:
# this works except for the kv lookups
- name: check that the consul agent is running locally
local_action: wait_for port=8500 timeout=5
ignore_errors: true
register: consul_running
roles:
- {role: test_consul_service,
when: not consul_running.failed is defined}
- {role: test_consul_kv,
when: not consul_running.failed is defined}
- {role: test_consul_acl,
when: not consul_running.failed is defined}
- {role: test_consul_session,
when: not consul_running.failed is defined}
tasks:
- name: setup services with passing check for consul inventory test
consul:
service_name: nginx
service_port: 80
script: "sh -c true"
interval: 5
token: '4791402A-D875-4C18-8316-E652DBA53B18'
tags:
- dev
- master
- name: setup failing service for inventory test
consul:
service_name: nginx
service_port: 443
script: "sh -c false"
interval: 5
tags:
- qa
- slave
- name: setup ssh service for inventory test
consul:
service_name: ssh
service_port: 2222
script: "sh -c true"
interval: 5
token: '4791402A-D875-4C18-8316-E652DBA53B18'
- name: update the Anonymous token to allow anon access to kv store
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
token: 'anonymous'
rules:
- key: ''
policy: write
register: inventory_token
- name: add metadata for the node through kv_store
consul_kv: "key=ansible/metadata/dc1/consul-1 value='{{metadata_json}}'"
- name: add groups for the node through kv_store
consul_kv: key=ansible/groups/dc1/consul-1 value='a_group, another_group'
- name: warn that tests are ignored if consul agent is not running
debug: msg="A consul agent needs to be running inorder to run the tests. To setup a vagrant cluster for use in testing see http://github.com/sgargan/consul-vagrant"
when: consul_running.failed is defined


@@ -0,0 +1,19 @@
- hosts: all;!localhost
gather_facts: false
pre_tasks:
- name: check that the consul agent is running locally
local_action: wait_for port=8500 timeout=5
ignore_errors: true
register: consul_running
roles:
- {role: test_consul_inventory,
when: not consul_running.failed is defined}
tasks:
- name: warn that tests are ignored if consul agent is not running
debug: msg="A consul agent needs to be running inorder to run the tests. To setup a vagrant cluster for use in testing see http://github.com/sgargan/consul-vagrant"
when: consul_running.failed is defined


@@ -0,0 +1,11 @@
''' Checks that the consul agent is running locally. '''
if __name__ == '__main__':
try:
import consul
agent = consul.Consul(host='0.0.0.0', port=8500)
agent.catalog.nodes()
print "True"
except:
pass


@@ -0,0 +1,42 @@
- name: create a new acl token
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
name: 'New ACL'
register: new_ruleless
- name: verify ruleless token created
assert:
that:
- new_ruleless.token | length == 36
- new_ruleless.name == 'New ACL'
- name: add rules to an acl token
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
name: 'With rule'
rules:
- key: 'foo'
policy: read
- key: 'private/foo'
policy: deny
register: with_rules
- name: verify rules created
assert:
that:
- with_rules.token | length == 36
- with_rules.name == 'With rule'
- with_rules.rules | match('.*"foo".*')
- with_rules.rules | match('.*"private/foo".*')
- name: clear up
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
token: '{{item}}'
state: absent
with_items:
- '{{new_ruleless.token}}'
- '{{with_rules.token}}'


@@ -0,0 +1,39 @@
- name: there are three hosts with an available consul service
assert:
that:
- groups.consul_servers | length == 3
- name: there is one host with an available ssh service
assert:
that:
- groups.ssh_up | length == 1
- name: there is one host with a failing nginx service
assert:
that:
- groups.nginx_down | length == 1
- name: services get added to groups with their tags
assert:
that:
- groups.nginx_servers_qa | length == 1
- groups.nginx_servers_slave | length == 1
- name: metadata from the kv store gets added to the facts for a host
assert:
that:
- clearance | match('top_secret')
when: inventory_hostname == '11.0.0.2'
- name: extra groups a host should be added to can be loaded from kv
assert:
that:
- groups.a_group | length == 1
- groups.another_group | length == 1
- name: ansible_ssh_port is set if the ssh service is registered
assert:
that:
- ansible_ssh_port == 2222
when: not inventory_hostname in ['11.0.0.2', '11.0.0.3', '11.0.0.4']


@@ -0,0 +1,90 @@
- name: add rules to an acl token
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
name: 'ACL rule for testing'
rules:
- key: 'somekey'
policy: all
register: test_acl
- name: cleanup from previous failed runs
consul_kv: key={{item}} state=absent token='{{test_acl.token}}'
with_items:
- somekey
- name: add a kv pair to the kv store
consul_kv: key=somekey value=somevalue token='{{test_acl.token}}'
register: new_key
- name: verify new key
assert:
that:
- new_key.key == 'somekey'
- new_key.data.Value == 'somevalue'
- new_key.changed == true
- name: add an existing kv to the kv store
consul_kv: key=somekey value=somevalue token='{{test_acl.token}}'
register: existing_key
- name: verify existing key cause no change
assert:
that:
- existing_key.key == 'somekey'
- existing_key.data.Value == 'somevalue'
- existing_key.changed == False
- name: remove an existing kv from the kv store
consul_kv: key=somekey state=absent token='{{test_acl.token}}'
register: remove_key
- name: verify removal causes change and existing value is returned
assert:
that:
- remove_key.key == 'somekey'
- remove_key.data.Value == 'somevalue'
- remove_key.changed == True
- name: attempting to remove a non-existent kv from the kv store causes no change
consul_kv: key=not_present state=absent token='{{test_acl.token}}'
register: non_existant_key
- name: verify removal of a non-existent key causes no change
assert:
that:
- non_existant_key.key == 'not_present'
- non_existant_key.data == None
- non_existant_key.changed == False
- name: Add a key to lookup with the lookup capability
consul_kv: key='key/to/lookup_{{item}}' value='somevalue_{{item}}' token='{{test_acl.token}}'
with_items:
- one
- two
register: lookup_keys
# necessary to make the new token available to the with_consul_kv lookup
- set_fact: acl_token={{test_acl.token}}
- name: kv test
assert:
that:
- "{{item | match('somevalue_one')}}"
with_consul_kv:
- 'key/to/lookup_one token={{acl_token}}'
- name: recursive kv lookup test
assert:
that:
- "{{item| match('somevalue_(one|two)')}}"
with_consul_kv:
- 'key/to recurse=true token={{acl_token}}'
- name: remove test acl rule
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
token: '{{test_acl.token}}'
state: absent


@@ -0,0 +1,156 @@
- name: cleanup any previous failed runs
consul:
service_id: '{{item}}'
state: absent
with_items:
- service1
- service2
- with_check
- with_tags
- name: register very basic service with consul gets default id
consul:
service_name: service1
service_port: 80
register: basic_result
- name: verify basic service registration
assert:
that:
- basic_result.changed
- basic_result.service_port == 80
- basic_result.service_id == 'service1'
- basic_result.service_name == 'service1'
- name: register very basic service with explicit id
consul:
service_name: Basic Service
service_id: service2
service_port: 80
register: basic2_result
- name: verify service2 registration
assert:
that:
- basic2_result.changed
- basic2_result.service_port == 80
- basic2_result.service_id == 'service2'
- basic2_result.service_name == 'Basic Service'
- name: register very basic service with check script
consul:
service_name: with_check
service_port: 80
script: "sh -c true"
interval: 60
register: script_result
- name: verify service with check registration
assert:
that:
- script_result.changed
- script_result.service_port == 80
- script_result.service_id == 'with_check'
- script_result.checks | length == 1
- name: register service with some service tags
consul:
service_name: with_tags
service_port: 80
tags:
- prod
- webservers
register: tags_result
- name: verify tags registration
assert:
that:
- tags_result.changed
- "tags_result.tags == ['prod', 'webservers']"
- name: create a node level check
consul:
check_name: Node Level Check
check_id: node_level
script: "sh -c true"
interval: 50m
register: nodelevel_result
- name: verify service with check registration
assert:
that:
- nodelevel_result.changed
- nodelevel_result.check_name == 'Node Level Check'
- nodelevel_result.check_id == 'node_level'
- nodelevel_result.script == 'sh -c true'
- nodelevel_result.interval == '50m'
- name: remove a service
consul:
service_id: 'service1'
state: absent
register: delete_service_result
- name: verify service removal
assert:
that:
- delete_service_result.changed
- name: removal of a non-existent service causes no change
consul:
service_id: 'service1'
state: absent
register: delete_service_result
- name: verify service removal caused no change
assert:
that:
- not delete_service_result.changed
- name: remove a check
consul:
check_id: 'node_level'
state: absent
register: delete_check_result
- name: verify check removal
assert:
that:
- delete_check_result.changed
- name: removal of a non-existent check causes no change
consul:
check_id: 'node_level'
state: absent
register: delete_check_result
- name: verify check removal caused no change
assert:
that:
- not delete_check_result.changed
- name: add service to test removal by name
consul:
service_name: by_name
service_port: 12345
- name: remove service by name
consul:
service_name: by_name
state: absent
register: delete_service_by_name_result
- name: verify service removal
assert:
that:
- delete_service_by_name_result.changed
- name: cleanup
consul:
service_id: '{{item}}'
state: absent
with_items:
- service2
- with_check
- with_tags


@@ -0,0 +1,77 @@
- name: register basic session with consul
consul_session:
name: session1
register: basic_result
- name: verify basic session registration
assert:
that:
- basic_result.changed
- basic_result.session_id | length == 36
- basic_result.name == 'session1'
- name: add checks for session health check
consul:
check_name: session_check
script: /usr/bin/true
interval: 15
- name: register a session with check
consul_session:
name: session_with_check
checks:
- session_check
register: with_check
- name: verify basic session registration
assert:
that:
- with_check.changed
- with_check.session_id | length == 36
- with_check.name == 'session_with_check'
- with_check.checks == ['session_check']
- name: register a session with lock_delay
consul_session:
name: session_with_delay
delay: 20
register: with_delay
- name: verify registration of session with delay
assert:
that:
- with_delay.changed
- with_delay.session_id | length == 36
- with_delay.name == 'session_with_delay'
- with_delay.delay == 20
- name: retrieve session by id
consul_session: id='{{with_delay.session_id}}' state=info
register: retrieved_by_id
- name: verify retrieval by id
assert:
that:
- with_delay.session_id == retrieved_by_id.sessions[1].ID
- name: retrieve sessions by id
consul_session: state=list
register: retrieved_by_list
- name: verify retrieval by list
assert:
that:
- 3 <= retrieved_by_list.sessions[0]
- name: remove sessions
consul_session: id={{item}} state=absent
with_items:
- basic_result.session_id
- with_check.session_id
- with_delay.session_id
- name: remove check
consul:
check_name: session_check
state: absent