From 01280226549b350da7f837d57067097a9b445d45 Mon Sep 17 00:00:00 2001 From: Milan Ilic <35260522+ilicmilan@users.noreply.github.com> Date: Wed, 28 Mar 2018 23:32:04 +0200 Subject: [PATCH] Add OpenNebula one_vm module (#37825) * Add OpenNebula one_vm module * `one_vm` - module for managing VM instances on OpenNebula * Add integration tests --- .../modules/cloud/opennebula/__init__.py | 0 .../modules/cloud/opennebula/one_vm.py | 1368 +++++++++++++++++ test/legacy/opennebula.yml | 4 + test/legacy/roles/one_vm/defaults/main.yml | 56 + test/legacy/roles/one_vm/tasks/main.yml | 924 +++++++++++ 5 files changed, 2352 insertions(+) create mode 100644 lib/ansible/modules/cloud/opennebula/__init__.py create mode 100644 lib/ansible/modules/cloud/opennebula/one_vm.py create mode 100644 test/legacy/opennebula.yml create mode 100644 test/legacy/roles/one_vm/defaults/main.yml create mode 100644 test/legacy/roles/one_vm/tasks/main.yml diff --git a/lib/ansible/modules/cloud/opennebula/__init__.py b/lib/ansible/modules/cloud/opennebula/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lib/ansible/modules/cloud/opennebula/one_vm.py b/lib/ansible/modules/cloud/opennebula/one_vm.py new file mode 100644 index 0000000000..e7fb7c20d1 --- /dev/null +++ b/lib/ansible/modules/cloud/opennebula/one_vm.py @@ -0,0 +1,1368 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +""" +(c) 2017, Milan Ilic + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: one_vm +short_description: Creates or terminates OpenNebula instances +description: + - Manages OpenNebula instances +version_added: "2.6" +requirements: + - python-oca +options: + api_url: + description: + - URL of the OpenNebula RPC server. + - It is recommended to use HTTPS so that the username/password are not + - transferred over the network unencrypted. + - If not set then the value of the C(ONE_URL) environment variable is used. + api_username: + description: + - Name of the user to login into the OpenNebula RPC server. If not set + - then the value of the C(ONE_USERNAME) environment variable is used. + api_password: + description: + - Password of the user to login into OpenNebula RPC server. If not set + - then the value of the C(ONE_PASSWORD) environment variable is used. + template_name: + description: + - Name of VM template to use to create a new instance + template_id: + description: + - ID of a VM template to use to create a new instance + instance_ids: + description: + - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff) + aliases: ['ids'] + state: + description: + - C(present) - create instances from a template specified with C(template_id)/C(template_name). 
+ - C(running) - run instances + - C(poweredoff) - power-off instances + - C(rebooted) - reboot instances + - C(absent) - terminate instances + choices: ["present", "absent", "running", "rebooted", "poweredoff"] + default: present + hard: + description: + - Reboot, power-off or terminate instances C(hard) + default: no + type: bool + wait: + description: + - Wait for the instance to reach its desired state before returning. Keep + - in mind if you are waiting for instance to be in running state it + - doesn't mean that you will be able to SSH on that machine only that + - boot process have started on that instance, see 'wait_for' example for + - details. + default: yes + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds + default: 300 + attributes: + description: + - A dictionary of key/value attributes to add to new instances, or for + - setting C(state) of instances with these attributes. + - Keys are case insensitive and OpenNebula automatically converts them to upper case. + - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed. + - C(#) character(s) can be appended to the C(NAME) and the module will automatically add + - indexes to the names of VMs. + - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),... + - When used with C(count_attributes) and C(exact_count) the module will + - match the base name without the index part. + default: {} + labels: + description: + - A list of labels to associate with new instances, or for setting + - C(state) of instances with these labels. + default: [] + count_attributes: + description: + - A dictionary of key/value attributes that can only be used with + - C(exact_count) to determine how many nodes based on a specific + - attributes criteria should be deployed. This can be expressed in + - multiple ways and is shown in the EXAMPLES section. 
+ count_labels: + description: + - A list of labels that can only be used with C(exact_count) to determine + - how many nodes based on a specific labels criteria should be deployed. + - This can be expressed in multiple ways and is shown in the EXAMPLES + - section. + count: + description: + - Number of instances to launch + default: 1 + exact_count: + description: + - Indicates how many instances that match C(count_attributes) and + - C(count_labels) parameters should be deployed. Instances are either + - created or terminated based on this value. + - NOTE':' Instances with the least IDs will be terminated first. + memory: + description: + - The size of the memory for new instances (in MB, GB, ...) + disk_size: + description: + - The size of the disk created for new instances (in MB, GB, TB,...). + - NOTE':' This option can be used only if the VM template specified with + - C(template_id)/C(template_name) has exactly one disk. + cpu: + description: + - Percentage of CPU divided by 100 required for the new instance. Half a + - processor is written 0.5. + vcpu: + description: + - Number of CPUs (cores) new VM will have. + networks: + description: + - A list of dictionaries with network parameters. See examples for more details. + default: [] + disk_saveas: + description: + - Creates an image from a VM disk. + - It is a dictionary where you have to specify C(name) of the new image. + - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0. + - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed) + - and the VM has to be in the C(poweredoff) state. + - Also this operation will fail if an image with specified C(name) already exists. 
+author: + - "Milan Ilic (@ilicmilan)" +''' + + +EXAMPLES = ''' +# Create a new instance +- one_vm: + template_id: 90 + register: result + +# Print VM properties +- debug: + msg: result + +# Deploy a new VM and set its name to 'foo' +- one_vm: + template_name: 'app1_template' + attributes: + name: foo + +# Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks +- one_vm: + template_id: 15 + disk_size: 35.2 GB + memory: 4 GB + vcpu: 4 + count: 2 + networks: + - NETWORK_ID: 27 + - NETWORK: "default-network" + NETWORK_UNAME: "app-user" + SECURITY_GROUPS: "120,124" + - NETWORK_ID: 27 + SECURITY_GROUPS: "10" + +# Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo' +- one_vm: + template_id: 53 + attributes: + name: foo + bar: bar1 + +# Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed +- one_vm: + template_id: 53 + attributes: + foo1: app1 + foo2: app2 + exact_count: 2 + count_attributes: + foo1: app1 + foo2: app2 + +# Enforce that 4 instances with an attribute 'bar' are deployed +- one_vm: + template_id: 53 + attributes: + name: app + bar: bar2 + exact_count: 4 + count_attributes: + bar: + +# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##' +# Names will be: fooapp-00 and fooapp-01 +- one_vm: + template_id: 53 + attributes: + name: fooapp-## + foo: bar + labels: + - app1 + - app2 + count: 2 + +# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###' +# Names will be: fooapp-002 and fooapp-003 +- one_vm: + template_id: 53 + attributes: + name: fooapp-### + app: app1 + count: 2 + +# Reboot all instances with name in format 'fooapp-#' +# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted +- one_vm: + attributes: + name: fooapp-# + state: rebooted + +# Enforce that only 1 instance with name in format 'fooapp-#' is deployed +# The task will delete oldest instances, so only the 
'fooapp-003' will remain +- one_vm: + template_id: 53 + exact_count: 1 + count_attributes: + name: fooapp-# + +# Deploy a new instance with a network +- one_vm: + template_id: 53 + networks: + - NETWORK_ID: 27 + register: vm + +# Wait for SSH to come up +- wait_for_connection: + delegate_to: '{{ vm.instances[0].networks[0].ip }}' + +# Terminate VMs by ids +- one_vm: + instance_ids: + - 153 + - 160 + state: absent + +# Reboot all VMs that have labels 'foo' and 'app1' +- one_vm: + labels: + - foo + - app1 + state: rebooted + +# Fetch all VMs that have name 'foo' and attribute 'app: bar' +- one_vm: + attributes: + name: foo + app: bar + register: results + +# Deploy 2 new instances with labels 'foo1' and 'foo2' +- one_vm: + template_name: app_template + labels: + - foo1 + - foo2 + count: 2 + +# Enforce that only 1 instance with label 'foo1' will be running +- one_vm: + template_name: app_template + labels: + - foo1 + exact_count: 1 + count_labels: + - foo1 + +# Terminate all instances that have attribute foo +- one_vm: + template_id: 53 + exact_count: 0 + count_attributes: + foo: + +# Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image' +- one_vm: + instance_ids: 351 + state: poweredoff + disk_saveas: + name: foo-image + +# Save VM's disk with id=1 to the image with name 'bar-image' +- one_vm: + instance_ids: 351 + disk_saveas: + name: bar-image + disk_id: 1 +''' + +RETURN = ''' +instances_ids: + description: a list of instances ids whose state is changed or which are fetched with C(instance_ids) option. + type: list + returned: success + sample: [ 1234, 1235 ] +instances: + description: a list of instances info whose state is changed or which are fetched with C(instance_ids) option. 
+ type: complex + returned: success + contains: + vm_id: + description: vm id + type: integer + sample: 153 + vm_name: + description: vm name + type: string + sample: foo + template_id: + description: vm's template id + type: integer + sample: 153 + group_id: + description: vm's group id + type: integer + sample: 1 + group_name: + description: vm's group name + type: string + sample: one-users + user_id: + description: vm's user id + type: integer + sample: 143 + user_name: + description: vm's user name + type: string + sample: app-user + state: + description: state of an instance + type: string + sample: ACTIVE + lcm_state: + description: lcm state of an instance that is only relevant when the state is ACTIVE + type: string + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100 + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores) + type: int + sample: 2 + memory: + description: The size of the memory in MB + type: string + sample: 4096 MB + disk_size: + description: The size of the disk in MB + type: string + sample: 20480 MB + networks: + description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC + type: list + sample: [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours + type: integer + sample: 35 + labels: + description: A list of string labels that are associated with the instance + type: list + sample: [ + "foo", + "spec-label" + ] + attributes: + description: A dictionary of key/values attributes that are associated with the instance + type: dict + sample: { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } +tagged_instances: + description: + - A list of instances info based 
on a specific attributes and/or + - labels that are specified with C(count_attributes) and C(count_labels) + - options. + type: complex + returned: success + contains: + vm_id: + description: vm id + type: integer + sample: 153 + vm_name: + description: vm name + type: string + sample: foo + template_id: + description: vm's template id + type: integer + sample: 153 + group_id: + description: vm's group id + type: integer + sample: 1 + group_name: + description: vm's group name + type: string + sample: one-users + user_id: + description: vm's user id + type: integer + sample: 143 + user_name: + description: vm's user name + type: string + sample: app-user + state: + description: state of an instance + type: string + sample: ACTIVE + lcm_state: + description: lcm state of an instance that is only relevant when the state is ACTIVE + type: string + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100 + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores) + type: int + sample: 2 + memory: + description: The size of the memory in MB + type: string + sample: 4096 MB + disk_size: + description: The size of the disk in MB + type: string + sample: 20480 MB + networks: + description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC + type: list + sample: [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours + type: integer + sample: 35 + labels: + description: A list of string labels that are associated with the instance + type: list + sample: [ + "foo", + "spec-label" + ] + attributes: + description: A dictionary of key/values attributes that are associated with the instance + type: dict + sample: { + "HYPERVISOR": "kvm", + "LOGO": 
"images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } +''' + + +try: + import oca + HAS_OCA = True +except ImportError: + HAS_OCA = False + +from ansible.module_utils.basic import AnsibleModule +import os + + +def get_template(module, client, predicate): + pool = oca.VmTemplatePool(client) + # Filter -2 means fetch all templates user can Use + pool.info(filter=-2) + found = 0 + found_template = None + template_name = '' + + for template in pool: + if predicate(template): + found = found + 1 + found_template = template + template_name = template.name + + if found == 0: + return None + elif found > 1: + module.fail_json(msg='There are more templates with name: ' + template_name) + return found_template + + +def get_template_by_name(module, client, template_name): + return get_template(module, client, lambda template: (template.name == template_name)) + + +def get_template_by_id(module, client, template_id): + return get_template(module, client, lambda template: (template.id == template_id)) + + +def get_template_id(module, client, requested_id, requested_name): + template = get_template_by_id(module, client, requested_id) if requested_id else get_template_by_name(module, client, requested_name) + if template: + return template.id + else: + return None + + +def get_vm_by_id(client, vm_id): + pool = oca.VirtualMachinePool(client) + # Retrieves information for all or part of the vms pool + # -4: Vms belonging to the user's primary group + # -3: Vms belonging to the user + # -2: All vms user can Use + # -1: Vms belonging to the user and any of his groups - default + # >= 0: UID User's vms + pool.info(filter=-2) + + for vm in pool: + if str(vm.id) == str(vm_id): + return vm + return None + + +def get_vms_by_ids(module, client, state, ids): + vms = [] + + for vm_id in ids: + vm = get_vm_by_id(client, vm_id) + if vm is None and state != 'absent': + module.fail_json(msg='There is no VM with id=' + str(vm_id)) + vms.append(vm) + + return vms + + +def 
get_vm_info(client, vm): + vm.info() + + networks_info = [] + + disk_size = '' + if hasattr(vm.template, 'disks'): + disk_size = vm.template.disks[0].size + ' MB' + + if hasattr(vm.template, 'nics'): + for nic in vm.template.nics: + networks_info.append({'ip': nic.ip, 'mac': nic.mac, 'name': nic.network, 'security_groups': nic.security_groups}) + import time + + current_time = time.localtime() + vm_start_time = time.localtime(vm.stime) + + vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time) + vm_uptime /= (60 * 60) + + # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE + vm_lcm_state = None + if vm.state == VM_STATES.index('ACTIVE'): + vm_lcm_state = LCM_STATES[vm.lcm_state] + + vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.id) + info = { + 'template_id': int(vm.template.template_id), + 'vm_id': vm.id, + 'vm_name': vm.name, + 'state': VM_STATES[vm.state], + 'lcm_state': vm_lcm_state, + 'user_name': vm.uname, + 'user_id': vm.uid, + 'networks': networks_info, + 'disk_size': disk_size, + 'memory': vm.template.memory + ' MB', + 'vcpu': vm.template.vcpu, + 'cpu': vm.template.cpu, + 'group_name': vm.gname, + 'group_id': vm.gid, + 'uptime_h': int(vm_uptime), + 'attributes': vm_attributes, + 'labels': vm_labels + } + + return info + + +def get_size_in_MB(module, size_str): + + SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB'] + + s = size_str + init = size_str + num = "" + while s and s[0:1].isdigit() or s[0:1] == '.': + num += s[0] + s = s[1:] + num = float(num) + symbol = s.strip() + + if symbol not in SYMBOLS: + module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num)) + + prefix = {'B': 1} + + for i, s in enumerate(SYMBOLS[1:]): + prefix[s] = 1 << (i + 1) * 10 + + size_in_bytes = int(num * prefix[symbol]) + size_in_MB = size_in_bytes / (1024 * 1024) + + return size_in_MB + + +def create_disk_str(module, client, template_id, disk_size_str): + + if not disk_size_str: + return '' + + import 
xml.etree.ElementTree as ET + + template_XML = client.call('template.info', template_id) + root = ET.fromstring(template_XML) + + disks_num = 0 + disk = None + + for child in root.find('TEMPLATE').findall('DISK'): + disks_num += 1 + root = child + + if disks_num != 1: + module.fail_json(msg='You can pass disk_size only if template has exact one disk. This template has ' + str(disks_num) + ' disks.') + + disk = {} + # Get all info about existed disk e.g. IMAGE_ID,... + for child in root: + disk[child.tag] = child.text + + result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') + result += ', SIZE=' + str(get_size_in_MB(module, disk_size_str)) + ']\n' + + return result + + +def create_attributes_str(attributes_dict, labels_list): + + attributes_str = '' + + if labels_list: + attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n' + if attributes_dict: + attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n' + + return attributes_str + + +def create_nics_str(network_attrs_list): + nics_str = '' + + for network in network_attrs_list: + # Packing key-value dict in string with format key="value", key="value" + network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items()) + nics_str = nics_str + 'NIC = [' + network_str + ']\n' + + return nics_str + + +def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list): + + if attributes_dict: + vm_name = attributes_dict.get('NAME', '') + + disk_str = create_disk_str(module, client, template_id, disk_size) + vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str + vm_id = client.call('template.instantiate', template_id, vm_name, False, vm_extra_template_str) + vm = get_vm_by_id(client, vm_id) + + return 
get_vm_info(client, vm) + + +def generate_next_index(vm_filled_indexes_list, num_sign_cnt): + counter = 0 + cnt_str = str(counter).zfill(num_sign_cnt) + + while cnt_str in vm_filled_indexes_list: + counter = counter + 1 + cnt_str = str(counter).zfill(num_sign_cnt) + + return cnt_str + + +def get_vm_labels_and_attributes_dict(client, vm_id): + import xml.etree.ElementTree as ET + vm_XML = client.call('vm.info', vm_id) + root = ET.fromstring(vm_XML) + + attrs_dict = {} + labels_list = [] + + root = root.find('USER_TEMPLATE') + + for child in root: + if child.tag != 'LABELS': + attrs_dict[child.tag] = child.text + else: + if child.text is not None: + labels_list = child.text.split(',') + + return labels_list, attrs_dict + + +def get_all_vms_by_attributes(client, attributes_dict, labels_list): + pool = oca.VirtualMachinePool(client) + # Retrieves information for all or part of the vms pool + # -4: Vms belonging to the user's primary group + # -3: Vms belonging to the user + # -2: All vms user can Use + # -1: Vms belonging to the user and any of his groups - default + # >= 0: UID User's vms + pool.info(filter=-2) + vm_list = [] + name = '' + if attributes_dict: + name = attributes_dict.pop('NAME', '') + + if name != '': + base_name = name[:len(name) - name.count('#')] + # Check does the name have indexed format + with_hash = name.endswith('#') + + for vm in pool: + if vm.name.startswith(base_name): + if with_hash and vm.name[len(base_name):].isdigit(): + # If the name has indexed format and after base_name it has only digits it'll be matched + vm_list.append(vm) + elif not with_hash and vm.name == name: + # If the name is not indexed it has to be same + vm_list.append(vm) + pool = vm_list + + import copy + + vm_list = copy.copy(pool) + + for vm in pool: + vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.id) + + if attributes_dict and len(attributes_dict) > 0: + for key, val in attributes_dict.items(): + if key in vm_attributes_dict: + if 
val and vm_attributes_dict[key] != val and vm in vm_list: + vm_list.remove(vm) + break + else: + if vm in vm_list: + vm_list.remove(vm) + break + if labels_list and len(labels_list) > 0: + for label in labels_list: + if label not in vm_labels_list and vm in vm_list: + vm_list.remove(vm) + break + + return vm_list + + +def create_count_of_vms(module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout): + new_vms_list = [] + instances_ids = [] + instances = [] + + vm_name = '' + if attributes_dict: + vm_name = attributes_dict.get('NAME', '') + + if module.check_mode: + return {'changed': True} + + # Create list of used indexes + vm_filled_indexes_list = None + num_sign_cnt = vm_name.count('#') + if vm_name != '' and num_sign_cnt > 0: + vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None) + base_name = vm_name[:len(vm_name) - num_sign_cnt] + vm_name = base_name + # Make list which contains used indexes in format ['000', '001',...] 
+ vm_filled_indexes_list = list((vm.name[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list) + + while count > 0: + new_vm_name = vm_name + # Create indexed name + if vm_filled_indexes_list is not None: + next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt) + vm_filled_indexes_list.append(next_index) + new_vm_name += next_index + # Update NAME value in the attributes in case there is index + attributes_dict['NAME'] = new_vm_name + new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list) + new_vm_id = new_vm_dict.get('vm_id') + new_vm = get_vm_by_id(client, new_vm_id) + new_vms_list.append(new_vm) + count -= 1 + + if wait: + for vm in new_vms_list: + wait_for_running(module, vm, wait_timeout) + + for vm in new_vms_list: + vm_info = get_vm_info(client, vm) + instances.append(vm_info) + instances_ids.append(vm.id) + + return {'changed': True, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': instances} + + +def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict, + labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout): + + vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) + + vm_count_diff = exact_count - len(vm_list) + changed = vm_count_diff != 0 + + result = {} + new_vms_list = [] + instances_ids = [] + instances = [] + tagged_instances = list(get_vm_info(client, vm) for vm in vm_list) + + if module.check_mode: + return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + + if vm_count_diff > 0: + # Add more VMs + result = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, + labels_list, disk_size, network_attrs_list, wait, wait_timeout) + + result['tagged_instances'] += tagged_instances + return result + + elif vm_count_diff < 0: + # Delete 
surplus VMs + old_vms_list = [] + + while vm_count_diff < 0: + old_vm = vm_list.pop(0) + old_vms_list.append(old_vm) + terminate_vm(module, client, old_vm, hard) + vm_count_diff += 1 + + if wait: + for vm in old_vms_list: + wait_for_done(module, vm, wait_timeout) + + for vm in old_vms_list: + vm_info = get_vm_info(client, vm) + instances.append(vm_info) + instances_ids.append(vm.id) + tagged_instances[:] = [dct for dct in tagged_instances if dct.get('vm_id') != vm.id] + + return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + +VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE'] +LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP', + 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME', + 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF', + 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC', + 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY'] + + +def wait_for_state(module, vm, wait_timeout, state_predicate): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + vm.info() + state = vm.state + lcm_state = vm.lcm_state + + if state_predicate(state, lcm_state): + return vm + elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'), + VM_STATES.index('ACTIVE'), VM_STATES.index('POWEROFF')]: + module.fail_json(msg='Action is unsuccessful. 
VM state: ' + VM_STATES[state]) + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired!") + + +def wait_for_running(module, vm, wait_timeout): + return wait_for_state(module, vm, wait_timeout, lambda state, + lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')])) + + +def wait_for_done(module, vm, wait_timeout): + return wait_for_state(module, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')])) + + +def wait_for_poweroff(module, vm, wait_timeout): + return wait_for_state(module, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')])) + + +def terminate_vm(module, client, vm, hard=False): + changed = False + + if not vm: + return changed + + changed = True + + if not module.check_mode: + if hard: + client.call('vm.action', 'terminate-hard', vm.id) + else: + client.call('vm.action', 'terminate', vm.id) + + return changed + + +def terminate_vms(module, client, vms, wait, wait_timeout, hard, tagged): + changed = False + instances_ids = [] + instances = [] + + if tagged: + module.fail_json(msg='Option `instance_ids` is required when state is `absent`.') + + for vm in vms: + changed = terminate_vm(module, client, vm, hard) or changed + + if wait and not module.check_mode: + for vm in vms: + if vm is not None: + wait_for_done(module, vm, wait_timeout) + + for vm in vms: + if vm is not None: + instances_ids.append(vm.id) + instances.append(get_vm_info(client, vm)) + + return {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': []} + + +def poweroff_vm(module, vm, hard): + vm.info() + changed = False + + lcm_state = vm.lcm_state + state = vm.state + + if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + changed = True + + if changed and not module.check_mode: + if not hard: + vm.poweroff() + else: + vm.poweroff_hard() + + return 
changed + + +def poweroff_vms(module, client, vms, wait, wait_timeout, hard, tagged): + instances_ids = [] + instances = [] + tagged_instances = [] + changed = False + + for vm in vms: + changed = poweroff_vm(module, vm, hard) or changed + + if wait and not module.check_mode: + for vm in vms: + wait_for_poweroff(module, vm, wait_timeout) + + for vm in vms: + instances_ids.append(vm.id) + instances.append(get_vm_info(client, vm)) + if tagged: + tagged_instances.append(get_vm_info(client, vm)) + + return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + + +def reboot_vms(module, client, vms, wait, wait_timeout, hard, tagged): + instances_ids = [] + instances = [] + tagged_instances = [] + + if not module.check_mode: + # Firstly, power-off all instances + for vm in vms: + vm.info() + lcm_state = vm.lcm_state + state = vm.state + if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + poweroff_vm(module, vm, hard) + + # Wait for all to be power-off + for vm in vms: + wait_for_poweroff(module, vm, wait_timeout) + + for vm in vms: + resume_vm(module, vm) + + if wait: + for vm in vms: + wait_for_running(module, vm, wait_timeout) + + for vm in vms: + instances_ids.append(vm.id) + instances.append(get_vm_info(client, vm)) + if tagged: + tagged_instances.append(get_vm_info(client, vm)) + + return {'changed': True, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + + +def resume_vm(module, vm): + vm.info() + changed = False + + lcm_state = vm.lcm_state + if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'): + module.fail_json(msg="Cannot perform action 'resume' because this action is not available " + + "for LCM_STATE: 'SHUTDOWN_POWEROFF'. 
Wait for the VM to shutdown properly") + if lcm_state not in [LCM_STATES.index('RUNNING')]: + changed = True + + if changed and not module.check_mode: + vm.resume() + + return changed + + +def resume_vms(module, client, vms, wait, wait_timeout, tagged): + instances_ids = [] + instances = [] + tagged_instances = [] + + changed = False + + for vm in vms: + changed = resume_vm(module, vm) or changed + + if wait and changed and not module.check_mode: + for vm in vms: + wait_for_running(module, vm, wait_timeout) + + for vm in vms: + instances_ids.append(vm.id) + instances.append(get_vm_info(client, vm)) + if tagged: + tagged_instances.append(get_vm_info(client, vm)) + + return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + + +def check_name_attribute(module, attributes): + if attributes.get("NAME"): + import re + if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None: + module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") + + "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.") + +TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS", + "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST", + "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"] + + +def check_attributes(module, attributes): + for key in attributes.keys(): + if key in TEMPLATE_RESTRICTED_ATTRIBUTES: + module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.') + # Check the format of the name attribute + check_name_attribute(module, attributes) + + +def disk_save_as(module, client, vm, disk_saveas, wait_timeout): + if not disk_saveas.get('name'): + module.fail_json(msg="Key 'name' is required for 'disk_saveas' option") + + image_name = disk_saveas.get('name') + disk_id = disk_saveas.get('disk_id', 0) + + if not module.check_mode: + if vm.state 

def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
    """Save a VM disk as a new image; the VM must be in POWEROFF state."""
    if not disk_saveas.get('name'):
        module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")

    image_name = disk_saveas.get('name')
    # Default to the first disk when no disk_id was given.
    disk_id = disk_saveas.get('disk_id', 0)

    if not module.check_mode:
        if vm.state != VM_STATES.index('POWEROFF'):
            module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
        client.call('vm.disksaveas', vm.id, disk_id, image_name, 'OS', -1)
        wait_for_poweroff(module, vm, wait_timeout)


def get_connection_info(module):
    """Resolve the OpenNebula RPC endpoint and credentials.

    Falls back to the ONE_URL / ONE_USERNAME / ONE_PASSWORD environment
    variables when the corresponding module parameters are not set.
    """

    url = module.params.get('api_url')
    username = module.params.get('api_username')
    password = module.params.get('api_password')

    if not url:
        url = os.environ.get('ONE_URL')

    if not username:
        username = os.environ.get('ONE_USERNAME')

    if not password:
        password = os.environ.get('ONE_PASSWORD')

    if not(url and username and password):
        module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
    from collections import namedtuple

    auth_params = namedtuple('auth', ('url', 'username', 'password'))

    return auth_params(url=url, username=username, password=password)


def main():
    """Module entry point: parse parameters, validate them and dispatch."""
    fields = {
        "api_url": {"required": False, "type": "str"},
        "api_username": {"required": False, "type": "str"},
        "api_password": {"required": False, "type": "str", "no_log": True},
        "instance_ids": {"required": False, "aliases": ['ids'], "type": "list"},
        "template_name": {"required": False, "type": "str"},
        "template_id": {"required": False, "type": "int"},
        "state": {
            "default": "present",
            "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
            "type": "str"
        },
        "wait": {"default": True, "type": "bool"},
        "wait_timeout": {"default": 300, "type": "int"},
        "hard": {"default": False, "type": "bool"},
        "memory": {"required": False, "type": "str"},
        "cpu": {"required": False, "type": "float"},
        "vcpu": {"required": False, "type": "int"},
        "disk_size": {"required": False, "type": "str"},
        "networks": {"default": [], "type": "list"},
        "count": {"default": 1, "type": "int"},
        "exact_count": {"required": False, "type": "int"},
        "attributes": {"default": {}, "type": "dict"},
        "count_attributes": {"required": False, "type": "dict"},
        "labels": {"default": [], "type": "list"},
        "count_labels": {"required": False, "type": "list"},
        "disk_saveas": {"type": "dict"}
    }

    module = AnsibleModule(argument_spec=fields,
                           mutually_exclusive=[
                               ['template_id', 'template_name', 'instance_ids'],
                               ['instance_ids', 'count_attributes', 'count'],
                               ['instance_ids', 'count_labels', 'count'],
                               ['instance_ids', 'exact_count'],
                               ['instance_ids', 'attributes'],
                               ['instance_ids', 'labels'],
                               ['disk_saveas', 'attributes'],
                               ['disk_saveas', 'labels'],
                               ['exact_count', 'count'],
                               ['count', 'hard'],
                               ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
                               ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
                               ['instance_ids', 'networks']
                           ],
                           supports_check_mode=True)

    if not HAS_OCA:
        module.fail_json(msg='This module requires python-oca to work!')

    auth = get_connection_info(module)
    params = module.params
    instance_ids = params.get('instance_ids')
    requested_template_name = params.get('template_name')
    requested_template_id = params.get('template_id')
    state = params.get('state')
    wait = params.get('wait')
    wait_timeout = params.get('wait_timeout')
    hard = params.get('hard')
    memory = params.get('memory')
    cpu = params.get('cpu')
    vcpu = params.get('vcpu')
    disk_size = params.get('disk_size')
    networks = params.get('networks')
    count = params.get('count')
    exact_count = params.get('exact_count')
    attributes = params.get('attributes')
    count_attributes = params.get('count_attributes')
    labels = params.get('labels')
    count_labels = params.get('count_labels')
    disk_saveas = params.get('disk_saveas')

    client = oca.Client(auth.username + ':' + auth.password, auth.url)

    # Attribute keys are case-insensitive from the user's point of view;
    # normalize them to upper-case before validating.
    if attributes:
        attributes = dict((key.upper(), value) for key, value in attributes.items())
        check_attributes(module, attributes)

    if count_attributes:
        count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
        if not attributes:
            import copy
            module.warn('When you pass `count_attributes` without `attributes` option when deploying, `attributes` option will have same values implicitly.')
            attributes = copy.copy(count_attributes)
        check_attributes(module, count_attributes)

    if count_labels and not labels:
        module.warn('When you pass `count_labels` without `labels` option when deploying, `labels` option will have same values implicitly.')
        labels = count_labels

    # Fetch template
    template_id = None
    if requested_template_id or requested_template_name:
        template_id = get_template_id(module, client, requested_template_id, requested_template_name)
        if not template_id:
            if requested_template_id:
                module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
            elif requested_template_name:
                module.fail_json(msg="There is no template with name: " + requested_template_name)

    if exact_count and not template_id:
        module.fail_json(msg='Option `exact_count` needs template_id or template_name')

    # `exact_count` and `count_attributes`/`count_labels` must be used together.
    if exact_count is not None and not (count_attributes or count_labels):
        module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
    if (count_attributes or count_labels) and exact_count is None:
        module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
    if template_id and state != 'present':
        module.fail_json(msg="Only state 'present' is valid for the template")

    if memory:
        attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
    if cpu:
        attributes['CPU'] = str(cpu)
    if vcpu:
        attributes['VCPU'] = str(vcpu)

    if exact_count is not None and state != 'present':
        module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
    if exact_count is not None and exact_count < 0:
        module.fail_json(msg='`exact_count` cannot be less than 0')
    if count <= 0:
        # NOTE(review): "grater" is a typo, but the legacy integration test
        # asserts this exact string, so it must stay in sync with the test.
        module.fail_json(msg='`count` has to be grater than 0')

    if exact_count is not None:
        # Deploy an exact count of VMs
        result = create_exact_count_of_vms(module, client, template_id, exact_count, attributes, count_attributes,
                                           labels, count_labels, disk_size, networks, hard, wait, wait_timeout)
    elif template_id and state == 'present':
        # Deploy count VMs
        result = create_count_of_vms(module, client, template_id, count, attributes, labels, disk_size, networks, wait, wait_timeout)
    else:
        # Fetch data of instances, or change their state
        if not (instance_ids or attributes or labels):
            module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!")

        if memory or cpu or vcpu or disk_size or networks:
            module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!")

        if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
            module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")

        vms = None
        # `tagged` marks that the VMs were selected by attributes/labels
        # (a filter) rather than by explicit ids.
        tagged = False

        if instance_ids:
            vms = get_vms_by_ids(module, client, state, instance_ids)
        else:
            tagged = True
            vms = get_all_vms_by_attributes(client, attributes, labels)

        instances = list(get_vm_info(client, vm) for vm in vms if vm is not None)
        instances_ids = list(vm.id for vm in vms if vm is not None)

        if tagged:
            result = {'changed': False, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': instances}
        else:
            result = {'changed': False, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': []}

        # An empty match is only acceptable for idempotent states.
        if len(vms) == 0 and state != 'absent' and state != 'present':
            module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')

        if len(vms) == 0 and state == 'present' and not tagged:
            module.fail_json(msg='There are no instances with specified `instance_ids`.')

        if state == 'absent':
            result = terminate_vms(module, client, vms, wait, wait_timeout, hard, tagged)
        elif state == 'rebooted':
            result = reboot_vms(module, client, vms, wait, wait_timeout, hard, tagged)
        elif state == 'poweredoff':
            result = poweroff_vms(module, client, vms, wait, wait_timeout, hard, tagged)
        elif state == 'running':
            result = resume_vms(module, client, vms, wait, wait_timeout, tagged)

        if disk_saveas is not None:
            if len(vms) == 0:
                module.fail_json(msg="There is no VM whose disk will be saved.")
            # Only the first matched VM's disk is saved.
            disk_save_as(module, client, vms[0], disk_saveas, wait_timeout)
            result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/test/legacy/opennebula.yml b/test/legacy/opennebula.yml
new file mode 100644
index 0000000000..43d81808d8
--- /dev/null
+++ b/test/legacy/opennebula.yml
@@ -0,0 +1,4 @@
---
- hosts: localhost
  roles:
    - { role: one_vm, tags: test_one_vm }
diff --git a/test/legacy/roles/one_vm/defaults/main.yml b/test/legacy/roles/one_vm/defaults/main.yml
new file mode 100644
index 0000000000..e162252b7c
--- /dev/null
+++ b/test/legacy/roles/one_vm/defaults/main.yml
@@ -0,0 +1,56 @@
---
# This is a role for running integration test of the one_vm module.
# For this role to be used you need to meet the following prerequisites:
# 1. Environment variables ONE_URL, ONE_USERNAME and ONE_PASSWORD
#    need to be set.
# 2. VM template needs to exist.
Here is an example of VM template: +# +# CONTEXT = [ +# NETWORK = "YES", +# REPORT_READY = "YES", +# SET_HOSTNAME = "$NAME", +# SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]", +# TOKEN = "YES" ] +# CPU = "0.2" +# CPU_COST = ".0890000000" +# DISK = [ +# IMAGE = "CentOS 7", +# IMAGE_UNAME = "oneadmin" ] +# DISK_COST = ".0000005853" +# GRAPHICS = [ +# LISTEN = "0.0.0.0", +# TYPE = "VNC" ] +# HYPERVISOR = "kvm" +# LOGO = "images/logos/centos.png" +# MEMORY = "1024" +# MEMORY_COST = ".0000003560" +# SCHED_DS_REQUIREMENTS = "NAME=local_system" +# USER_INPUTS = [ +# CPU = "O|fixed|| |0.2", +# MEMORY = "M|list||1024,2048,4096,8192,16384,24576,32768,49152,65536|1024", +# VCPU = "O|list||1,2,4,6,8,10|1" ] +# VCPU = "1" +# +# 3. Play vars need to be set bellow to reflect the vm IDs, networks, template IDs, etc. + +one_template_id: 15 +one_template_name: 'CentOS 7' +one_template_name_with_2_disks: 'Centos_2_disks' +one_memory: '4 GB' +one_cpu: 0.1 +one_vcpu: 4 + +one_incorrect_memory: '123ABC' +one_incorrect_disk_size: '12344FFB' +one_disk_size: '32.4 GB' + +one_networks_good: + - NETWORK: "default-test-private" + NETWORK_UNAME: "oneadmin" + SECURITY_GROUPS: "10" + - NETWORK_ID: 27 + +one_networks_bad: + - NETWORK_ID: 999 + - IP: '9.9.9' +one_vm_name: 'foo123' diff --git a/test/legacy/roles/one_vm/tasks/main.yml b/test/legacy/roles/one_vm/tasks/main.yml new file mode 100644 index 0000000000..3952b10a63 --- /dev/null +++ b/test/legacy/roles/one_vm/tasks/main.yml @@ -0,0 +1,924 @@ +--- +- name: 'Deploy a VM in check-mode with template_id' + one_vm: + template_id: '{{ one_template_id }}' + register: deployed_vm1 + check_mode: yes + +- name: Check if deployment in check-mode with template_id returns 'changed' + assert: + that: + - deployed_vm1 is changed + msg: Check mode doesn't return as 'changed' when deploying in check-mode with specified template_id + +- name: Deploy a VM in check-mode with template_name + one_vm: + template_name: '{{ one_template_name }}' + register: deployed_vm2 
+ check_mode: yes + +- name: Check if deployment in check-mode with template_name returns 'changed' + assert: + that: + - deployed_vm2 is changed + msg: Check mode doesn't return as 'changed' when deploying in check-mode with specified template_name + +- name: Deploy a VM in check-mode with non-existent template_name + one_vm: + template_name: 'unknown' + register: template_bad + failed_when: not template_bad is failed + +- name: Check if it fails if we try to access a non-existent VM in check-mode + one_vm: + instance_ids: non-existent-vm-{{ ansible_date_time.iso8601_basic_short }} + register: vm_missing + failed_when: not vm_missing is failed + check_mode: yes + +- name: Check if it fails if we try to access a non-existent VM + one_vm: + instance_ids: non-existent-vm-{{ ansible_date_time.iso8601_basic_short }} + register: vm_missing + failed_when: not vm_missing is failed + +- block: + - name: Deploy a VM with networks, memory and cpu + one_vm: + template_id: '{{ one_template_id }}' + networks: '{{ one_networks_good }}' + memory: '{{ one_memory }}' + cpu: '{{ one_cpu }}' + register: deployed_vm + + - name: Verify deploying of the VM + assert: + that: + - deployed_vm is changed + - deployed_vm.instances_ids|length == 1 + - deployed_vm.instances|length == 1 + - deployed_vm.instances[0].vm_id == deployed_vm.instances_ids[0] + - deployed_vm.instances[0].networks|length == one_networks_good|length + - deployed_vm.instances[0].memory == "4096 MB" + - deployed_vm.instances[0].cpu == "{{ one_cpu }}" + - deployed_vm.instances[0].state == "ACTIVE" + - deployed_vm.instances[0].lcm_state == "RUNNING" + + - name: Delete a VM in check-mode + one_vm: + instance_ids: '{{ deployed_vm.instances[0].vm_id }}' + state: absent + register: delete_vm + check_mode: yes + + - name: Check if delete in check-mode returns 'changed' + assert: + that: delete_vm is changed + + - name: Wait for the VM to become RUNNING + one_vm: + attributes: + name: '{{ deployed_vm.instances[0].vm_name }}' + 
state: running + + always: + - name: Delete the VM + one_vm: + instance_ids: + - '{{ deployed_vm.instances[0].vm_id }}' + state: absent + hard: yes + register: delete_vm + +- name: Check if deletion has done + assert: + that: + - delete_vm is changed + - delete_vm.instances_ids|length == 1 + - delete_vm.instances_ids[0] == deployed_vm.instances_ids[0] + msg: 'Deletion has not done' + +- name: Delete the VM again to test idempotence + one_vm: + instance_ids: + - '{{ deployed_vm.instances[0].vm_id }}' + state: absent + register: delete_vm_idempotent + +- name: Check if deletion is idempotent + assert: + that: + - not delete_vm_idempotent is changed + msg: 'Deletion is not idempotent' + +- name: Delete a non-existent VM + one_vm: + instance_ids: + - non-existent-vm-{{ ansible_date_time.iso8601_basic_short }} + state: absent + register: delete_non_existent_vm + +- name: Check if deletion is not executed + assert: + that: + - not delete_non_existent_vm is changed + msg: 'Deletion is bad, task has deleted non existent VM' + +- block: + - name: Set the unique name of the VM + set_fact: + vm_unique_name: test-vm-name-{{ ansible_date_time.iso8601_basic_short }} + + - name: Try to deploy an unique VM with exact_count but without count_attributes and count_labels + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: '{{ vm_unique_name }}' + exact_count: 1 + register: one_exact_count_without_count_attrs + failed_when: not one_exact_count_without_count_attrs is failed + + - name: Deploy an unique VM in check mode + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: '{{ vm_unique_name }}' + exact_count: 1 + count_attributes: + name: '{{ vm_unique_name }}' + register: unique_vm_check_mode + check_mode: yes + + - name: Check if deployment in check-mode returns as 'changed' + assert: + that: + - unique_vm_check_mode is changed + msg: 'Deploying an unique VM, that does not exist, in check-mode should return as changed.' 
+ + - name: Really deploy an unique VM + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: '{{ vm_unique_name }}' + exact_count: 1 + count_attributes: + name: '{{ vm_unique_name }}' + register: unique_vm + + - name: Verify deploying with unique name + assert: + that: + - unique_vm is changed + - unique_vm.instances_ids|length == 1 + - unique_vm.instances|length == 1 + - unique_vm.instances[0].vm_name == "{{ vm_unique_name }}" + msg: Deployment of the unique VM doesn't return as 'changed' + + - name: Deploy an unique VM again to check idempotence + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: '{{ vm_unique_name }}' + exact_count: 1 + count_attributes: + name: '{{ vm_unique_name }}' + register: unique_vm_idempotent + + - name: Check idempotence of deployment with unique name + assert: + that: + - not unique_vm_idempotent is changed + msg: Deployment with unique name isn't idempotent + + always: + - name: Delete the unique VM + one_vm: + instance_ids: + - '{{ unique_vm.tagged_instances[0].vm_id }}' + state: absent + hard: yes + +- name: Try to deploy a VM with incorrect networks + one_vm: + template_name: '{{ one_template_name }}' + networks: '{{ one_networks_bad }}' + register: vm_with_bad_networks + failed_when: not vm_with_bad_networks is failed + +- name: Try to deploy a VM with incorrect memory size + one_vm: + template_name: '{{ one_template_name }}' + memory: '{{ one_incorrect_memory }}' + register: vm_with_bad_memory_param + failed_when: not vm_with_bad_memory_param is failed + +- name: Try to deploy a VM with incorrect disk size + one_vm: + template_name: '{{ one_template_name }}' + disk_size: '{{ one_incorrect_disk_size }}' + register: vm_with_bad_disk_size_param + failed_when: not vm_with_bad_disk_size_param is failed + +- name: Try to deploy a VM, with disk size, whose template has more than one DISK + one_vm: + template_name: '{{ one_template_name_with_2_disks }}' + disk_size: '{{ one_disk_size }}' + register: 
vm_with_disk_size_and_more_disks + failed_when: not vm_with_disk_size_and_more_disks is failed + +- name: Try to deploy a VM with incorrect name's format + one_vm: + template_name: '{{ one_template_name }}' + attributes: + name: 'foo#Vm###' + register: vm_with_bad_name + failed_when: not vm_with_bad_name is failed + +- name: Try to deploy a VM with incorrect name's format + one_vm: + template_name: '{{ one_template_name }}' + attributes: + name: '###' + register: vm_with_bad_name + failed_when: not vm_with_bad_name is failed + +- block: + - name: Deploy a VM and wait for it to become RUNNING + one_vm: + template_id: '{{ one_template_id }}' + register: vm_register + + - name: Power-off the VM and wait for it to become POWEROFF + one_vm: + instance_ids: + - '{{ vm_register.instances[0].vm_id }}' + state: poweredoff + hard: yes + register: power_off_vm + + - name: Check if VM went down properly + assert: + that: + - power_off_vm is changed + - power_off_vm.instances|length == 1 + - power_off_vm.instances[0].state == "POWEROFF" + msg: Power-off of the VM doesn't work + + - name: Power-off the VM again to check idempotence + one_vm: + instance_ids: + - '{{ vm_register.instances[0].vm_id }}' + state: poweredoff + register: power_off_vm_idempotent + + - name: Check if power-off is idempotent + assert: + that: + - not power_off_vm_idempotent is changed + msg: Power-off of the VM is not idempotent + + - name: Run a VM again in check-mode + one_vm: + instance_ids: + - '{{ vm_register.instances[0].vm_id }}' + state: running + register: run_vm_check_mode + check_mode: yes + + - name: Check if running in check-mode returns as 'changed' + assert: + that: + - run_vm_check_mode is changed + + - name: Run a VM and wait for it to become RUNNING + one_vm: + instance_ids: + - '{{ vm_register.instances[0].vm_id }}' + state: running + register: run_vm + + - name: Check if VM is running again + assert: + that: + - run_vm is changed + - run_vm.instances_ids|length == 1 + - 
run_vm.instances_ids[0] == vm_register.instances[0].vm_id + - run_vm.instances|length == 1 + - run_vm.instances[0].state == "ACTIVE" + - run_vm.instances[0].lcm_state == "RUNNING" + + - name: Reboot the running VM + one_vm: + instance_ids: + - '{{ vm_register.instances[0].vm_id }}' + state: rebooted + register: reboot_running_vm + + - name: Check if reboot returns as changed + assert: + that: + - reboot_running_vm is changed + msg: Reboot should be done anyway + + - name: Power-off the VM and wait for it to become POWEROFF + one_vm: + instance_ids: + - '{{ vm_register.instances[0].vm_id }}' + state: poweredoff + hard: yes + + - name: Reboot the extinguished VM and wait for it to become RUNNING + one_vm: + instance_ids: + - '{{ vm_register.instances[0].vm_id }}' + state: rebooted + register: reboot_extinguished_vm + + - name: Check if reboot started VM again + assert: + that: + - reboot_extinguished_vm is changed + - reboot_extinguished_vm.instances|length == 1 + - reboot_extinguished_vm.instances[0].state == "ACTIVE" + - reboot_extinguished_vm.instances[0].lcm_state == "RUNNING" + msg: Rebooting the extinguished VM should run it + + always: + - name: Delete the VM + one_vm: + instance_ids: + - '{{ vm_register.instances[0].vm_id }}' + state: absent + hard: yes + +- block: + - name: Deploy 2 VMs with attributes in check-mode + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: aero + key: value + count: 2 + register: deploy_vms_with_count_check_mode + check_mode: yes + + - name: Check if deployment in check-mode returns as 'changed' + assert: + that: + - deploy_vms_with_count_check_mode is changed + + - name: Deploy 2 VMs with attributes + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: aero + key: value + count: 2 + register: deploy_vms_with_count + + - name: Check if deployment in returns as 'changed' + assert: + that: + - deploy_vms_with_count is changed + - deploy_vms_with_count.instances_ids|length == 2 + - 
deploy_vms_with_count.instances|length == 2 + - deploy_vms_with_count.tagged_instances|length == 2 + - deploy_vms_with_count.tagged_instances[0].vm_name == "aero" + - deploy_vms_with_count.tagged_instances[1].vm_name == "aero" + + - name: Deploy 2 VMs with attributes to check it is not idempotent + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: aero + key: value + count: 2 + register: deploy_vms_with_count_check_idempotence + + - name: Check if deployment with count is not idempotent + assert: + that: + - deploy_vms_with_count_check_idempotence is changed + - deploy_vms_with_count_check_idempotence.instances_ids|length == 2 + - deploy_vms_with_count_check_idempotence.instances|length == 2 + + always: + - name: Delete all VMs + one_vm: + instance_ids: '{{ deploy_vms_with_count.instances_ids | union(deploy_vms_with_count_check_idempotence.instances_ids) }}' + state: absent + hard: yes + register: delete_all_vms + + - name: Verify deletion + assert: + that: + - delete_all_vms is changed + - delete_all_vms.instances_ids|length == 4 + - delete_all_vms.instances|length == 4 + - delete_all_vms.tagged_instances|length == 0 + +- block: + - name: Set VMs indexed format name + set_fact: + vms_indexed_name: 'aero-##' + + - name: Terminate all VMs with name's format 'aero-##' + one_vm: + template_id: '{{ one_template_id }}' + count_attributes: + name: '{{ vms_indexed_name }}' + hard: yes + exact_count: 0 + + - name: Terminate all VMs with name's format 'aero-##' again to check-idempotence + one_vm: + template_id: '{{ one_template_id }}' + count_attributes: + name: '{{ vms_indexed_name }}' + hard: yes + exact_count: 0 + + - name: Terminate all VMs with name's format 'aero' + one_vm: + template_id: '{{ one_template_id }}' + exact_count: 0 + count_attributes: + name: aero + hard: yes + + - name: Fetch all VMs with name's format 'aero-##' + one_vm: + attributes: + name: '{{ vms_indexed_name }}' + register: all_aero_vms + failed_when: 
all_aero_vms.instances_ids|length > 0 + + - name: Deploy exact 3 instances with name's format 'aero-##' + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: '{{ vms_indexed_name }}' + exact_count: 3 + count_attributes: + name: '{{ vms_indexed_name }}' + register: vms_with_hash + + - name: Deploy exact 2 instances with name's format 'aero' + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: aero + exact_count: 2 + count_attributes: + name: aero + register: vms_without_hash + + - name: Fetch all VMs with name's format 'aero-#' + one_vm: + attributes: + name: aero-# + register: all_aero_vms_with_hash + + - name: Check there are exactly 3 instances with name's format 'aero-#' + assert: + that: + - not all_aero_vms_with_hash is changed + - all_aero_vms_with_hash.tagged_instances|length == 3 + + - name: Decrement count of 'aero-#' instances + one_vm: + template_id: '{{ one_template_id }}' + exact_count: 2 + count_attributes: + name: aero-# + register: aero_vms_with_hash_decremented + + - name: Check if we terminated oldest one VM + assert: + that: + - aero_vms_with_hash_decremented is changed + - aero_vms_with_hash_decremented.instances_ids|length == 1 + - vms_with_hash.instances_ids|min == aero_vms_with_hash_decremented.instances_ids[0] + + - name: Deploy new one with name's format 'aero-#' + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: 'aero-#' + register: new_vm_with_hash + + - name: Check if new VM has index 0 + assert: + that: + - new_vm_with_hash is changed + - new_vm_with_hash.instances_ids|length == 1 + - new_vm_with_hash.instances|length == 1 + - new_vm_with_hash.tagged_instances|length == 1 + - new_vm_with_hash.instances[0].vm_name|regex_replace('(\d+)$','\1')|int == 0 + + always: + - name: Terminate all VMs with name's format 'aero-##' + one_vm: + template_id: '{{ one_template_id }}' + count_attributes: + name: '{{ vms_indexed_name }}' + exact_count: 0 + hard: yes + + - name: Terminate all VMs with 
name's format 'aero' + one_vm: + template_id: '{{ one_template_id }}' + count_attributes: + name: aero + exact_count: 0 + hard: yes + +- block: + - name: Try to create negative count of VMs + one_vm: + template_id: '{{ one_template_id }}' + count: -3 + register: negative_count_of_vms + failed_when: not negative_count_of_vms is failed + + - name: Verify the fail message + assert: + that: + - negative_count_of_vms.msg == "`count` has to be grater than 0" + + - name: Try to use hard parameter for running state + one_vm: + instance_ids: + - 123 + - 456 + hard: yes + state: running + register: hard_with_running + failed_when: not hard_with_running is failed + + - name: Try to use count with count_attributes/count_labels + one_vm: + template_id: '{{ one_template_id }}' + count: 2 + count_attributes: + key: val + count_labels: + - foo + register: use_count_with_count_attrs + failed_when: not use_count_with_count_attrs is failed + + - name: Terminate all VMs with label 'foo' + one_vm: + template_id: '{{ one_template_id }}' + count_labels: + - foo + exact_count: 0 + hard: yes + + - name: Deploy exact 3 VMs with label 'foo' + one_vm: + template_id: '{{ one_template_id }}' + labels: + - foo + exact_count: 3 + count_labels: + - foo + register: vms_with_foo_label + + - name: + assert: + that: + - vms_with_foo_label is changed + - vms_with_foo_label.tagged_instances|length == 3 + - vms_with_foo_label.tagged_instances[0].labels|length == 1 + - vms_with_foo_label.tagged_instances[0].labels[0] == "foo" + + - name: Try to use ids with exact_count and check if it fails + one_vm: + instance_ids: '{{ vms_with_foo_label.instances_ids }}' + exact_count: 2 + register: failed_ids_and_exact_count + failed_when: not failed_ids_and_exact_count is failed + + - name: Set special label for a new instance + set_fact: + vm_spec_label: spec-label-{{ ansible_date_time.iso8601_basic_short }} + + - name: Add a new instance in the group of instances with label 'foo' + one_vm: + template_id: '{{ 
one_template_id }}' + labels: + - foo + - '{{ vm_spec_label }}' + exact_count: 4 + count_labels: + - foo + register: new_vm_with_label + + - name: Fetch all instances with special label + one_vm: + labels: + - '{{ vm_spec_label }}' + register: vm_with_special_label + + - name: Check there is only one VM with the special label + assert: + that: + - not vm_with_special_label is changed + - vm_with_special_label.tagged_instances|length == 1 + - vm_with_special_label.instances_ids|length == 1 + - vm_with_special_label.instances_ids[0] == new_vm_with_label.instances_ids[0] + + always: + - name: Fetch all VMs with label 'foo' + one_vm: + labels: + - foo + register: all_foo_instances + + - name: Terminate all VMs with label 'foo' + one_vm: + instance_ids: '{{ all_foo_instances.instances_ids }}' + state: absent + hard: yes + when: all_foo_instances.instances_ids|length > 0 + +- block: + - name: Fetch all VMs with 'foo_app' key + one_vm: + attributes: + foo_app: + register: foo_app_instances + + - name: Terminate all VMs with 'foo_app' key + one_vm: + instance_ids: '{{ foo_app_instances.instances_ids }}' + state: absent + hard: yes + when: foo_app_instances.instances_ids|length > 0 + + - name: Terminate all instances with name's format 'aeroXYZ-##' to test name parsing later + one_vm: + template_id: '{{ one_template_id }}' + exact_count: 0 + count_attributes: + name: 'aeroXYZ-##' + hard: yes + + - name: Deploy 2 instances with attributes + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: 'aero-###' + foo_app: foo + count: 2 + + - name: Deploy 2 instances with different value for attribute + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: 'aero-###' + foo_app: bar + count: 2 + + - name: Get all instances with attribute's key + one_vm: + attributes: + foo_app: + register: all_foo_app_vms + + - name: Check there are 4 VMs with 'foo_app' key + assert: + that: + - all_foo_app_vms.tagged_instances|length == 4 + + - name: Decrement count of 
VMs with 'foo_app' key + one_vm: + template_id: '{{ one_template_id }}' + exact_count: 2 + count_attributes: + foo_app: + register: foo_app_vms + + # instances list contains affected instances + # tagged_instances list contains the remaining instances with the appropriate attributes + - name: Check are there 2 elements in instances list and 2 elements in tagged_instances list + assert: + that: + - foo_app_vms is changed + - foo_app_vms.instances|length == 2 + - foo_app_vms.tagged_instances|length == 2 + + - name: Fetch all instances with name's format 'aeroXYZ-##' to check parsing + one_vm: + attributes: + name: 'aeroXYZ-##' + register: check_there_are_no_vms + failed_when: check_there_are_no_vms.instances_ids|length > 0 + + always: + - name: Fetch all VMs with 'foo_app' key + one_vm: + attributes: + foo_app: + register: foo_app_instances + + - name: Terminate all VMs with 'foo_app' key + one_vm: + instance_ids: '{{ foo_app_instances.instances_ids }}' + state: absent + hard: yes + when: foo_app_instances.instances_ids|length > 0 + +- block: + - name: Set labels list + set_fact: + labels_list: + - bar1 + - bar2 + + - name: Deploy an instance with name 'app1', attribute 'foo app' and labels 'bar1' and 'bar2' + one_vm: + template_id: '{{ one_template_id }}' + attributes: + name: app1 + foo: app + labels: '{{ labels_list }}' + register: instance_with_labels + + - name: Fetch the current instance + one_vm: + attributes: + name: app1 + foo: app + labels: '{{ labels_list }}' + register: current_instance + + - name: Check that the instance is fetched + assert: + that: current_instance.instances|length == 1 + msg: There is no instance + + - name: Check that labels are set correctly + assert: + that: instance_with_labels.instances[0].labels|difference(labels_list)|length == 0 + msg: Labels are not correct + + - name: Check that name is correct + assert: + that: instance_with_labels.instances[0].vm_name == 'app1' + msg: The instance name is incorrect + + always: + - name: 
Terminate the instance + one_vm: + instance_ids: '{{ instance_with_labels.instances_ids }}' + state: absent + hard: yes + +- name: Try to use letters for ids option + one_vm: + instance_ids: + - asd + - 123 + state: running + register: ids_with_letters + failed_when: not ids_with_letters is failed + +- name: Try to use letters for ids option when terminate vms + one_vm: + instance_ids: + - asd + - 123 + state: absent + register: ids_with_letters + failed_when: ids_with_letters is failed + +- name: Try to use restricted attributes when deploying + one_vm: + template_id: '{{ one_template_id }}' + attributes: + disk: 34 GB + template: foo + register: restricted_attributes + failed_when: not restricted_attributes is failed + +- name: Verify the fail message + assert: + that: + - restricted_attributes.msg == "Restricted attribute `DISK` cannot be used when filtering VMs." + +- name: Test images creation + block: + - name: Set fact image name + set_fact: + image_name: test-image-name-{{ ansible_date_time.iso8601_basic_short }} + + - name: Deploy VM + one_vm: + template_id: '{{ one_template_id }}' + labels: + - test-image + attributes: + name: test-vm-image + register: vm_image + + - name: Try to use disk_saveas option with labels and attributes + one_vm: + labels: + - test-image + attributes: + name: test-vm-image + disk_saveas: + name: '{{ image_name }}' + register: save_disk_labels + failed_when: not save_disk_labels is failed + + - name: Try to save disk in running state to check if it will fail + one_vm: + instance_ids: '{{ vm_image.instances_ids }}' + disk_saveas: + name: '{{ image_name }}' + register: disk_save_as_running + failed_when: not disk_save_as_running + + - name: Verify the fail message + assert: + that: + - disk_save_as_running.msg == "'disksaveas' option can be used only when the VM is in 'POWEROFF' state" + + - name: Try to save disk without specified image name + one_vm: + instance_ids: '{{ vm_image.instances_ids }}' + disk_saveas: {} + register: 
disk_save_without_name + failed_when: not disk_save_without_name is failed + + - name: Try to save disk of non-existent VM + one_vm: + attributes: + name: test-vm-{{ ansible_date_time.iso8601_basic_short }} + disk_saveas: + name: '{{ image_name }}' + register: disk_save_no_vm + failed_when: not disk_save_no_vm is failed + + - name: Save disk of powered-off VM in check-mode + one_vm: + instance_ids: '{{ vm_image.instances_ids }}' + state: poweredoff + hard: yes + disk_saveas: + name: '{{ image_name }}' + check_mode: yes + register: vm_disk_saveas_check_mode + + - name: Check if disk saving in check-mode is returned as 'changed' + assert: + that: + - vm_disk_saveas_check_mode is changed + + - name: Check that image doesn't exist + one_image: + name: '{{ image_name }}' + failed_when: no + + - name: Save disk of powered-off VM + one_vm: + instance_ids: '{{ vm_image.instances_ids }}' + state: poweredoff + hard: yes + disk_saveas: + name: '{{ image_name }}' + register: vm_disk_saveas + + - name: Check if disk saving is returned as 'changed' + assert: + that: + - vm_disk_saveas is changed + + - name: Check if image is created + one_image: + name: '{{ image_name }}' + + - name: Try to save disk again with the same name + one_vm: + instance_ids: '{{ vm_image.instances_ids }}' + disk_saveas: + name: '{{ image_name }}' + register: disk_save_as_fail + failed_when: not disk_save_as_fail is failed + + always: + - name: Delete the image + one_image: + name: '{{ image_name }}' + state: absent + + - name: Delete the VM + one_vm: + instance_ids: '{{ vm_image.instances_ids }}' + state: absent + hard: yes + tags: test-image