Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)

parent 887456ab8e
commit 02f66b9369

38 changed files with 865 additions and 888 deletions
@@ -249,9 +249,9 @@ class CLI(with_metaclass(ABCMeta, object)):
if runas_opts:
# Check for privilege escalation conflicts
if (op.su or op.su_user) and (op.sudo or op.sudo_user) or \
(op.su or op.su_user) and (op.become or op.become_user) or \
(op.sudo or op.sudo_user) and (op.become or op.become_user):
if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or
(op.su or op.su_user) and (op.become or op.become_user) or
(op.sudo or op.sudo_user) and (op.become or op.become_user)):

self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "

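The hunk above is typical of this commit: a condition that used backslash line continuations is wrapped in parentheses instead, which is the style pycodestyle prefers. A minimal, self-contained sketch of the same transformation (the flag names are illustrative, not the real CLI options from the diff):

# Illustrative flags only; not the actual options object from the hunk above.
su, sudo, become = True, True, False

# Before (backslash continuation):
# if su and sudo or \
#    su and become:
#     print("conflicting privilege escalation options")

# After (implicit continuation inside parentheses, as in the change above):
if (su and sudo or
        su and become):
    print("conflicting privilege escalation options")
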
@@ -42,7 +42,7 @@ if _system_six:
not hasattr(_system_six, 'byte2int') or
not hasattr(_system_six, 'add_metaclass') or
not hasattr(_system_six.moves, 'urllib')
):
):

_system_six = False

@@ -99,11 +99,9 @@ class HostState:
if not isinstance(other, HostState):
return False

for attr in (
'_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
'tasks_child_state', 'rescue_child_state', 'always_child_state'
):
for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False

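The HostState change above collapses a parenthesised tuple of attribute names onto hanging-indent continuation lines while keeping the attribute-by-attribute comparison. A small self-contained sketch of that comparison pattern (the Point class and its attributes are invented for illustration):

class Point:
    def __init__(self, x, y, label):
        self.x, self.y, self.label = x, y, label

    def __eq__(self, other):
        if not isinstance(other, Point):
            return False
        # Compare a fixed tuple of attribute names, as HostState.__eq__ does above.
        for attr in ('x', 'y', 'label'):
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True

print(Point(1, 2, 'a') == Point(1, 2, 'a'))  # True
print(Point(1, 2, 'a') == Point(1, 3, 'a'))  # False
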
@@ -664,9 +664,9 @@ class AnsibleFallbackNotFound(Exception):
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):

'''
common code for quickly building an ansible module in Python

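The AnsibleModule.__init__ hunk above only re-aligns the continuation lines of a long signature with the opening parenthesis, the layout PEP 8 recommends. A hedged, standalone sketch of that layout (function and parameter names are made up for illustration):

def build_widget(name, colour='blue', size=3,
                 validate=True, tags=None,
                 owner=None):
    # Continuation lines are aligned with the opening parenthesis above.
    return {'name': name, 'colour': colour, 'size': size,
            'validate': validate, 'tags': tags, 'owner': owner}

print(build_widget('demo')['colour'])  # blue
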
@@ -1961,9 +1961,9 @@ class AnsibleModule(object):
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
self.deprecate(d)
else:
self.deprecate(d)

@@ -2498,8 +2498,8 @@ class LinuxNetwork(Network):
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and self.facts['os_family'] == 'RedHat' \
and self.facts['distribution_version'].startswith('4.'):
if (v == 'v6' and self.facts['os_family'] == 'RedHat' and
self.facts['distribution_version'].startswith('4.')):
continue
if v == 'v6' and not socket.has_ipv6:
continue

@@ -300,7 +300,7 @@ class EcsServiceManager:
return self.jsonize(response['service'])

def update_service(self, service_name, cluster_name, task_definition,
load_balancers, desired_count, client_token, role, deployment_configuration):
load_balancers, desired_count, client_token, role, deployment_configuration):
response = self.ecs.update_service(
cluster=cluster_name,
service=service_name,

@@ -138,218 +138,218 @@ def boto_exception(err):
|
|||
|
||||
|
||||
def user_action(module, iam, name, policy_name, skip, pdoc, state):
|
||||
policy_match = False
|
||||
changed = False
|
||||
try:
|
||||
current_policies = [cp for cp in iam.get_all_user_policies(name).
|
||||
list_user_policies_result.
|
||||
policy_names]
|
||||
matching_policies = []
|
||||
for pol in current_policies:
|
||||
'''
|
||||
urllib is needed here because boto returns url encoded strings instead
|
||||
'''
|
||||
if urllib.unquote(iam.get_user_policy(name, pol).
|
||||
get_user_policy_result.policy_document) == pdoc:
|
||||
policy_match = True
|
||||
matching_policies.append(pol)
|
||||
policy_match = False
|
||||
changed = False
|
||||
try:
|
||||
current_policies = [cp for cp in iam.get_all_user_policies(name).
|
||||
list_user_policies_result.
|
||||
policy_names]
|
||||
matching_policies = []
|
||||
for pol in current_policies:
|
||||
'''
|
||||
urllib is needed here because boto returns url encoded strings instead
|
||||
'''
|
||||
if urllib.unquote(iam.get_user_policy(name, pol).
|
||||
get_user_policy_result.policy_document) == pdoc:
|
||||
policy_match = True
|
||||
matching_policies.append(pol)
|
||||
|
||||
if state == 'present':
|
||||
# If policy document does not already exist (either it's changed
|
||||
# or the policy is not present) or if we're not skipping dupes then
|
||||
# make the put call. Note that the put call does a create or update.
|
||||
if not policy_match or (not skip and policy_name not in matching_policies):
|
||||
changed = True
|
||||
iam.put_user_policy(name, policy_name, pdoc)
|
||||
elif state == 'absent':
|
||||
try:
|
||||
iam.delete_user_policy(name, policy_name)
|
||||
changed = True
|
||||
except boto.exception.BotoServerError as err:
|
||||
if state == 'present':
|
||||
# If policy document does not already exist (either it's changed
|
||||
# or the policy is not present) or if we're not skipping dupes then
|
||||
# make the put call. Note that the put call does a create or update.
|
||||
if not policy_match or (not skip and policy_name not in matching_policies):
|
||||
changed = True
|
||||
iam.put_user_policy(name, policy_name, pdoc)
|
||||
elif state == 'absent':
|
||||
try:
|
||||
iam.delete_user_policy(name, policy_name)
|
||||
changed = True
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
if 'cannot be found.' in error_msg:
|
||||
changed = False
|
||||
module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
|
||||
|
||||
updated_policies = [cp for cp in iam.get_all_user_policies(name).
|
||||
list_user_policies_result.
|
||||
policy_names]
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
if 'cannot be found.' in error_msg:
|
||||
changed = False
|
||||
module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
|
||||
module.fail_json(changed=changed, msg=error_msg)
|
||||
|
||||
updated_policies = [cp for cp in iam.get_all_user_policies(name).
|
||||
list_user_policies_result.
|
||||
policy_names]
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
module.fail_json(changed=changed, msg=error_msg)
|
||||
|
||||
return changed, name, updated_policies
|
||||
return changed, name, updated_policies
|
||||
|
||||
|
||||
def role_action(module, iam, name, policy_name, skip, pdoc, state):
|
||||
policy_match = False
|
||||
changed = False
|
||||
try:
|
||||
current_policies = [cp for cp in iam.list_role_policies(name).
|
||||
list_role_policies_result.
|
||||
policy_names]
|
||||
except boto.exception.BotoServerError as e:
|
||||
if e.error_code == "NoSuchEntity":
|
||||
# Role doesn't exist so it's safe to assume the policy doesn't either
|
||||
module.exit_json(changed=False, msg="No such role, policy will be skipped.")
|
||||
else:
|
||||
module.fail_json(msg=e.message)
|
||||
|
||||
try:
|
||||
matching_policies = []
|
||||
for pol in current_policies:
|
||||
if urllib.unquote(iam.get_role_policy(name, pol).
|
||||
get_role_policy_result.policy_document) == pdoc:
|
||||
policy_match = True
|
||||
matching_policies.append(pol)
|
||||
|
||||
if state == 'present':
|
||||
# If policy document does not already exist (either it's changed
|
||||
# or the policy is not present) or if we're not skipping dupes then
|
||||
# make the put call. Note that the put call does a create or update.
|
||||
if not policy_match or (not skip and policy_name not in matching_policies):
|
||||
changed = True
|
||||
iam.put_role_policy(name, policy_name, pdoc)
|
||||
elif state == 'absent':
|
||||
try:
|
||||
iam.delete_role_policy(name, policy_name)
|
||||
changed = True
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
if 'cannot be found.' in error_msg:
|
||||
changed = False
|
||||
module.exit_json(changed=changed,
|
||||
msg="%s policy is already absent" % policy_name)
|
||||
policy_match = False
|
||||
changed = False
|
||||
try:
|
||||
current_policies = [cp for cp in iam.list_role_policies(name).
|
||||
list_role_policies_result.
|
||||
policy_names]
|
||||
except boto.exception.BotoServerError as e:
|
||||
if e.error_code == "NoSuchEntity":
|
||||
# Role doesn't exist so it's safe to assume the policy doesn't either
|
||||
module.exit_json(changed=False, msg="No such role, policy will be skipped.")
|
||||
else:
|
||||
module.fail_json(msg=err.message)
|
||||
module.fail_json(msg=e.message)
|
||||
|
||||
updated_policies = [cp for cp in iam.list_role_policies(name).
|
||||
list_role_policies_result.
|
||||
policy_names]
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
module.fail_json(changed=changed, msg=error_msg)
|
||||
try:
|
||||
matching_policies = []
|
||||
for pol in current_policies:
|
||||
if urllib.unquote(iam.get_role_policy(name, pol).
|
||||
get_role_policy_result.policy_document) == pdoc:
|
||||
policy_match = True
|
||||
matching_policies.append(pol)
|
||||
|
||||
return changed, name, updated_policies
|
||||
if state == 'present':
|
||||
# If policy document does not already exist (either it's changed
|
||||
# or the policy is not present) or if we're not skipping dupes then
|
||||
# make the put call. Note that the put call does a create or update.
|
||||
if not policy_match or (not skip and policy_name not in matching_policies):
|
||||
changed = True
|
||||
iam.put_role_policy(name, policy_name, pdoc)
|
||||
elif state == 'absent':
|
||||
try:
|
||||
iam.delete_role_policy(name, policy_name)
|
||||
changed = True
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
if 'cannot be found.' in error_msg:
|
||||
changed = False
|
||||
module.exit_json(changed=changed,
|
||||
msg="%s policy is already absent" % policy_name)
|
||||
else:
|
||||
module.fail_json(msg=err.message)
|
||||
|
||||
updated_policies = [cp for cp in iam.list_role_policies(name).
|
||||
list_role_policies_result.
|
||||
policy_names]
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
module.fail_json(changed=changed, msg=error_msg)
|
||||
|
||||
return changed, name, updated_policies
|
||||
|
||||
|
||||
def group_action(module, iam, name, policy_name, skip, pdoc, state):
|
||||
policy_match = False
|
||||
changed = False
|
||||
msg=''
|
||||
try:
|
||||
current_policies = [cp for cp in iam.get_all_group_policies(name).
|
||||
list_group_policies_result.
|
||||
policy_names]
|
||||
matching_policies = []
|
||||
for pol in current_policies:
|
||||
if urllib.unquote(iam.get_group_policy(name, pol).
|
||||
get_group_policy_result.policy_document) == pdoc:
|
||||
policy_match = True
|
||||
matching_policies.append(pol)
|
||||
msg=("The policy document you specified already exists "
|
||||
"under the name %s." % pol)
|
||||
if state == 'present':
|
||||
# If policy document does not already exist (either it's changed
|
||||
# or the policy is not present) or if we're not skipping dupes then
|
||||
# make the put call. Note that the put call does a create or update.
|
||||
if not policy_match or (not skip and policy_name not in matching_policies):
|
||||
changed = True
|
||||
iam.put_group_policy(name, policy_name, pdoc)
|
||||
elif state == 'absent':
|
||||
try:
|
||||
iam.delete_group_policy(name, policy_name)
|
||||
changed = True
|
||||
except boto.exception.BotoServerError as err:
|
||||
policy_match = False
|
||||
changed = False
|
||||
msg=''
|
||||
try:
|
||||
current_policies = [cp for cp in iam.get_all_group_policies(name).
|
||||
list_group_policies_result.
|
||||
policy_names]
|
||||
matching_policies = []
|
||||
for pol in current_policies:
|
||||
if urllib.unquote(iam.get_group_policy(name, pol).
|
||||
get_group_policy_result.policy_document) == pdoc:
|
||||
policy_match = True
|
||||
matching_policies.append(pol)
|
||||
msg=("The policy document you specified already exists "
|
||||
"under the name %s." % pol)
|
||||
if state == 'present':
|
||||
# If policy document does not already exist (either it's changed
|
||||
# or the policy is not present) or if we're not skipping dupes then
|
||||
# make the put call. Note that the put call does a create or update.
|
||||
if not policy_match or (not skip and policy_name not in matching_policies):
|
||||
changed = True
|
||||
iam.put_group_policy(name, policy_name, pdoc)
|
||||
elif state == 'absent':
|
||||
try:
|
||||
iam.delete_group_policy(name, policy_name)
|
||||
changed = True
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
if 'cannot be found.' in error_msg:
|
||||
changed = False
|
||||
module.exit_json(changed=changed,
|
||||
msg="%s policy is already absent" % policy_name)
|
||||
|
||||
updated_policies = [cp for cp in iam.get_all_group_policies(name).
|
||||
list_group_policies_result.
|
||||
policy_names]
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
if 'cannot be found.' in error_msg:
|
||||
changed = False
|
||||
module.exit_json(changed=changed,
|
||||
msg="%s policy is already absent" % policy_name)
|
||||
module.fail_json(changed=changed, msg=error_msg)
|
||||
|
||||
updated_policies = [cp for cp in iam.get_all_group_policies(name).
|
||||
list_group_policies_result.
|
||||
policy_names]
|
||||
except boto.exception.BotoServerError as err:
|
||||
error_msg = boto_exception(err)
|
||||
module.fail_json(changed=changed, msg=error_msg)
|
||||
|
||||
return changed, name, updated_policies, msg
|
||||
return changed, name, updated_policies, msg
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
iam_type=dict(
|
||||
default=None, required=True, choices=['user', 'group', 'role']),
|
||||
state=dict(
|
||||
default=None, required=True, choices=['present', 'absent']),
|
||||
iam_name=dict(default=None, required=False),
|
||||
policy_name=dict(default=None, required=True),
|
||||
policy_document=dict(default=None, required=False),
|
||||
policy_json=dict(type='json', default=None, required=False),
|
||||
skip_duplicates=dict(type='bool', default=True, required=False)
|
||||
))
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
iam_type=dict(
|
||||
default=None, required=True, choices=['user', 'group', 'role']),
|
||||
state=dict(
|
||||
default=None, required=True, choices=['present', 'absent']),
|
||||
iam_name=dict(default=None, required=False),
|
||||
policy_name=dict(default=None, required=True),
|
||||
policy_document=dict(default=None, required=False),
|
||||
policy_json=dict(type='json', default=None, required=False),
|
||||
skip_duplicates=dict(type='bool', default=True, required=False)
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
)
|
||||
|
||||
if not HAS_BOTO:
|
||||
module.fail_json(msg='boto required for this module')
|
||||
if not HAS_BOTO:
|
||||
module.fail_json(msg='boto required for this module')
|
||||
|
||||
state = module.params.get('state').lower()
|
||||
iam_type = module.params.get('iam_type').lower()
|
||||
state = module.params.get('state')
|
||||
name = module.params.get('iam_name')
|
||||
policy_name = module.params.get('policy_name')
|
||||
skip = module.params.get('skip_duplicates')
|
||||
state = module.params.get('state').lower()
|
||||
iam_type = module.params.get('iam_type').lower()
|
||||
state = module.params.get('state')
|
||||
name = module.params.get('iam_name')
|
||||
policy_name = module.params.get('policy_name')
|
||||
skip = module.params.get('skip_duplicates')
|
||||
|
||||
if module.params.get('policy_document') is not None and module.params.get('policy_json') is not None:
|
||||
module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')
|
||||
if module.params.get('policy_document') is not None and module.params.get('policy_json') is not None:
|
||||
module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')
|
||||
|
||||
if module.params.get('policy_document') is not None:
|
||||
with open(module.params.get('policy_document'), 'r') as json_data:
|
||||
pdoc = json.dumps(json.load(json_data))
|
||||
json_data.close()
|
||||
elif module.params.get('policy_json') is not None:
|
||||
pdoc = module.params.get('policy_json')
|
||||
# if its a string, assume it is already JSON
|
||||
if not isinstance(pdoc, basestring):
|
||||
try:
|
||||
pdoc = json.dumps(pdoc)
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
|
||||
else:
|
||||
pdoc=None
|
||||
|
||||
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
|
||||
|
||||
try:
|
||||
if region:
|
||||
iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
|
||||
if module.params.get('policy_document') is not None:
|
||||
with open(module.params.get('policy_document'), 'r') as json_data:
|
||||
pdoc = json.dumps(json.load(json_data))
|
||||
json_data.close()
|
||||
elif module.params.get('policy_json') is not None:
|
||||
pdoc = module.params.get('policy_json')
|
||||
# if its a string, assume it is already JSON
|
||||
if not isinstance(pdoc, basestring):
|
||||
try:
|
||||
pdoc = json.dumps(pdoc)
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
|
||||
else:
|
||||
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
|
||||
except boto.exception.NoAuthHandlerFound as e:
|
||||
module.fail_json(msg=str(e))
|
||||
pdoc=None
|
||||
|
||||
changed = False
|
||||
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
|
||||
|
||||
if iam_type == 'user':
|
||||
changed, user_name, current_policies = user_action(module, iam, name,
|
||||
policy_name, skip, pdoc,
|
||||
state)
|
||||
module.exit_json(changed=changed, user_name=name, policies=current_policies)
|
||||
elif iam_type == 'role':
|
||||
changed, role_name, current_policies = role_action(module, iam, name,
|
||||
policy_name, skip, pdoc,
|
||||
state)
|
||||
module.exit_json(changed=changed, role_name=name, policies=current_policies)
|
||||
elif iam_type == 'group':
|
||||
changed, group_name, current_policies, msg = group_action(module, iam, name,
|
||||
policy_name, skip, pdoc,
|
||||
state)
|
||||
module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
|
||||
try:
|
||||
if region:
|
||||
iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
|
||||
else:
|
||||
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
|
||||
except boto.exception.NoAuthHandlerFound as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
changed = False
|
||||
|
||||
if iam_type == 'user':
|
||||
changed, user_name, current_policies = user_action(module, iam, name,
|
||||
policy_name, skip, pdoc,
|
||||
state)
|
||||
module.exit_json(changed=changed, user_name=name, policies=current_policies)
|
||||
elif iam_type == 'role':
|
||||
changed, role_name, current_policies = role_action(module, iam, name,
|
||||
policy_name, skip, pdoc,
|
||||
state)
|
||||
module.exit_json(changed=changed, role_name=name, policies=current_policies)
|
||||
elif iam_type == 'group':
|
||||
changed, group_name, current_policies, msg = group_action(module, iam, name,
|
||||
policy_name, skip, pdoc,
|
||||
state)
|
||||
module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
|
|
@@ -367,8 +367,8 @@ def main():
else:
# No VPC configuration is desired, assure VPC config is empty when present in current config
if ('VpcConfig' in current_config and
'VpcId' in current_config['VpcConfig'] and
current_config['VpcConfig']['VpcId'] != ''):
'VpcId' in current_config['VpcConfig'] and
current_config['VpcConfig']['VpcId'] != ''):
func_kwargs.update({'VpcConfig':{'SubnetIds': [], 'SecurityGroupIds': []}})

# Upload new configuration if configuration has changed

@@ -677,7 +677,7 @@ def await_resource(conn, resource, status, module):
# Some RDS resources take much longer than others to be ready. Check
# less aggressively for slow ones to avoid throttling.
if time.time() > start_time + 90:
check_interval = 20
check_interval = 20
return resource

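The rds hunk above only changes how far `check_interval = 20` is indented, which decides whether the assignment happens inside the `if time.time() > start_time + 90:` block. A small self-contained sketch of such a polling loop (the `await_ready` helper and its arguments are invented, not the module's real `await_resource`):

import time

def await_ready(is_ready, check_interval=5, slow_after=90):
    start_time = time.time()
    while not is_ready():
        # The back-off only applies to slow resources because the assignment is
        # indented inside the if-block; that is the indentation the hunk above fixes.
        if time.time() > start_time + slow_after:
            check_interval = 20
        time.sleep(check_interval)

await_ready(lambda: True)  # returns immediately because the resource is already ready
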
@@ -276,8 +276,8 @@ class SnsTopicManager(object):
for sub in self.subscriptions_existing:
sub_key = (sub['Protocol'], sub['Endpoint'])
subscriptions_existing_list.append(sub_key)
if self.purge_subscriptions and sub_key not in desired_subscriptions and \
sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
if (self.purge_subscriptions and sub_key not in desired_subscriptions and
sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
self.changed = True
self.subscriptions_deleted.append(sub_key)
if not self.check_mode:

@@ -847,13 +847,13 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
self.create_or_update_vm(vm_resource)

# Make sure we leave the machine in requested power state
if powerstate_change == 'poweron' and \
self.results['ansible_facts']['azure_vm']['powerstate'] != 'running':
if (powerstate_change == 'poweron' and
self.results['ansible_facts']['azure_vm']['powerstate'] != 'running'):
# Attempt to power on the machine
self.power_on_vm()

elif powerstate_change == 'poweroff' and \
self.results['ansible_facts']['azure_vm']['powerstate'] == 'running':
elif (powerstate_change == 'poweroff' and
self.results['ansible_facts']['azure_vm']['powerstate'] == 'running'):
# Attempt to power off the machine
self.power_off_vm()

@@ -275,8 +275,8 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if protocol == rule['protocol'] \
and public_port == int(rule['publicport']):
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule

@@ -178,10 +178,10 @@ CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'

try:
from ast import literal_eval
HAS_PYTHON26 = True
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
HAS_PYTHON26 = False

try:
from google.cloud import pubsub

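The gcpubsub hunk above re-indents the bodies of a try/except import guard, the usual Ansible convention of recording a missing optional dependency in a HAS_* flag and failing later with a clear message. A generic, self-contained sketch of that convention (`some_sdk` is a placeholder, not a real dependency of this module):

try:
    import some_sdk  # noqa: F401  -- placeholder import for illustration
    HAS_SOME_SDK = True
except ImportError:
    HAS_SOME_SDK = False

if not HAS_SOME_SDK:
    # A real module would call module.fail_json(msg=...) here instead of printing.
    print("some_sdk is not installed")
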
@@ -191,137 +191,137 @@ except ImportError as e:
|
|||
|
||||
|
||||
def publish_messages(message_list, topic):
|
||||
with topic.batch() as batch:
|
||||
for message in message_list:
|
||||
msg = message['message']
|
||||
attrs = {}
|
||||
if 'attributes' in message:
|
||||
attrs = message['attributes']
|
||||
batch.publish(bytes(msg), **attrs)
|
||||
return True
|
||||
with topic.batch() as batch:
|
||||
for message in message_list:
|
||||
msg = message['message']
|
||||
attrs = {}
|
||||
if 'attributes' in message:
|
||||
attrs = message['attributes']
|
||||
batch.publish(bytes(msg), **attrs)
|
||||
return True
|
||||
|
||||
def pull_messages(pull_params, sub):
|
||||
"""
|
||||
:rtype: tuple (output, changed)
|
||||
"""
|
||||
changed = False
|
||||
max_messages=pull_params.get('max_messages', None)
|
||||
message_ack = pull_params.get('message_ack', 'no')
|
||||
return_immediately = pull_params.get('return_immediately', False)
|
||||
"""
|
||||
:rtype: tuple (output, changed)
|
||||
"""
|
||||
changed = False
|
||||
max_messages=pull_params.get('max_messages', None)
|
||||
message_ack = pull_params.get('message_ack', 'no')
|
||||
return_immediately = pull_params.get('return_immediately', False)
|
||||
|
||||
output= []
|
||||
pulled = sub.pull(return_immediately=return_immediately,
|
||||
max_messages=max_messages)
|
||||
output= []
|
||||
pulled = sub.pull(return_immediately=return_immediately,
|
||||
max_messages=max_messages)
|
||||
|
||||
for ack_id, msg in pulled:
|
||||
msg_dict = {'message_id': msg.message_id,
|
||||
'attributes': msg.attributes,
|
||||
'data': msg.data,
|
||||
'ack_id': ack_id }
|
||||
output.append(msg_dict)
|
||||
for ack_id, msg in pulled:
|
||||
msg_dict = {'message_id': msg.message_id,
|
||||
'attributes': msg.attributes,
|
||||
'data': msg.data,
|
||||
'ack_id': ack_id }
|
||||
output.append(msg_dict)
|
||||
|
||||
if message_ack:
|
||||
ack_ids = [m['ack_id'] for m in output]
|
||||
if ack_ids:
|
||||
sub.acknowledge(ack_ids)
|
||||
changed = True
|
||||
return (output, changed)
|
||||
if message_ack:
|
||||
ack_ids = [m['ack_id'] for m in output]
|
||||
if ack_ids:
|
||||
sub.acknowledge(ack_ids)
|
||||
changed = True
|
||||
return (output, changed)
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(argument_spec=dict(
|
||||
topic=dict(required=True),
|
||||
state=dict(choices=['absent', 'present'], default='present'),
|
||||
publish=dict(type='list', default=None),
|
||||
subscription=dict(type='dict', default=None),
|
||||
service_account_email=dict(),
|
||||
credentials_file=dict(),
|
||||
project_id=dict(), ),)
|
||||
module = AnsibleModule(argument_spec=dict(
|
||||
topic=dict(required=True),
|
||||
state=dict(choices=['absent', 'present'], default='present'),
|
||||
publish=dict(type='list', default=None),
|
||||
subscription=dict(type='dict', default=None),
|
||||
service_account_email=dict(),
|
||||
credentials_file=dict(),
|
||||
project_id=dict(), ),)
|
||||
|
||||
if not HAS_PYTHON26:
|
||||
module.fail_json(
|
||||
msg="GCE module requires python's 'ast' module, python v2.6+")
|
||||
if not HAS_PYTHON26:
|
||||
module.fail_json(
|
||||
msg="GCE module requires python's 'ast' module, python v2.6+")
|
||||
|
||||
if not HAS_GOOGLE_CLOUD_PUBSUB:
|
||||
module.fail_json(msg="Please install google-cloud-pubsub library.")
|
||||
if not HAS_GOOGLE_CLOUD_PUBSUB:
|
||||
module.fail_json(msg="Please install google-cloud-pubsub library.")
|
||||
|
||||
if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
|
||||
if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
|
||||
module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
|
||||
|
||||
mod_params = {}
|
||||
mod_params['publish'] = module.params.get('publish')
|
||||
mod_params['state'] = module.params.get('state')
|
||||
mod_params['topic'] = module.params.get('topic')
|
||||
mod_params['subscription'] = module.params.get('subscription')
|
||||
mod_params = {}
|
||||
mod_params['publish'] = module.params.get('publish')
|
||||
mod_params['state'] = module.params.get('state')
|
||||
mod_params['topic'] = module.params.get('topic')
|
||||
mod_params['subscription'] = module.params.get('subscription')
|
||||
|
||||
creds, params = get_google_cloud_credentials(module)
|
||||
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
|
||||
pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
|
||||
creds, params = get_google_cloud_credentials(module)
|
||||
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
|
||||
pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
|
||||
|
||||
changed = False
|
||||
json_output = {}
|
||||
changed = False
|
||||
json_output = {}
|
||||
|
||||
t = None
|
||||
if mod_params['topic']:
|
||||
t = pubsub_client.topic(mod_params['topic'])
|
||||
s = None
|
||||
if mod_params['subscription']:
|
||||
# Note: default ack deadline cannot be changed without deleting/recreating subscription
|
||||
s = t.subscription(mod_params['subscription']['name'],
|
||||
ack_deadline=mod_params['subscription'].get('ack_deadline', None),
|
||||
push_endpoint=mod_params['subscription'].get('push_endpoint', None))
|
||||
t = None
|
||||
if mod_params['topic']:
|
||||
t = pubsub_client.topic(mod_params['topic'])
|
||||
s = None
|
||||
if mod_params['subscription']:
|
||||
# Note: default ack deadline cannot be changed without deleting/recreating subscription
|
||||
s = t.subscription(mod_params['subscription']['name'],
|
||||
ack_deadline=mod_params['subscription'].get('ack_deadline', None),
|
||||
push_endpoint=mod_params['subscription'].get('push_endpoint', None))
|
||||
|
||||
if mod_params['state'] == 'absent':
|
||||
# Remove the most granular resource. If subcription is specified
|
||||
# we remove it. If only topic is specified, that is what is removed.
|
||||
# Note that a topic can be removed without first removing the subscription.
|
||||
# TODO(supertom): Enhancement: Provide an option to only delete a topic
|
||||
# if there are no subscriptions associated with it (which the API does not support).
|
||||
if s is not None:
|
||||
if s.exists():
|
||||
s.delete()
|
||||
if mod_params['state'] == 'absent':
|
||||
# Remove the most granular resource. If subcription is specified
|
||||
# we remove it. If only topic is specified, that is what is removed.
|
||||
# Note that a topic can be removed without first removing the subscription.
|
||||
# TODO(supertom): Enhancement: Provide an option to only delete a topic
|
||||
# if there are no subscriptions associated with it (which the API does not support).
|
||||
if s is not None:
|
||||
if s.exists():
|
||||
s.delete()
|
||||
changed = True
|
||||
else:
|
||||
if t.exists():
|
||||
t.delete()
|
||||
changed = True
|
||||
elif mod_params['state'] == 'present':
|
||||
if not t.exists():
|
||||
t.create()
|
||||
changed = True
|
||||
else:
|
||||
if t.exists():
|
||||
t.delete()
|
||||
changed = True
|
||||
elif mod_params['state'] == 'present':
|
||||
if not t.exists():
|
||||
t.create()
|
||||
changed = True
|
||||
if s:
|
||||
if not s.exists():
|
||||
s.create()
|
||||
s.reload()
|
||||
changed = True
|
||||
else:
|
||||
# Subscription operations
|
||||
# TODO(supertom): if more 'update' operations arise, turn this into a function.
|
||||
s.reload()
|
||||
push_endpoint=mod_params['subscription'].get('push_endpoint', None)
|
||||
if push_endpoint is not None:
|
||||
if push_endpoint != s.push_endpoint:
|
||||
if push_endpoint == 'None':
|
||||
push_endpoint = None
|
||||
s.modify_push_configuration(push_endpoint=push_endpoint)
|
||||
s.reload()
|
||||
changed = push_endpoint == s.push_endpoint
|
||||
if s:
|
||||
if not s.exists():
|
||||
s.create()
|
||||
s.reload()
|
||||
changed = True
|
||||
else:
|
||||
# Subscription operations
|
||||
# TODO(supertom): if more 'update' operations arise, turn this into a function.
|
||||
s.reload()
|
||||
push_endpoint=mod_params['subscription'].get('push_endpoint', None)
|
||||
if push_endpoint is not None:
|
||||
if push_endpoint != s.push_endpoint:
|
||||
if push_endpoint == 'None':
|
||||
push_endpoint = None
|
||||
s.modify_push_configuration(push_endpoint=push_endpoint)
|
||||
s.reload()
|
||||
changed = push_endpoint == s.push_endpoint
|
||||
|
||||
if 'pull' in mod_params['subscription']:
|
||||
if s.push_endpoint is not None:
|
||||
module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
|
||||
(json_output['pulled_messages'], changed) = pull_messages(
|
||||
mod_params['subscription']['pull'], s)
|
||||
if 'pull' in mod_params['subscription']:
|
||||
if s.push_endpoint is not None:
|
||||
module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
|
||||
(json_output['pulled_messages'], changed) = pull_messages(
|
||||
mod_params['subscription']['pull'], s)
|
||||
|
||||
# publish messages to the topic
|
||||
if mod_params['publish'] and len(mod_params['publish']) > 0:
|
||||
changed = publish_messages(mod_params['publish'], t)
|
||||
# publish messages to the topic
|
||||
if mod_params['publish'] and len(mod_params['publish']) > 0:
|
||||
changed = publish_messages(mod_params['publish'], t)
|
||||
|
||||
|
||||
json_output['changed'] = changed
|
||||
json_output.update(mod_params)
|
||||
module.exit_json(**json_output)
|
||||
json_output['changed'] = changed
|
||||
json_output.update(mod_params)
|
||||
module.exit_json(**json_output)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
|
|
@@ -198,7 +198,14 @@ EXAMPLES = '''
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'

# Create new container automatically selecting the next available vmid.
- proxmox: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- proxmox:
node: 'uk-mc02'
api_user: 'root@pam'
api_password: '1q2w3e'
api_host: 'node1'
password: '123456'
hostname: 'example.org'
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'

# Create new container with minimal options with force(it will rewrite existing container)
- proxmox:

@@ -304,299 +311,299 @@ import os
|
|||
import time
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
VZ_TYPE=None
|
||||
|
||||
def get_nextvmid(proxmox):
|
||||
try:
|
||||
vmid = proxmox.cluster.nextid.get()
|
||||
return vmid
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Unable to get next vmid. Failed with exception: %s")
|
||||
try:
|
||||
vmid = proxmox.cluster.nextid.get()
|
||||
return vmid
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Unable to get next vmid. Failed with exception: %s")
|
||||
|
||||
def get_vmid(proxmox, hostname):
|
||||
return [ vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm['name'] == hostname ]
|
||||
|
||||
def get_instance(proxmox, vmid):
|
||||
return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
|
||||
return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
|
||||
|
||||
def content_check(proxmox, node, ostemplate, template_store):
|
||||
return [ True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate ]
|
||||
return [ True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate ]
|
||||
|
||||
def node_check(proxmox, node):
|
||||
return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
|
||||
return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
|
||||
|
||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
|
||||
proxmox_node = proxmox.nodes(node)
|
||||
kwargs = dict((k,v) for k, v in kwargs.items() if v is not None)
|
||||
proxmox_node = proxmox.nodes(node)
|
||||
kwargs = dict((k,v) for k, v in kwargs.items() if v is not None)
|
||||
|
||||
if VZ_TYPE =='lxc':
|
||||
kwargs['cpulimit']=cpus
|
||||
kwargs['rootfs']=disk
|
||||
if 'netif' in kwargs:
|
||||
kwargs.update(kwargs['netif'])
|
||||
del kwargs['netif']
|
||||
if 'mounts' in kwargs:
|
||||
kwargs.update(kwargs['mounts'])
|
||||
del kwargs['mounts']
|
||||
if 'pubkey' in kwargs:
|
||||
if float(proxmox.version.get()['version']) >= 4.2:
|
||||
kwargs['ssh-public-keys'] = kwargs['pubkey']
|
||||
del kwargs['pubkey']
|
||||
else:
|
||||
kwargs['cpus']=cpus
|
||||
kwargs['disk']=disk
|
||||
if VZ_TYPE =='lxc':
|
||||
kwargs['cpulimit']=cpus
|
||||
kwargs['rootfs']=disk
|
||||
if 'netif' in kwargs:
|
||||
kwargs.update(kwargs['netif'])
|
||||
del kwargs['netif']
|
||||
if 'mounts' in kwargs:
|
||||
kwargs.update(kwargs['mounts'])
|
||||
del kwargs['mounts']
|
||||
if 'pubkey' in kwargs:
|
||||
if float(proxmox.version.get()['version']) >= 4.2:
|
||||
kwargs['ssh-public-keys'] = kwargs['pubkey']
|
||||
del kwargs['pubkey']
|
||||
else:
|
||||
kwargs['cpus']=cpus
|
||||
kwargs['disk']=disk
|
||||
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||
|
||||
while timeout:
|
||||
if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
|
||||
% proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def start_instance(module, proxmox, vm, vmid, timeout):
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
|
||||
while timeout:
|
||||
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def stop_instance(module, proxmox, vm, vmid, timeout, force):
|
||||
if force:
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
|
||||
else:
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
|
||||
while timeout:
|
||||
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
|
||||
% proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def umount_instance(module, proxmox, vm, vmid, timeout):
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
|
||||
while timeout:
|
||||
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s'
|
||||
% proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
api_host = dict(required=True),
|
||||
api_user = dict(required=True),
|
||||
api_password = dict(no_log=True),
|
||||
vmid = dict(required=False),
|
||||
validate_certs = dict(type='bool', default='no'),
|
||||
node = dict(),
|
||||
pool = dict(),
|
||||
password = dict(no_log=True),
|
||||
hostname = dict(),
|
||||
ostemplate = dict(),
|
||||
disk = dict(type='str', default='3'),
|
||||
cpus = dict(type='int', default=1),
|
||||
memory = dict(type='int', default=512),
|
||||
swap = dict(type='int', default=0),
|
||||
netif = dict(type='dict'),
|
||||
mounts = dict(type='dict'),
|
||||
ip_address = dict(),
|
||||
onboot = dict(type='bool', default='no'),
|
||||
storage = dict(default='local'),
|
||||
cpuunits = dict(type='int', default=1000),
|
||||
nameserver = dict(),
|
||||
searchdomain = dict(),
|
||||
timeout = dict(type='int', default=30),
|
||||
force = dict(type='bool', default='no'),
|
||||
state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
|
||||
pubkey = dict(type='str', default=None),
|
||||
unprivileged = dict(type='bool', default='no')
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg='proxmoxer required for this module')
|
||||
|
||||
state = module.params['state']
|
||||
api_user = module.params['api_user']
|
||||
api_host = module.params['api_host']
|
||||
api_password = module.params['api_password']
|
||||
vmid = module.params['vmid']
|
||||
validate_certs = module.params['validate_certs']
|
||||
node = module.params['node']
|
||||
disk = module.params['disk']
|
||||
cpus = module.params['cpus']
|
||||
memory = module.params['memory']
|
||||
swap = module.params['swap']
|
||||
storage = module.params['storage']
|
||||
hostname = module.params['hostname']
|
||||
if module.params['ostemplate'] is not None:
|
||||
template_store = module.params['ostemplate'].split(":")[0]
|
||||
timeout = module.params['timeout']
|
||||
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError as e:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
|
||||
try:
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
|
||||
global VZ_TYPE
|
||||
VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc'
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
|
||||
|
||||
# If vmid not set get the Next VM id from ProxmoxAPI
|
||||
# If hostname is set get the VM id from ProxmoxAPI
|
||||
if not vmid and state == 'present':
|
||||
vmid = get_nextvmid(proxmox)
|
||||
elif not vmid and hostname:
|
||||
vmid = get_vmid(proxmox, hostname)[0]
|
||||
elif not vmid:
|
||||
module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
# If no vmid was passed, there cannot be another VM named 'hostname'
|
||||
if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
|
||||
elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
|
||||
module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
|
||||
elif not node_check(proxmox, node):
|
||||
module.fail_json(msg="node '%s' not exists in cluster" % node)
|
||||
elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
|
||||
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
|
||||
% (module.params['ostemplate'], node, template_store))
|
||||
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
|
||||
pool = module.params['pool'],
|
||||
password = module.params['password'],
|
||||
hostname = module.params['hostname'],
|
||||
ostemplate = module.params['ostemplate'],
|
||||
netif = module.params['netif'],
|
||||
mounts = module.params['mounts'],
|
||||
ip_address = module.params['ip_address'],
|
||||
onboot = int(module.params['onboot']),
|
||||
cpuunits = module.params['cpuunits'],
|
||||
nameserver = module.params['nameserver'],
|
||||
searchdomain = module.params['searchdomain'],
|
||||
force = int(module.params['force']),
|
||||
pubkey = module.params['pubkey'],
|
||||
unprivileged = int(module.params['unprivileged']))
|
||||
|
||||
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e ))
|
||||
|
||||
elif state == 'started':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
|
||||
|
||||
if start_instance(module, proxmox, vm, vmid, timeout):
|
||||
module.exit_json(changed=True, msg="VM %s started" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'stopped':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
|
||||
if module.params['force']:
|
||||
if umount_instance(module, proxmox, vm, vmid, timeout):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
else:
|
||||
module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
|
||||
"You can use force option to umount it.") % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
|
||||
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
|
||||
|
||||
if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'restarted':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
|
||||
if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped'
|
||||
or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ):
|
||||
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
|
||||
|
||||
if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
|
||||
start_instance(module, proxmox, vm, vmid, timeout) ):
|
||||
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
|
||||
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
|
||||
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
|
||||
while timeout:
|
||||
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
while timeout:
|
||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
|
||||
% proxmox_node.tasks(taskid).log.get()[:1])
|
||||
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def start_instance(module, proxmox, vm, vmid, timeout):
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def stop_instance(module, proxmox, vm, vmid, timeout, force):
|
||||
if force:
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
|
||||
else:
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def umount_instance(module, proxmox, vm, vmid, timeout):
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
api_host = dict(required=True),
|
||||
api_user = dict(required=True),
|
||||
api_password = dict(no_log=True),
|
||||
vmid = dict(required=False),
|
||||
validate_certs = dict(type='bool', default='no'),
|
||||
node = dict(),
|
||||
pool = dict(),
|
||||
password = dict(no_log=True),
|
||||
hostname = dict(),
|
||||
ostemplate = dict(),
|
||||
disk = dict(type='str', default='3'),
|
||||
cpus = dict(type='int', default=1),
|
||||
memory = dict(type='int', default=512),
|
||||
swap = dict(type='int', default=0),
|
||||
netif = dict(type='dict'),
|
||||
mounts = dict(type='dict'),
|
||||
ip_address = dict(),
|
||||
onboot = dict(type='bool', default='no'),
|
||||
storage = dict(default='local'),
|
||||
cpuunits = dict(type='int', default=1000),
|
||||
nameserver = dict(),
|
||||
searchdomain = dict(),
|
||||
timeout = dict(type='int', default=30),
|
||||
force = dict(type='bool', default='no'),
|
||||
state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
|
||||
pubkey = dict(type='str', default=None),
|
||||
unprivileged = dict(type='bool', default='no')
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg='proxmoxer required for this module')
|
||||
|
||||
state = module.params['state']
|
||||
api_user = module.params['api_user']
|
||||
api_host = module.params['api_host']
|
||||
api_password = module.params['api_password']
|
||||
vmid = module.params['vmid']
|
||||
validate_certs = module.params['validate_certs']
|
||||
node = module.params['node']
|
||||
disk = module.params['disk']
|
||||
cpus = module.params['cpus']
|
||||
memory = module.params['memory']
|
||||
swap = module.params['swap']
|
||||
storage = module.params['storage']
|
||||
hostname = module.params['hostname']
|
||||
if module.params['ostemplate'] is not None:
|
||||
template_store = module.params['ostemplate'].split(":")[0]
|
||||
timeout = module.params['timeout']
|
||||
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError as e:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
|
||||
try:
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
|
||||
global VZ_TYPE
|
||||
VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc'
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
|
||||
|
||||
# If vmid not set get the Next VM id from ProxmoxAPI
|
||||
# If hostname is set get the VM id from ProxmoxAPI
|
||||
if not vmid and state == 'present':
|
||||
vmid = get_nextvmid(proxmox)
|
||||
elif not vmid and hostname:
|
||||
vmid = get_vmid(proxmox, hostname)[0]
|
||||
elif not vmid:
|
||||
module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
# If no vmid was passed, there cannot be another VM named 'hostname'
|
||||
if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
|
||||
elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
|
||||
module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
|
||||
elif not node_check(proxmox, node):
|
||||
module.fail_json(msg="node '%s' not exists in cluster" % node)
|
||||
elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
|
||||
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
|
||||
% (module.params['ostemplate'], node, template_store))
|
||||
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
|
||||
pool = module.params['pool'],
|
||||
password = module.params['password'],
|
||||
hostname = module.params['hostname'],
|
||||
ostemplate = module.params['ostemplate'],
|
||||
netif = module.params['netif'],
|
||||
mounts = module.params['mounts'],
|
||||
ip_address = module.params['ip_address'],
|
||||
onboot = int(module.params['onboot']),
|
||||
cpuunits = module.params['cpuunits'],
|
||||
nameserver = module.params['nameserver'],
|
||||
searchdomain = module.params['searchdomain'],
|
||||
force = int(module.params['force']),
|
||||
pubkey = module.params['pubkey'],
|
||||
unprivileged = int(module.params['unprivileged']))
|
||||
|
||||
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e ))
|
||||
|
||||
elif state == 'started':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
|
||||
|
||||
if start_instance(module, proxmox, vm, vmid, timeout):
|
||||
module.exit_json(changed=True, msg="VM %s started" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'stopped':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
|
||||
if module.params['force']:
|
||||
if umount_instance(module, proxmox, vm, vmid, timeout):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
else:
|
||||
module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
|
||||
"You can use force option to umount it.") % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
|
||||
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
|
||||
|
||||
if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'restarted':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
|
||||
if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or
|
||||
getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'):
|
||||
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
|
||||
|
||||
if (stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
|
||||
start_instance(module, proxmox, vm, vmid, timeout)):
|
||||
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
vm = get_instance(proxmox, vmid)
|
||||
if not vm:
|
||||
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
|
||||
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
|
||||
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
|
||||
% proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
@@ -858,13 +858,13 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
|
||||
|
||||
while timeout:
|
||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
|
||||
% proxmox_node.tasks(taskid).log.get()[:1])
|
||||
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
@@ -872,8 +872,8 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock
def start_vm(module, proxmox, vm, vmid, timeout):
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
@@ -890,8 +890,8 @@ def stop_vm(module, proxmox, vm, vmid, timeout, force):
else:
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
@@ -1207,8 +1207,8 @@ def main():
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
|
||||
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
|
@@ -140,118 +140,118 @@ import os
import time
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
def get_template(proxmox, node, storage, content_type, template):
|
||||
return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get()
|
||||
if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ]
|
||||
return [ True for tmpl in proxmox.nodes(node).storage(storage).content.get()
|
||||
if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template) ]
|
||||
|
||||
def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
|
||||
taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath))
|
||||
while timeout:
|
||||
task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get()
|
||||
if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for uploading template. Last line in task before timeout: %s'
|
||||
% proxmox.node(node).tasks(taskid).log.get()[:1])
|
||||
taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath))
|
||||
while timeout:
|
||||
task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get()
|
||||
if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for uploading template. Last line in task before timeout: %s'
|
||||
% proxmox.node(node).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def delete_template(module, proxmox, node, storage, content_type, template, timeout):
|
||||
volid = '%s:%s/%s' % (storage, content_type, template)
|
||||
proxmox.nodes(node).storage(storage).content.delete(volid)
|
||||
while timeout:
|
||||
if not get_template(proxmox, node, storage, content_type, template):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for deleting template.')
|
||||
volid = '%s:%s/%s' % (storage, content_type, template)
|
||||
proxmox.nodes(node).storage(storage).content.delete(volid)
|
||||
while timeout:
|
||||
if not get_template(proxmox, node, storage, content_type, template):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for deleting template.')
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
api_host = dict(required=True),
|
||||
api_user = dict(required=True),
|
||||
api_password = dict(no_log=True),
|
||||
validate_certs = dict(type='bool', default='no'),
|
||||
node = dict(),
|
||||
src = dict(),
|
||||
template = dict(),
|
||||
content_type = dict(default='vztmpl', choices=['vztmpl','iso']),
|
||||
storage = dict(default='local'),
|
||||
timeout = dict(type='int', default=30),
|
||||
force = dict(type='bool', default='no'),
|
||||
state = dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(
|
||||
api_host = dict(required=True),
|
||||
api_user = dict(required=True),
|
||||
api_password = dict(no_log=True),
|
||||
validate_certs = dict(type='bool', default='no'),
|
||||
node = dict(),
|
||||
src = dict(),
|
||||
template = dict(),
|
||||
content_type = dict(default='vztmpl', choices=['vztmpl','iso']),
|
||||
storage = dict(default='local'),
|
||||
timeout = dict(type='int', default=30),
|
||||
force = dict(type='bool', default='no'),
|
||||
state = dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg='proxmoxer required for this module')
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg='proxmoxer required for this module')
|
||||
|
||||
state = module.params['state']
|
||||
api_user = module.params['api_user']
|
||||
api_host = module.params['api_host']
|
||||
api_password = module.params['api_password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
node = module.params['node']
|
||||
storage = module.params['storage']
|
||||
timeout = module.params['timeout']
|
||||
state = module.params['state']
|
||||
api_user = module.params['api_user']
|
||||
api_host = module.params['api_host']
|
||||
api_password = module.params['api_password']
|
||||
validate_certs = module.params['validate_certs']
|
||||
node = module.params['node']
|
||||
storage = module.params['storage']
|
||||
timeout = module.params['timeout']
|
||||
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError as e:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError as e:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
|
||||
try:
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
|
||||
except Exception as e:
|
||||
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
content_type = module.params['content_type']
|
||||
src = module.params['src']
|
||||
|
||||
from ansible import utils
|
||||
realpath = utils.path_dwim(None, src)
|
||||
template = os.path.basename(realpath)
|
||||
if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template))
|
||||
elif not src:
|
||||
module.fail_json(msg='src param to uploading template file is mandatory')
|
||||
elif not (os.path.exists(realpath) and os.path.isfile(realpath)):
|
||||
module.fail_json(msg='template file on path %s not exists' % realpath)
|
||||
|
||||
if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
|
||||
module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e ))
|
||||
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
content_type = module.params['content_type']
|
||||
template = module.params['template']
|
||||
if state == 'present':
|
||||
try:
|
||||
content_type = module.params['content_type']
|
||||
src = module.params['src']
|
||||
|
||||
if not template:
|
||||
module.fail_json(msg='template param is mandatory')
|
||||
elif not get_template(proxmox, node, storage, content_type, template):
|
||||
module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
|
||||
from ansible import utils
|
||||
realpath = utils.path_dwim(None, src)
|
||||
template = os.path.basename(realpath)
|
||||
if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template))
|
||||
elif not src:
|
||||
module.fail_json(msg='src param to uploading template file is mandatory')
|
||||
elif not (os.path.exists(realpath) and os.path.isfile(realpath)):
|
||||
module.fail_json(msg='template file on path %s not exists' % realpath)
|
||||
|
||||
if delete_template(module, proxmox, node, storage, content_type, template, timeout):
|
||||
module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e ))
|
||||
if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
|
||||
module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e ))
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
content_type = module.params['content_type']
|
||||
template = module.params['template']
|
||||
|
||||
if not template:
|
||||
module.fail_json(msg='template param is mandatory')
|
||||
elif not get_template(proxmox, node, storage, content_type, template):
|
||||
module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
|
||||
|
||||
if delete_template(module, proxmox, node, storage, content_type, template, timeout):
|
||||
module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e ))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
@@ -90,10 +90,10 @@ def _needs_update(module, aggregate):
new_metadata = (module.params['metadata'] or {})
|
||||
new_metadata['availability_zone'] = module.params['availability_zone']
|
||||
|
||||
if (module.params['name'] != aggregate.name) or \
|
||||
(module.params['hosts'] is not None and module.params['hosts'] != aggregate.hosts) or \
|
||||
(module.params['availability_zone'] is not None and module.params['availability_zone'] != aggregate.availability_zone) or \
|
||||
(module.params['metadata'] is not None and new_metadata != aggregate.metadata):
|
||||
if ((module.params['name'] != aggregate.name) or
|
||||
(module.params['hosts'] is not None and module.params['hosts'] != aggregate.hosts) or
|
||||
(module.params['availability_zone'] is not None and module.params['availability_zone'] != aggregate.availability_zone) or
|
||||
(module.params['metadata'] is not None and new_metadata != aggregate.metadata)):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
@@ -249,147 +249,147 @@ SANDISK_SIZES = [10,20,25,30,40,50,75,100,125,150,175,200,250,300,350,400,500,75
NIC_SPEEDS = [10,100,1000]
|
||||
|
||||
try:
|
||||
import SoftLayer
|
||||
from SoftLayer import VSManager
|
||||
import SoftLayer
|
||||
from SoftLayer import VSManager
|
||||
|
||||
HAS_SL = True
|
||||
vsManager = VSManager(SoftLayer.create_client_from_env())
|
||||
HAS_SL = True
|
||||
vsManager = VSManager(SoftLayer.create_client_from_env())
|
||||
except ImportError:
|
||||
HAS_SL = False
|
||||
HAS_SL = False
|
||||
|
||||
|
||||
def create_virtual_instance(module):
|
||||
|
||||
instances = vsManager.list_instances(
|
||||
hostname = module.params.get('hostname'),
|
||||
domain = module.params.get('domain'),
|
||||
datacenter = module.params.get('datacenter')
|
||||
)
|
||||
instances = vsManager.list_instances(
|
||||
hostname = module.params.get('hostname'),
|
||||
domain = module.params.get('domain'),
|
||||
datacenter = module.params.get('datacenter')
|
||||
)
|
||||
|
||||
if instances:
|
||||
return False, None
|
||||
if instances:
|
||||
return False, None
|
||||
|
||||
|
||||
# Check if OS or Image Template is provided (Can't be both, defaults to OS)
|
||||
if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
|
||||
module.params['image_id'] = ''
|
||||
elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
|
||||
module.params['os_code'] = ''
|
||||
module.params['disks'] = [] # Blank out disks since it will use the template
|
||||
else:
|
||||
return False, None
|
||||
# Check if OS or Image Template is provided (Can't be both, defaults to OS)
|
||||
if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
|
||||
module.params['image_id'] = ''
|
||||
elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
|
||||
module.params['os_code'] = ''
|
||||
module.params['disks'] = [] # Blank out disks since it will use the template
|
||||
else:
|
||||
return False, None
|
||||
|
||||
tags = module.params.get('tags')
|
||||
if isinstance(tags, list):
|
||||
tags = ','.join(map(str, module.params.get('tags')))
|
||||
tags = module.params.get('tags')
|
||||
if isinstance(tags, list):
|
||||
tags = ','.join(map(str, module.params.get('tags')))
|
||||
|
||||
instance = vsManager.create_instance(
|
||||
hostname = module.params.get('hostname'),
|
||||
domain = module.params.get('domain'),
|
||||
cpus = module.params.get('cpus'),
|
||||
memory = module.params.get('memory'),
|
||||
hourly = module.params.get('hourly'),
|
||||
datacenter = module.params.get('datacenter'),
|
||||
os_code = module.params.get('os_code'),
|
||||
image_id = module.params.get('image_id'),
|
||||
local_disk = module.params.get('local_disk'),
|
||||
disks = module.params.get('disks'),
|
||||
ssh_keys = module.params.get('ssh_keys'),
|
||||
nic_speed = module.params.get('nic_speed'),
|
||||
private = module.params.get('private'),
|
||||
public_vlan = module.params.get('public_vlan'),
|
||||
private_vlan = module.params.get('private_vlan'),
|
||||
dedicated = module.params.get('dedicated'),
|
||||
post_uri = module.params.get('post_uri'),
|
||||
tags = tags)
|
||||
instance = vsManager.create_instance(
|
||||
hostname = module.params.get('hostname'),
|
||||
domain = module.params.get('domain'),
|
||||
cpus = module.params.get('cpus'),
|
||||
memory = module.params.get('memory'),
|
||||
hourly = module.params.get('hourly'),
|
||||
datacenter = module.params.get('datacenter'),
|
||||
os_code = module.params.get('os_code'),
|
||||
image_id = module.params.get('image_id'),
|
||||
local_disk = module.params.get('local_disk'),
|
||||
disks = module.params.get('disks'),
|
||||
ssh_keys = module.params.get('ssh_keys'),
|
||||
nic_speed = module.params.get('nic_speed'),
|
||||
private = module.params.get('private'),
|
||||
public_vlan = module.params.get('public_vlan'),
|
||||
private_vlan = module.params.get('private_vlan'),
|
||||
dedicated = module.params.get('dedicated'),
|
||||
post_uri = module.params.get('post_uri'),
|
||||
tags = tags)
|
||||
|
||||
if instance is not None and instance['id'] > 0:
|
||||
return True, instance
|
||||
else:
|
||||
return False, None
|
||||
if instance is not None and instance['id'] > 0:
|
||||
return True, instance
|
||||
else:
|
||||
return False, None
|
||||
|
||||
|
||||
def wait_for_instance(module,id):
|
||||
instance = None
|
||||
completed = False
|
||||
wait_timeout = time.time() + module.params.get('wait_time')
|
||||
while not completed and wait_timeout > time.time():
|
||||
try:
|
||||
completed = vsManager.wait_for_ready(id, 10, 2)
|
||||
if completed:
|
||||
instance = vsManager.get_instance(id)
|
||||
except:
|
||||
completed = False
|
||||
instance = None
|
||||
completed = False
|
||||
wait_timeout = time.time() + module.params.get('wait_time')
|
||||
while not completed and wait_timeout > time.time():
|
||||
try:
|
||||
completed = vsManager.wait_for_ready(id, 10, 2)
|
||||
if completed:
|
||||
instance = vsManager.get_instance(id)
|
||||
except:
|
||||
completed = False
|
||||
|
||||
return completed, instance
|
||||
return completed, instance
|
||||
|
||||
|
||||
def cancel_instance(module):
|
||||
canceled = True
|
||||
if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
|
||||
tags = module.params.get('tags')
|
||||
if isinstance(tags, basestring):
|
||||
tags = [module.params.get('tags')]
|
||||
instances = vsManager.list_instances(tags = tags, hostname = module.params.get('hostname'), domain = module.params.get('domain'))
|
||||
for instance in instances:
|
||||
try:
|
||||
vsManager.cancel_instance(instance['id'])
|
||||
except:
|
||||
canceled = False
|
||||
elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
|
||||
try:
|
||||
vsManager.cancel_instance(instance['id'])
|
||||
except:
|
||||
canceled = False
|
||||
else:
|
||||
return False, None
|
||||
canceled = True
|
||||
if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
|
||||
tags = module.params.get('tags')
|
||||
if isinstance(tags, basestring):
|
||||
tags = [module.params.get('tags')]
|
||||
instances = vsManager.list_instances(tags = tags, hostname = module.params.get('hostname'), domain = module.params.get('domain'))
|
||||
for instance in instances:
|
||||
try:
|
||||
vsManager.cancel_instance(instance['id'])
|
||||
except:
|
||||
canceled = False
|
||||
elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
|
||||
try:
|
||||
vsManager.cancel_instance(instance['id'])
|
||||
except:
|
||||
canceled = False
|
||||
else:
|
||||
return False, None
|
||||
|
||||
return canceled, None
|
||||
return canceled, None
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
instance_id=dict(),
|
||||
hostname=dict(),
|
||||
domain=dict(),
|
||||
datacenter=dict(choices=DATACENTERS),
|
||||
tags=dict(),
|
||||
hourly=dict(type='bool', default=True),
|
||||
private=dict(type='bool', default=False),
|
||||
dedicated=dict(type='bool', default=False),
|
||||
local_disk=dict(type='bool', default=True),
|
||||
cpus=dict(type='int', choices=CPU_SIZES),
|
||||
memory=dict(type='int', choices=MEMORY_SIZES),
|
||||
disks=dict(type='list', default=[25]),
|
||||
os_code=dict(),
|
||||
image_id=dict(),
|
||||
nic_speed=dict(type='int', choices=NIC_SPEEDS),
|
||||
public_vlan=dict(),
|
||||
private_vlan=dict(),
|
||||
ssh_keys=dict(type='list', default=[]),
|
||||
post_uri=dict(),
|
||||
state=dict(default='present', choices=STATES),
|
||||
wait=dict(type='bool', default=True),
|
||||
wait_time=dict(type='int', default=600)
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
instance_id=dict(),
|
||||
hostname=dict(),
|
||||
domain=dict(),
|
||||
datacenter=dict(choices=DATACENTERS),
|
||||
tags=dict(),
|
||||
hourly=dict(type='bool', default=True),
|
||||
private=dict(type='bool', default=False),
|
||||
dedicated=dict(type='bool', default=False),
|
||||
local_disk=dict(type='bool', default=True),
|
||||
cpus=dict(type='int', choices=CPU_SIZES),
|
||||
memory=dict(type='int', choices=MEMORY_SIZES),
|
||||
disks=dict(type='list', default=[25]),
|
||||
os_code=dict(),
|
||||
image_id=dict(),
|
||||
nic_speed=dict(type='int', choices=NIC_SPEEDS),
|
||||
public_vlan=dict(),
|
||||
private_vlan=dict(),
|
||||
ssh_keys=dict(type='list', default=[]),
|
||||
post_uri=dict(),
|
||||
state=dict(default='present', choices=STATES),
|
||||
wait=dict(type='bool', default=True),
|
||||
wait_time=dict(type='int', default=600)
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_SL:
|
||||
module.fail_json(msg='softlayer python library required for this module')
|
||||
if not HAS_SL:
|
||||
module.fail_json(msg='softlayer python library required for this module')
|
||||
|
||||
if module.params.get('state') == 'absent':
|
||||
(changed, instance) = cancel_instance(module)
|
||||
if module.params.get('state') == 'absent':
|
||||
(changed, instance) = cancel_instance(module)
|
||||
|
||||
elif module.params.get('state') == 'present':
|
||||
(changed, instance) = create_virtual_instance(module)
|
||||
if module.params.get('wait') is True and instance:
|
||||
(changed, instance) = wait_for_instance(module, instance['id'])
|
||||
elif module.params.get('state') == 'present':
|
||||
(changed, instance) = create_virtual_instance(module)
|
||||
if module.params.get('wait') is True and instance:
|
||||
(changed, instance) = wait_for_instance(module, instance['id'])
|
||||
|
||||
module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
|
||||
module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
main()
|
||||
|
@@ -1005,8 +1005,9 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
# Make sure the new disk size is higher than the current value
|
||||
dev = dev_list[disk_num]
|
||||
if disksize < int(dev.capacityInKB):
|
||||
vsphere_client.disconnect()
|
||||
module.fail_json(msg="Error in '%s' definition. New size needs to be higher than the current value (%s GB)." % (disk, int(dev.capacityInKB) / 1024 / 1024))
|
||||
vsphere_client.disconnect()
|
||||
module.fail_json(msg="Error in '%s' definition. New size needs to be higher than the current value (%s GB)." %
|
||||
(disk, int(dev.capacityInKB) / 1024 / 1024))
|
||||
|
||||
# Set the new disk size
|
||||
elif disksize > int(dev.capacityInKB):
|
||||
|
@@ -167,7 +167,7 @@ def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
else:
|
||||
db_info = get_db_info(cursor, db)
|
||||
if (encoding and
|
||||
get_encoding_id(cursor, encoding) != db_info['encoding_id']):
|
||||
get_encoding_id(cursor, encoding) != db_info['encoding_id']):
|
||||
raise NotSupportedError(
|
||||
'Changing database encoding is not supported. '
|
||||
'Current encoding: %s' % db_info['encoding']
|
@@ -193,7 +193,7 @@ def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
else:
|
||||
db_info = get_db_info(cursor, db)
|
||||
if (encoding and
|
||||
get_encoding_id(cursor, encoding) != db_info['encoding_id']):
|
||||
get_encoding_id(cursor, encoding) != db_info['encoding_id']):
|
||||
return False
|
||||
elif lc_collate and lc_collate != db_info['lc_collate']:
|
||||
return False
|
||||
|
@@ -216,8 +216,8 @@ def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
"Changing schema owner is not supported. "
|
||||
"Current owner: {0}."
|
||||
).format(schema_facts[schema_key]['owner']))
|
||||
if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0 or \
|
||||
cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0:
|
||||
if (cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0 or
|
||||
cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0):
|
||||
update_roles(schema_facts, cursor, schema,
|
||||
schema_facts[schema_key]['usage_roles'], usage_roles,
|
||||
schema_facts[schema_key]['create_roles'], create_roles)
|
||||
|
@@ -193,7 +193,7 @@ def update_roles(user_facts, cursor, user,
cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
|
||||
|
||||
def check(user_facts, user, profile, resource_pool,
|
||||
locked, password, expired, ldap, roles):
|
||||
locked, password, expired, ldap, roles):
|
||||
user_key = user.lower()
|
||||
if user_key not in user_facts:
|
||||
return False
|
@@ -205,16 +205,16 @@ def check(user_facts, user, profile, resource_pool,
return False
|
||||
if password and password != user_facts[user_key]['password']:
|
||||
return False
|
||||
if expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or \
|
||||
ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'):
|
||||
if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or
|
||||
ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
|
||||
return False
|
||||
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
|
||||
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
|
||||
if (roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or
|
||||
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0)):
|
||||
return False
|
||||
return True
|
||||
|
||||
def present(user_facts, cursor, user, profile, resource_pool,
|
||||
locked, password, expired, ldap, roles):
|
||||
locked, password, expired, ldap, roles):
|
||||
user_key = user.lower()
|
||||
if user_key not in user_facts:
|
||||
query_fragments = ["create user {0}".format(user)]
|
@@ -275,8 +275,8 @@ def present(user_facts, cursor, user, profile, resource_pool,
changed = True
|
||||
if changed:
|
||||
cursor.execute(' '.join(query_fragments))
|
||||
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
|
||||
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
|
||||
if (roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or
|
||||
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0)):
|
||||
update_roles(user_facts, cursor, user,
|
||||
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
|
||||
changed = True
|
||||
|
@@ -825,7 +825,7 @@ def main():
|
||||
if existing.get('asn'):
|
||||
if (existing.get('asn') != module.params['asn'] and
|
||||
state == 'present'):
|
||||
state == 'present'):
|
||||
module.fail_json(msg='Another BGP ASN already exists.',
|
||||
proposed_asn=module.params['asn'],
|
||||
existing_asn=existing.get('asn'))
|
||||
|
@@ -704,7 +704,7 @@ def get_table_map_command(module, existing, key, value):
if value != 'default':
|
||||
command = '{0} {1}'.format(key, module.params['table_map'])
|
||||
if (module.params['table_map_filter'] is not None and
|
||||
module.params['table_map_filter'] != 'default'):
|
||||
module.params['table_map_filter'] != 'default'):
|
||||
command += ' filter'
|
||||
commands.append(command)
|
||||
else:
|
@@ -916,7 +916,7 @@ def main():
|
||||
if existing.get('asn'):
|
||||
if (existing.get('asn') != module.params['asn'] and
|
||||
state == 'present'):
|
||||
state == 'present'):
|
||||
module.fail_json(msg='Another BGP ASN already exists.',
|
||||
proposed_asn=module.params['asn'],
|
||||
existing_asn=existing.get('asn'))
|
||||
|
@@ -582,7 +582,7 @@ def main():
existing = invoke('get_existing', module, args)
|
||||
if existing.get('asn'):
|
||||
if (existing.get('asn') != module.params['asn'] and
|
||||
state == 'present'):
|
||||
state == 'present'):
|
||||
module.fail_json(msg='Another BGP ASN already exists.',
|
||||
proposed_asn=module.params['asn'],
|
||||
existing_asn=existing.get('asn'))
|
||||
|
@@ -415,7 +415,7 @@ def get_custom_value(arg, config, module):
value = ''
|
||||
|
||||
if (arg.startswith('filter_list') or arg.startswith('prefix_list') or
|
||||
arg.startswith('route_map')):
|
||||
arg.startswith('route_map')):
|
||||
value = in_out_param(arg, splitted_config, module)
|
||||
elif arg == 'send_community':
|
||||
for line in splitted_config:
|
@@ -874,9 +874,9 @@ def main():
|
||||
state = module.params['state']
|
||||
if ((module.params['max_prefix_interval'] or
|
||||
module.params['max_prefix_warning'] or
|
||||
module.params['max_prefix_threshold']) and
|
||||
not module.params['max_prefix_limit']):
|
||||
module.params['max_prefix_warning'] or
|
||||
module.params['max_prefix_threshold']) and
|
||||
not module.params['max_prefix_limit']):
|
||||
module.fail_json(msg='max_prefix_limit is required when using '
|
||||
'max_prefix_warning, max_prefix_limit or '
|
||||
'max_prefix_threshold.')
|
@@ -922,7 +922,7 @@ def main():
existing = invoke('get_existing', module, args)
|
||||
if existing.get('asn'):
|
||||
if (existing.get('asn') != module.params['asn'] and
|
||||
state == 'present'):
|
||||
state == 'present'):
|
||||
module.fail_json(msg='Another BGP ASN already exists.',
|
||||
proposed_asn=module.params['asn'],
|
||||
existing_asn=existing.get('asn'))
|
||||
|
@@ -317,9 +317,8 @@ def main():
if commands:
|
||||
if (existing.get('route_distinguisher') and
|
||||
proposed.get('route_distinguisher')):
|
||||
if (existing['route_distinguisher'] != proposed[
|
||||
'route_distinguisher'] and
|
||||
proposed['route_distinguisher'] != 'default'):
|
||||
if (existing['route_distinguisher'] != proposed['route_distinguisher'] and
|
||||
proposed['route_distinguisher'] != 'default'):
|
||||
WARNINGS.append('EVPN RD {0} was automatically removed. '
|
||||
'It is highly recommended to use a task '
|
||||
'(with default as value) to explicitly '
|
||||
|
@@ -116,7 +116,7 @@ def get_available_features(feature, module):
available_features[feature] = state
|
||||
else:
|
||||
if (available_features[feature] == 'disabled' and
|
||||
state == 'enabled'):
|
||||
state == 'enabled'):
|
||||
available_features[feature] = state
|
||||
|
||||
return available_features
|
||||
|
@@ -222,11 +222,11 @@ def get_commands(module, state, mode):
elif module.params['system_mode_maintenance_timeout']:
|
||||
timeout = get_maintenance_timeout(module)
|
||||
if (state == 'present' and
|
||||
timeout != module.params['system_mode_maintenance_timeout']):
|
||||
timeout != module.params['system_mode_maintenance_timeout']):
|
||||
commands.append('system mode maintenance timeout {0}'.format(
|
||||
module.params['system_mode_maintenance_timeout']))
|
||||
elif (state == 'absent' and
|
||||
timeout == module.params['system_mode_maintenance_timeout']):
|
||||
timeout == module.params['system_mode_maintenance_timeout']):
|
||||
commands.append('no system mode maintenance timeout {0}'.format(
|
||||
module.params['system_mode_maintenance_timeout']))
|
||||
|
@@ -236,9 +236,7 @@ def get_commands(module, state, mode):
elif module.params['system_mode_maintenance_on_reload_reset_reason']:
|
||||
reset_reasons = get_reset_reasons(module)
|
||||
if (state == 'present' and
|
||||
module.params[
|
||||
'system_mode_maintenance_on_reload_reset_reason'].lower() not
|
||||
in reset_reasons.lower()):
|
||||
module.params['system_mode_maintenance_on_reload_reset_reason'].lower() not in reset_reasons.lower()):
|
||||
commands.append('system mode maintenance on-reload '
|
||||
'reset-reason {0}'.format(
|
||||
module.params[
|
||||
|
@@ -343,7 +343,7 @@ def get_custom_command(existing_cmd, proposed, key, module):
|
||||
elif key.startswith('ip ospf message-digest-key'):
|
||||
if (proposed['message_digest_key_id'] != 'default' and
|
||||
'options' not in key):
|
||||
'options' not in key):
|
||||
if proposed['message_digest_encryption_type'] == '3des':
|
||||
encryption_type = '3'
|
||||
elif proposed['message_digest_encryption_type'] == 'cisco_type_7':
|
@@ -514,8 +514,8 @@ def main():
proposed['area'] = normalize_area(proposed['area'], module)
|
||||
result = {}
|
||||
if (state == 'present' or (state == 'absent' and
|
||||
existing.get('ospf') == proposed['ospf'] and
|
||||
existing.get('area') == proposed['area'])):
|
||||
existing.get('ospf') == proposed['ospf'] and
|
||||
existing.get('area') == proposed['area'])):
|
||||
|
||||
candidate = CustomNetworkConfig(indent=3)
|
||||
invoke('state_%s' % state, module, existing, proposed, candidate)
|
||||
|
@@ -456,7 +456,7 @@ def main():
result['updates'] = action_results
|
||||
|
||||
if (action == 'create' and
|
||||
module.params['save_snapshot_locally']):
|
||||
module.params['save_snapshot_locally']):
|
||||
snapshot = get_snapshot(module)
|
||||
written_file = write_on_file(snapshot,
|
||||
module.params['snapshot_name'], module)
|
||||
|
@@ -405,10 +405,9 @@ def main():
commands = ['no vlan ' + vlan_id]
|
||||
elif state == 'present':
|
||||
if (existing.get('mapped_vni') == '0' and
|
||||
proposed.get('mapped_vni') == 'default'):
|
||||
proposed.get('mapped_vni') == 'default'):
|
||||
proposed.pop('mapped_vni')
|
||||
delta = dict(set(
|
||||
proposed.items()).difference(existing.items()))
|
||||
delta = dict(set(proposed.items()).difference(existing.items()))
|
||||
if delta or not existing:
|
||||
commands = get_vlan_config_commands(delta, vlan_id)
|
||||
|
@@ -418,7 +417,7 @@ def main():
if commands:
|
||||
if existing.get('mapped_vni') and state != 'absent':
|
||||
if (existing.get('mapped_vni') != proposed.get('mapped_vni') and
|
||||
existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'):
|
||||
existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'):
|
||||
commands.insert(1, 'no vn-segment')
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True,
|
||||
|
@@ -622,8 +622,7 @@ class NetAppESeriesStoragePool(object):
|
||||
@property
|
||||
def reserved_drive_count_differs(self):
|
||||
if int(self.pool_detail['volumeGroupData']['diskPoolData'][
|
||||
'reconstructionReservedDriveCount']) != self.reserve_drive_count:
|
||||
if int(self.pool_detail['volumeGroupData']['diskPoolData']['reconstructionReservedDriveCount']) != self.reserve_drive_count:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
@@ -347,9 +347,9 @@ class Block(Base, Become, Conditional, Taggable):
for task in target:
|
||||
if isinstance(task, Block):
|
||||
tmp_list.append(evaluate_block(task))
|
||||
elif task.action == 'meta' \
|
||||
or (task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=all_vars)) \
|
||||
or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
|
||||
elif (task.action == 'meta' or
|
||||
(task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=all_vars)) or
|
||||
task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars)):
|
||||
tmp_list.append(task)
|
||||
return tmp_list
|
||||
|
||||
|
@@ -220,7 +220,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
not wrap_async, # async does not support pipelining
|
||||
self._play_context.become_method != 'su', # su does not work with pipelining,
|
||||
# FIXME: we might need to make become_method exclusion a configurable list
|
||||
]:
|
||||
]:
|
||||
if not condition:
|
||||
return False
|
||||
|
||||
|
@@ -178,9 +178,9 @@ def _private_query(v, value):
|
||||
def _public_query(v, value):
|
||||
v_ip = netaddr.IPAddress(str(v.ip))
|
||||
if v_ip.is_unicast() and not v_ip.is_private() and \
|
||||
not v_ip.is_loopback() and not v_ip.is_netmask() and \
|
||||
not v_ip.is_hostmask():
|
||||
if (v_ip.is_unicast() and not v_ip.is_private() and
|
||||
not v_ip.is_loopback() and not v_ip.is_netmask() and
|
||||
not v_ip.is_hostmask()):
|
||||
return value
|
||||
|
||||
def _revdns_query(v):
|
||||
|
@@ -1,7 +1,6 @@
lib/ansible/cli/__init__.py
|
||||
lib/ansible/cli/galaxy.py
|
||||
lib/ansible/cli/playbook.py
|
||||
lib/ansible/compat/six/__init__.py
|
||||
lib/ansible/constants.py
|
||||
lib/ansible/errors/__init__.py
|
||||
lib/ansible/executor/play_iterator.py
|
@@ -59,7 +58,6 @@ lib/ansible/modules/cloud/amazon/s3.py
lib/ansible/modules/cloud/amazon/s3_lifecycle.py
|
||||
lib/ansible/modules/cloud/amazon/s3_sync.py
|
||||
lib/ansible/modules/cloud/amazon/s3_website.py
|
||||
lib/ansible/modules/cloud/amazon/sns_topic.py
|
||||
lib/ansible/modules/cloud/amazon/sts_assume_role.py
|
||||
lib/ansible/modules/cloud/amazon/sts_session_token.py
|
||||
lib/ansible/modules/cloud/azure/azure.py
|
@@ -73,18 +71,14 @@ lib/ansible/modules/cloud/cloudscale/cloudscale_server.py
lib/ansible/modules/cloud/cloudstack/cs_host.py
|
||||
lib/ansible/modules/cloud/cloudstack/cs_instance.py
|
||||
lib/ansible/modules/cloud/cloudstack/cs_iso.py
|
||||
lib/ansible/modules/cloud/cloudstack/cs_portforward.py
|
||||
lib/ansible/modules/cloud/digital_ocean/digital_ocean.py
|
||||
lib/ansible/modules/cloud/google/gc_storage.py
|
||||
lib/ansible/modules/cloud/google/gce_tag.py
|
||||
lib/ansible/modules/cloud/google/gcpubsub.py
|
||||
lib/ansible/modules/cloud/misc/ovirt.py
|
||||
lib/ansible/modules/cloud/misc/proxmox.py
|
||||
lib/ansible/modules/cloud/misc/proxmox_kvm.py
|
||||
lib/ansible/modules/cloud/misc/proxmox_template.py
|
||||
lib/ansible/modules/cloud/misc/serverless.py
|
||||
lib/ansible/modules/cloud/openstack/_nova_compute.py
|
||||
lib/ansible/modules/cloud/openstack/os_nova_host_aggregate.py
|
||||
lib/ansible/modules/cloud/packet/packet_device.py
|
||||
lib/ansible/modules/cloud/packet/packet_sshkey.py
|
||||
lib/ansible/modules/cloud/profitbricks/profitbricks.py
|
@@ -111,8 +105,6 @@ lib/ansible/modules/database/postgresql/postgresql_ext.py
lib/ansible/modules/database/postgresql/postgresql_privs.py
|
||||
lib/ansible/modules/database/postgresql/postgresql_schema.py
|
||||
lib/ansible/modules/database/postgresql/postgresql_user.py
|
||||
lib/ansible/modules/database/vertica/vertica_schema.py
|
||||
lib/ansible/modules/database/vertica/vertica_user.py
|
||||
lib/ansible/modules/files/acl.py
|
||||
lib/ansible/modules/files/archive.py
|
||||
lib/ansible/modules/files/copy.py
|
@@ -139,16 +131,6 @@ lib/ansible/modules/network/f5/bigip_pool_member.py
lib/ansible/modules/network/f5/bigip_virtual_server.py
|
||||
lib/ansible/modules/network/haproxy.py
|
||||
lib/ansible/modules/network/nmcli.py
|
||||
lib/ansible/modules/network/nxos/nxos_bgp.py
|
||||
lib/ansible/modules/network/nxos/nxos_bgp_af.py
|
||||
lib/ansible/modules/network/nxos/nxos_bgp_neighbor.py
|
||||
lib/ansible/modules/network/nxos/nxos_bgp_neighbor_af.py
|
||||
lib/ansible/modules/network/nxos/nxos_evpn_vni.py
|
||||
lib/ansible/modules/network/nxos/nxos_feature.py
|
||||
lib/ansible/modules/network/nxos/nxos_gir.py
|
||||
lib/ansible/modules/network/nxos/nxos_interface_ospf.py
|
||||
lib/ansible/modules/network/nxos/nxos_snapshot.py
|
||||
lib/ansible/modules/network/nxos/nxos_vlan.py
|
||||
lib/ansible/modules/network/panos/panos_nat_policy.py
|
||||
lib/ansible/modules/network/snmp_facts.py
|
||||
lib/ansible/modules/notification/hall.py
|
@@ -210,7 +192,6 @@ lib/ansible/modules/windows/win_uri.py
lib/ansible/modules/windows/win_webpicmd.py
|
||||
lib/ansible/parsing/mod_args.py
|
||||
lib/ansible/playbook/attribute.py
|
||||
lib/ansible/playbook/block.py
|
||||
lib/ansible/playbook/role/__init__.py
|
||||
lib/ansible/playbook/role/metadata.py
|
||||
lib/ansible/plugins/action/set_fact.py
|
@@ -223,7 +204,6 @@ lib/ansible/plugins/connection/accelerate.py
lib/ansible/plugins/connection/paramiko_ssh.py
|
||||
lib/ansible/plugins/connection/ssh.py
|
||||
lib/ansible/plugins/connection/winrm.py
|
||||
lib/ansible/plugins/filter/ipaddr.py
|
||||
lib/ansible/plugins/lookup/first_found.py
|
||||
lib/ansible/plugins/shell/fish.py
|
||||
lib/ansible/plugins/shell/sh.py
|
||||
|
@@ -1,5 +1 @@
E111
|
||||
E114
|
||||
E125
|
||||
E129
|
||||
E501
|
||||