1
0
Fork 0
mirror of https://github.com/ansible-collections/community.general.git synced 2024-09-14 20:13:21 +02:00

PEP 8 whitespace cleanup. (#20783)

* PEP 8 E271 whitespace cleanup.
* PEP 8 W293 whitespace cleanup.
* Fix whitespace issue from recent PR.
This commit is contained in:
Matt Clay 2017-01-27 15:45:23 -08:00 committed by GitHub
parent 802fbcadf8
commit 95789f3949
132 changed files with 287 additions and 313 deletions

View file

@ -142,7 +142,7 @@ def generate_inv_from_api(enterprise_entity,config):
break break
else: else:
vm_nic = None vm_nic = None
vm_state = True vm_state = True
# From abiquo.ini: Only adding to inventory VMs deployed # From abiquo.ini: Only adding to inventory VMs deployed
if ((config.getboolean('defaults', 'deployed_only') == True) and (vmcollection['state'] == 'NOT_ALLOCATED')): if ((config.getboolean('defaults', 'deployed_only') == True) and (vmcollection['state'] == 'NOT_ALLOCATED')):

View file

@ -145,7 +145,7 @@ class LibcloudInventory(object):
self.cache_path_cache = cache_path + "/ansible-libcloud.cache" self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
self.cache_path_index = cache_path + "/ansible-libcloud.index" self.cache_path_index = cache_path + "/ansible-libcloud.index"
self.cache_max_age = config.getint('cache', 'cache_max_age') self.cache_max_age = config.getint('cache', 'cache_max_age')
def parse_cli_args(self): def parse_cli_args(self):
''' '''
@ -225,7 +225,7 @@ class LibcloudInventory(object):
# Inventory: Group by key pair # Inventory: Group by key pair
if node.extra['key_name']: if node.extra['key_name']:
self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest) self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
# Inventory: Group by security group, quick thing to handle single sg # Inventory: Group by security group, quick thing to handle single sg
if node.extra['security_group']: if node.extra['security_group']:
self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest) self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)

View file

@ -289,7 +289,7 @@ class ConsulInventory(object):
and the node name add each entry in the dictionary to the the node's and the node name add each entry in the dictionary to the the node's
metadata ''' metadata '''
node = node_data['Node'] node = node_data['Node']
if self.config.has_config('kv_metadata'): if self.config.has_config('kv_metadata'):
key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node']) key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
index, metadata = self.consul_api.kv.get(key) index, metadata = self.consul_api.kv.get(key)
if metadata and metadata['Value']: if metadata and metadata['Value']:
@ -305,7 +305,7 @@ class ConsulInventory(object):
kv_groups config value and the node name add the node address to each kv_groups config value and the node name add the node address to each
group found ''' group found '''
node = node_data['Node'] node = node_data['Node']
if self.config.has_config('kv_groups'): if self.config.has_config('kv_groups'):
key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node']) key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
index, groups = self.consul_api.kv.get(key) index, groups = self.consul_api.kv.get(key)
if groups and groups['Value']: if groups and groups['Value']:

View file

@ -77,7 +77,7 @@ def get_a_ssh_config(box_name):
if options.list: if options.list:
ssh_config = get_ssh_config() ssh_config = get_ssh_config()
hosts = { 'coreos': []} hosts = { 'coreos': []}
for data in ssh_config: for data in ssh_config:
hosts['coreos'].append(data['Host']) hosts['coreos'].append(data['Host'])

View file

@ -18,7 +18,7 @@ def initialize():
except AttributeError: except AttributeError:
#FreeIPA < 4.0 compatibility #FreeIPA < 4.0 compatibility
api.Backend.xmlclient.connect() api.Backend.xmlclient.connect()
return api return api
def list_groups(api): def list_groups(api):
@ -39,7 +39,7 @@ def list_groups(api):
if 'member_host' in hostgroup: if 'member_host' in hostgroup:
members = [host for host in hostgroup['member_host']] members = [host for host in hostgroup['member_host']]
if 'memberindirect_host' in hostgroup: if 'memberindirect_host' in hostgroup:
members += (host for host in hostgroup['memberindirect_host']) members += (host for host in hostgroup['memberindirect_host'])
inventory[hostgroup['cn'][0]] = {'hosts': [host for host in members]} inventory[hostgroup['cn'][0]] = {'hosts': [host for host in members]}
for member in members: for member in members:
@ -48,7 +48,7 @@ def list_groups(api):
inventory['_meta'] = {'hostvars': hostvars} inventory['_meta'] = {'hostvars': hostvars}
inv_string = json.dumps(inventory, indent=1, sort_keys=True) inv_string = json.dumps(inventory, indent=1, sort_keys=True)
print(inv_string) print(inv_string)
return None return None
def parse_args(): def parse_args():

View file

@ -220,7 +220,7 @@ elif options.host:
print('Problem executing the command "%s inventory": %s' % print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr) (SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2) sys.exit(2)
if options.human: if options.human:
print('Host: %s' % options.host) print('Host: %s' % options.host)
for k, v in iteritems(host_details): for k, v in iteritems(host_details):

View file

@ -58,7 +58,7 @@ try:
import requests import requests
except: except:
sys.exit('requests package is required for this inventory script') sys.exit('requests package is required for this inventory script')
CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml'] CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']

View file

@ -3,10 +3,10 @@
import optparse import optparse
from jinja2 import Environment, FileSystemLoader from jinja2 import Environment, FileSystemLoader
from ansible.playbook import Play from ansible.playbook import Play
from ansible.playbook.block import Block from ansible.playbook.block import Block
from ansible.playbook.role import Role from ansible.playbook.role import Role
from ansible.playbook.task import Task from ansible.playbook.task import Task
template_file = 'playbooks_directives.rst.j2' template_file = 'playbooks_directives.rst.j2'
oblist = {} oblist = {}

View file

@ -59,7 +59,7 @@ class AggregateStats:
if host is None: if host is None:
host = '_run' host = '_run'
if host not in self.custom: if host not in self.custom:
self.custom[host] = {which: what} self.custom[host] = {which: what}
else: else:
self.custom[host][which] = what self.custom[host][which] = what

View file

@ -44,12 +44,12 @@ class GalaxyLogin(object):
''' Class to handle authenticating user with Galaxy API prior to performing CUD operations ''' ''' Class to handle authenticating user with Galaxy API prior to performing CUD operations '''
GITHUB_AUTH = 'https://api.github.com/authorizations' GITHUB_AUTH = 'https://api.github.com/authorizations'
def __init__(self, galaxy, github_token=None): def __init__(self, galaxy, github_token=None):
self.galaxy = galaxy self.galaxy = galaxy
self.github_username = None self.github_username = None
self.github_password = None self.github_password = None
if github_token == None: if github_token == None:
self.get_credentials() self.get_credentials()
@ -61,7 +61,7 @@ class GalaxyLogin(object):
display.display("The password will not be displayed." + u'\n\n', screen_only=True) display.display("The password will not be displayed." + u'\n\n', screen_only=True)
display.display("Use " + stringc("--github-token",'yellow') + display.display("Use " + stringc("--github-token",'yellow') +
" if you do not want to enter your password." + u'\n\n', screen_only=True) " if you do not want to enter your password." + u'\n\n', screen_only=True)
try: try:
self.github_username = raw_input("Github Username: ") self.github_username = raw_input("Github Username: ")
except: except:

View file

@ -208,7 +208,7 @@ class GalaxyRole(object):
# create tar file from scm url # create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(**self.spec) tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src: elif self.src:
if os.path.isfile(self.src): if os.path.isfile(self.src):
# installing a local tar.gz # installing a local tar.gz
local_file = True local_file = True
tmp_file = self.src tmp_file = self.src

View file

@ -42,7 +42,7 @@ class GalaxyToken(object):
self.config = yaml.safe_load(self.__open_config_for_read()) self.config = yaml.safe_load(self.__open_config_for_read())
if not self.config: if not self.config:
self.config = {} self.config = {}
def __open_config_for_read(self): def __open_config_for_read(self):
if os.path.isfile(self.file): if os.path.isfile(self.file):
display.vvv('Opened %s' % self.file) display.vvv('Opened %s' % self.file)
@ -57,11 +57,11 @@ class GalaxyToken(object):
def set(self, token): def set(self, token):
self.config['token'] = token self.config['token'] = token
self.save() self.save()
def get(self): def get(self):
return self.config.get('token', None) return self.config.get('token', None)
def save(self): def save(self):
with open(self.file,'w') as f: with open(self.file,'w') as f:
yaml.safe_dump(self.config,f,default_flow_style=False) yaml.safe_dump(self.config,f,default_flow_style=False)

View file

@ -333,7 +333,7 @@ class AzureRMModuleBase(object):
def _get_credentials(self, params): def _get_credentials(self, params):
# Get authentication credentials. # Get authentication credentials.
# Precedence: module parameters-> environment variables-> default profile in ~/.azure/credentials. # Precedence: module parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials') self.log('Getting credentials')
arg_credentials = dict() arg_credentials = dict()
@ -345,11 +345,11 @@ class AzureRMModuleBase(object):
self.log('Retrieving credentials with profile parameter.') self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile']) credentials = self._get_profile(arg_credentials['profile'])
return credentials return credentials
if arg_credentials['subscription_id']: if arg_credentials['subscription_id']:
self.log('Received credentials from parameters.') self.log('Received credentials from parameters.')
return arg_credentials return arg_credentials
# try environment # try environment
env_credentials = self._get_env_credentials() env_credentials = self._get_env_credentials()
if env_credentials: if env_credentials:

View file

@ -368,7 +368,7 @@ class Facts(object):
self.facts['service_mgr'] = proc_1_map.get(proc_1, proc_1) self.facts['service_mgr'] = proc_1_map.get(proc_1, proc_1)
# start with the easy ones # start with the easy ones
elif self.facts['distribution'] == 'MacOSX': elif self.facts['distribution'] == 'MacOSX':
#FIXME: find way to query executable, version matching is not ideal #FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'): if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
self.facts['service_mgr'] = 'launchd' self.facts['service_mgr'] = 'launchd'
@ -2155,7 +2155,7 @@ class AIX(Hardware):
rc, out, err = self.module.run_command(cmd) rc, out, err = self.module.run_command(cmd)
if rc == 0 and out: if rc == 0 and out:
pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)',out).group(1) pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)',out).group(1)
for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*',m.group(0)): for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*',m.group(0)):
pv_info = { 'pv_name': n.group(1), pv_info = { 'pv_name': n.group(1),
'pv_state': n.group(2), 'pv_state': n.group(2),
'total_pps': n.group(3), 'total_pps': n.group(3),

View file

@ -127,7 +127,7 @@ class Rhsm(RegistrationBase):
for k,v in kwargs.items(): for k,v in kwargs.items():
if re.search(r'^(system|rhsm)_', k): if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_','.'), v)) args.append('--%s=%s' % (k.replace('_','.'), v))
self.module.run_command(args, check_rc=True) self.module.run_command(args, check_rc=True)
@property @property

View file

@ -644,7 +644,7 @@ def replace(connection, module):
instances = props['instances'] instances = props['instances']
if replace_instances: if replace_instances:
instances = replace_instances instances = replace_instances
#check if min_size/max_size/desired capacity have been specified and if not use ASG values #check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None: if min_size is None:
min_size = as_group.min_size min_size = as_group.min_size
@ -674,7 +674,7 @@ def replace(connection, module):
if not old_instances: if not old_instances:
changed = False changed = False
return(changed, props) return(changed, props)
# set temporary settings and wait for them to be reached # set temporary settings and wait for them to be reached
# This should get overwritten if the number of instances left is less than the batch size. # This should get overwritten if the number of instances left is less than the batch size.
@ -827,7 +827,7 @@ def wait_for_term_inst(connection, module, term_instances):
lifecycle = instance_facts[i]['lifecycle_state'] lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status'] health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health )) log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
if lifecycle == 'Terminating' or health == 'Unhealthy': if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1 count += 1
time.sleep(10) time.sleep(10)

View file

@ -148,7 +148,7 @@ class Ec2CustomerGatewayManager:
CustomerGatewayId=gw_id CustomerGatewayId=gw_id
) )
return response return response
def ensure_cgw_present(self, bgp_asn, ip_address): def ensure_cgw_present(self, bgp_asn, ip_address):
response = self.ec2.create_customer_gateway( response = self.ec2.create_customer_gateway(
DryRun=False, DryRun=False,

View file

@ -215,7 +215,7 @@ class ElbInformation(object):
elb_array.append(existing_lb) elb_array.append(existing_lb)
else: else:
elb_array = all_elbs elb_array = all_elbs
return list(map(self._get_elb_info, elb_array)) return list(map(self._get_elb_info, elb_array))
def main(): def main():

View file

@ -143,14 +143,14 @@ def main():
resource = module.params.get('resource') resource = module.params.get('resource')
tags = module.params.get('tags') tags = module.params.get('tags')
state = module.params.get('state') state = module.params.get('state')
ec2 = ec2_connect(module) ec2 = ec2_connect(module)
# We need a comparison here so that we can accurately report back changed status. # We need a comparison here so that we can accurately report back changed status.
# Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate. # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
filters = {'resource-id' : resource} filters = {'resource-id' : resource}
gettags = ec2.get_all_tags(filters=filters) gettags = ec2.get_all_tags(filters=filters)
dictadd = {} dictadd = {}
dictremove = {} dictremove = {}
baddict = {} baddict = {}
@ -170,7 +170,7 @@ def main():
tagger = ec2.create_tags(resource, dictadd) tagger = ec2.create_tags(resource, dictadd)
gettags = ec2.get_all_tags(filters=filters) gettags = ec2.get_all_tags(filters=filters)
module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True) module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
if state == 'absent': if state == 'absent':
if not tags: if not tags:
module.fail_json(msg="tags argument is required when state is absent") module.fail_json(msg="tags argument is required when state is absent")

View file

@ -97,7 +97,7 @@ def get_volume_info(volume):
}, },
'tags': volume.tags 'tags': volume.tags
} }
return volume_info return volume_info
def list_ec2_volumes(connection, module): def list_ec2_volumes(connection, module):

View file

@ -289,7 +289,7 @@ def main():
changed = False changed = False
new_options = collections.defaultdict(lambda: None) new_options = collections.defaultdict(lambda: None)
region, ec2_url, boto_params = get_aws_connection_info(module) region, ec2_url, boto_params = get_aws_connection_info(module)
connection = connect_to_aws(boto.vpc, region, **boto_params) connection = connect_to_aws(boto.vpc, region, **boto_params)
@ -378,9 +378,9 @@ def main():
# and remove old ones if that was requested # and remove old ones if that was requested
if params['delete_old'] and existing_options: if params['delete_old'] and existing_options:
remove_dhcp_options_by_id(connection, existing_options.id) remove_dhcp_options_by_id(connection, existing_options.id)
module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id) module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import * from ansible.module_utils.ec2 import *

View file

@ -72,7 +72,7 @@ def get_vpc_info(vpc):
classic_link = vpc.classic_link_enabled classic_link = vpc.classic_link_enabled
except AttributeError: except AttributeError:
classic_link = False classic_link = False
vpc_info = { 'id': vpc.id, vpc_info = { 'id': vpc.id,
'instance_tenancy': vpc.instance_tenancy, 'instance_tenancy': vpc.instance_tenancy,
'classic_link_enabled': classic_link, 'classic_link_enabled': classic_link,

View file

@ -133,7 +133,7 @@ def list_virtual_gateways(client, module):
snaked_vgws = [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw)) snaked_vgws = [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw))
for vgw in all_virtual_gateways['VpnGateways']] for vgw in all_virtual_gateways['VpnGateways']]
module.exit_json(virtual_gateways=snaked_vgws) module.exit_json(virtual_gateways=snaked_vgws)

View file

@ -457,7 +457,7 @@ def delete_group(module=None, iam=None, name=None):
iam.delete_group_policy(name, policy) iam.delete_group_policy(name, policy)
try: try:
iam.delete_group(name) iam.delete_group(name)
except boto.exception.BotoServerError as err: except boto.exception.BotoServerError as err:
error_msg = boto_exception(err) error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg: if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"

View file

@ -280,7 +280,7 @@ EXAMPLES = '''
command: reboot command: reboot
instance_name: database instance_name: database
wait: yes wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and # Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint. # then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI # Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
@ -298,7 +298,7 @@ EXAMPLES = '''
tags: tags:
Name: pg1_test_name_tag Name: pg1_test_name_tag
register: rds register: rds
- local_action: - local_action:
module: rds module: rds
command: modify command: modify
@ -844,7 +844,7 @@ def promote_db_instance(module, conn):
valid_vars = ['backup_retention', 'backup_window'] valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module) params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name') instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name) result = conn.get_db_instance(instance_name)
if not result: if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name) module.fail_json(msg="DB Instance %s does not exist" % instance_name)

View file

@ -130,7 +130,7 @@ def main():
except BotoServerError as e: except BotoServerError as e:
if e.error_code != 'DBSubnetGroupNotFoundFault': if e.error_code != 'DBSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message) module.fail_json(msg = e.error_message)
if state == 'absent': if state == 'absent':
if exists: if exists:
conn.delete_db_subnet_group(group_name) conn.delete_db_subnet_group(group_name)

View file

@ -470,7 +470,7 @@ def main():
if command_in == 'create': if command_in == 'create':
if ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None: if ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None:
module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier") module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier")
elif ( weight_in==None and region_in==None and failover_in==None ) and identifier_in!=None: elif ( weight_in==None and region_in==None and failover_in==None ) and identifier_in!=None:
module.fail_json(msg= "You have specified identifier which makes sense only if you specify one of: weight, region or failover.") module.fail_json(msg= "You have specified identifier which makes sense only if you specify one of: weight, region or failover.")

View file

@ -57,7 +57,7 @@ options:
- "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character." - "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character."
required: false required: false
default: index.html default: index.html
extends_documentation_fragment: extends_documentation_fragment:
- aws - aws
- ec2 - ec2
@ -76,14 +76,14 @@ EXAMPLES = '''
- s3_website: - s3_website:
name: mybucket.com name: mybucket.com
state: absent state: absent
# Configure an s3 bucket as a website with index and error pages # Configure an s3 bucket as a website with index and error pages
- s3_website: - s3_website:
name: mybucket.com name: mybucket.com
suffix: home.htm suffix: home.htm
error_key: errors/404.htm error_key: errors/404.htm
state: present state: present
''' '''
RETURN = ''' RETURN = '''
@ -170,7 +170,7 @@ def _create_website_configuration(suffix, error_key, redirect_all_requests):
def enable_or_update_bucket_as_website(client_connection, resource_connection, module): def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
bucket_name = module.params.get("name") bucket_name = module.params.get("name")
redirect_all_requests = module.params.get("redirect_all_requests") redirect_all_requests = module.params.get("redirect_all_requests")
# If redirect_all_requests is set then don't use the default suffix that has been set # If redirect_all_requests is set then don't use the default suffix that has been set
@ -263,7 +263,7 @@ def main():
redirect_all_requests=dict(type='str', required=False) redirect_all_requests=dict(type='str', required=False)
) )
) )
module = AnsibleModule( module = AnsibleModule(
argument_spec=argument_spec, argument_spec=argument_spec,
mutually_exclusive = [ mutually_exclusive = [
@ -273,7 +273,7 @@ def main():
if not HAS_BOTO3: if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module') module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region: if region:
@ -288,7 +288,7 @@ def main():
enable_or_update_bucket_as_website(client_connection, resource_connection, module) enable_or_update_bucket_as_website(client_connection, resource_connection, module)
elif state == 'absent': elif state == 'absent':
disable_bucket_as_website(client_connection, module) disable_bucket_as_website(client_connection, module)
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import * from ansible.module_utils.ec2 import *

View file

@ -187,7 +187,7 @@ def create_or_update_sqs_queue(connection, module):
queue = connection.create_queue(queue_name) queue = connection.create_queue(queue_name)
update_sqs_queue(queue, **queue_attributes) update_sqs_queue(queue, **queue_attributes)
result['changed'] = True result['changed'] = True
if not module.check_mode: if not module.check_mode:
result['queue_arn'] = queue.get_attributes('QueueArn')['QueueArn'] result['queue_arn'] = queue.get_attributes('QueueArn')['QueueArn']
result['default_visibility_timeout'] = queue.get_attributes('VisibilityTimeout')['VisibilityTimeout'] result['default_visibility_timeout'] = queue.get_attributes('VisibilityTimeout')['VisibilityTimeout']
@ -195,7 +195,7 @@ def create_or_update_sqs_queue(connection, module):
result['maximum_message_size'] = queue.get_attributes('MaximumMessageSize')['MaximumMessageSize'] result['maximum_message_size'] = queue.get_attributes('MaximumMessageSize')['MaximumMessageSize']
result['delivery_delay'] = queue.get_attributes('DelaySeconds')['DelaySeconds'] result['delivery_delay'] = queue.get_attributes('DelaySeconds')['DelaySeconds']
result['receive_message_wait_time'] = queue.get_attributes('ReceiveMessageWaitTimeSeconds')['ReceiveMessageWaitTimeSeconds'] result['receive_message_wait_time'] = queue.get_attributes('ReceiveMessageWaitTimeSeconds')['ReceiveMessageWaitTimeSeconds']
except BotoServerError: except BotoServerError:
result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc() result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
module.fail_json(**result) module.fail_json(**result)

View file

@ -365,7 +365,7 @@ def create_virtual_machine(module, azure):
azure.get_role(name, name, name) azure.get_role(name, name, name)
except AzureMissingException: except AzureMissingException:
# vm does not exist; create it # vm does not exist; create it
if os_type == 'linux': if os_type == 'linux':
# Create linux configuration # Create linux configuration
disable_ssh_password_authentication = not password disable_ssh_password_authentication = not password
@ -563,7 +563,7 @@ def main():
cloud_service_raw = None cloud_service_raw = None
if module.params.get('state') == 'absent': if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure) (changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present': elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances # Changed is always set to true when provisioning new instances
if not module.params.get('name'): if not module.params.get('name'):

View file

@ -372,7 +372,7 @@ class AzureRMStorageBlob(AzureRMModuleBase):
self.log('Create container %s' % self.container) self.log('Create container %s' % self.container)
tags = None tags = None
if not self.blob and self.tags: if not self.blob and self.tags:
# when a blob is present, then tags are assigned at the blob level # when a blob is present, then tags are assigned at the blob level
tags = self.tags tags = self.tags

View file

@ -918,7 +918,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
interface_dict['properties'] = nic_dict['properties'] interface_dict['properties'] = nic_dict['properties']
# Expand public IPs to include config properties # Expand public IPs to include config properties
for interface in result['properties']['networkProfile']['networkInterfaces']: for interface in result['properties']['networkProfile']['networkInterfaces']:
for config in interface['properties']['ipConfigurations']: for config in interface['properties']['ipConfigurations']:
if config['properties'].get('publicIPAddress'): if config['properties'].get('publicIPAddress'):
pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id']) pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id'])

View file

@ -347,7 +347,7 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
try: try:
poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet) poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)
new_vnet = self.get_poller_result(poller) new_vnet = self.get_poller_result(poller)
except Exception as exc: except Exception as exc:
self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc))) self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
return virtual_network_to_dict(new_vnet) return virtual_network_to_dict(new_vnet)

View file

@ -232,7 +232,7 @@ class AnsibleCloudStackInstanceFacts(AnsibleCloudStack):
if not instance: if not instance:
self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name')) self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name'))
self.facts['cloudstack_instance'] = self.get_result(instance) self.facts['cloudstack_instance'] = self.get_result(instance)
return self.facts return self.facts
def get_result(self, instance): def get_result(self, instance):

View file

@ -293,7 +293,7 @@ class ImageManager(DockerBaseClass):
if repo_tag: if repo_tag:
self.name = repo self.name = repo
self.tag = repo_tag self.tag = repo_tag
if self.state in ['present', 'build']: if self.state in ['present', 'build']:
self.present() self.present()
elif self.state == 'absent': elif self.state == 'absent':

View file

@ -639,7 +639,7 @@ class ContainerManager(DockerBaseClass):
return options return options
def cmd_up(self): def cmd_up(self):
start_deps = self.dependencies start_deps = self.dependencies
service_names = self.services service_names = self.services
detached = True detached = True
@ -943,7 +943,7 @@ class ContainerManager(DockerBaseClass):
short_id=container.short_id short_id=container.short_id
)) ))
result['actions'].append(service_res) result['actions'].append(service_res)
if not self.check_mode and result['changed']: if not self.check_mode and result['changed']:
_, fd_name = tempfile.mkstemp(prefix="ansible") _, fd_name = tempfile.mkstemp(prefix="ansible")
try: try:

View file

@ -165,7 +165,7 @@ def grant_check(module, gs, obj):
module.fail_json(msg= str(e)) module.fail_json(msg= str(e))
return True return True
def key_check(module, gs, bucket, obj): def key_check(module, gs, bucket, obj):
try: try:
@ -228,7 +228,7 @@ def delete_key(module, gs, bucket, obj):
module.exit_json(msg="Object deleted from bucket ", changed=True) module.exit_json(msg="Object deleted from bucket ", changed=True)
except gs.provider.storage_response_error as e: except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e)) module.fail_json(msg= str(e))
def create_dirkey(module, gs, bucket, obj): def create_dirkey(module, gs, bucket, obj):
try: try:
bucket = gs.lookup(bucket) bucket = gs.lookup(bucket)
@ -325,7 +325,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
else: else:
upload_gsfile(module, gs, bucket, obj, src, expiration) upload_gsfile(module, gs, bucket, obj, src, expiration)
if not bucket_rc: if not bucket_rc:
create_bucket(module, gs, bucket) create_bucket(module, gs, bucket)
upload_gsfile(module, gs, bucket, obj, src, expiration) upload_gsfile(module, gs, bucket, obj, src, expiration)
@ -333,7 +333,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
# If bucket exists but key doesn't, just upload. # If bucket exists but key doesn't, just upload.
if bucket_rc and not key_rc: if bucket_rc and not key_rc:
upload_gsfile(module, gs, bucket, obj, src, expiration) upload_gsfile(module, gs, bucket, obj, src, expiration)
def handle_delete(module, gs, bucket, obj): def handle_delete(module, gs, bucket, obj):
if bucket and not obj: if bucket and not obj:
if bucket_check(module, gs, bucket): if bucket_check(module, gs, bucket):
@ -350,7 +350,7 @@ def handle_delete(module, gs, bucket, obj):
module.exit_json(msg="Bucket does not exist.", changed=False) module.exit_json(msg="Bucket does not exist.", changed=False)
else: else:
module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True) module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
def handle_create(module, gs, bucket, obj): def handle_create(module, gs, bucket, obj):
if bucket and not obj: if bucket and not obj:
if bucket_check(module, gs, bucket): if bucket_check(module, gs, bucket):
@ -417,7 +417,7 @@ def main():
gs = boto.connect_gs(gs_access_key, gs_secret_key) gs = boto.connect_gs(gs_access_key, gs_secret_key)
except boto.exception.NoAuthHandlerFound as e: except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e)) module.fail_json(msg = str(e))
if mode == 'get': if mode == 'get':
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj): if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
module.fail_json(msg="Target bucket/key cannot be found", failed=True) module.fail_json(msg="Target bucket/key cannot be found", failed=True)
@ -425,7 +425,7 @@ def main():
download_gsfile(module, gs, bucket, obj, dest) download_gsfile(module, gs, bucket, obj, dest)
else: else:
handle_get(module, gs, bucket, obj, overwrite, dest) handle_get(module, gs, bucket, obj, overwrite, dest)
if mode == 'put': if mode == 'put':
if not path_check(src): if not path_check(src):
module.fail_json(msg="Local object for PUT does not exist", failed=True) module.fail_json(msg="Local object for PUT does not exist", failed=True)
@ -434,10 +434,10 @@ def main():
# Support for deleting an object if we have both params. # Support for deleting an object if we have both params.
if mode == 'delete': if mode == 'delete':
handle_delete(module, gs, bucket, obj) handle_delete(module, gs, bucket, obj)
if mode == 'create': if mode == 'create':
handle_create(module, gs, bucket, obj) handle_create(module, gs, bucket, obj)
if mode == 'get_url': if mode == 'get_url':
if bucket and obj: if bucket and obj:
if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):

View file

@ -93,7 +93,7 @@ EXAMPLES = '''
# Basic zone creation example. # Basic zone creation example.
- name: Create a basic zone with the minimum number of parameters. - name: Create a basic zone with the minimum number of parameters.
gcdns_zone: zone=example.com gcdns_zone: zone=example.com
# Zone removal example. # Zone removal example.
- name: Remove a zone. - name: Remove a zone.
gcdns_zone: zone=example.com state=absent gcdns_zone: zone=example.com state=absent

View file

@ -103,7 +103,7 @@ EXAMPLES = '''
- gce_img: - gce_img:
name: test-image name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme # Alternatively use the gs scheme
- gce_img: - gce_img:
name: test-image name: test-image

View file

@ -194,7 +194,7 @@ def publish_messages(message_list, topic):
attrs = message['attributes'] attrs = message['attributes']
batch.publish(bytes(msg), **attrs) batch.publish(bytes(msg), **attrs)
return True return True
def pull_messages(pull_params, sub): def pull_messages(pull_params, sub):
""" """
:rtype: tuple (output, changed) :rtype: tuple (output, changed)
@ -203,7 +203,7 @@ def pull_messages(pull_params, sub):
max_messages=pull_params.get('max_messages', None) max_messages=pull_params.get('max_messages', None)
message_ack = pull_params.get('message_ack', 'no') message_ack = pull_params.get('message_ack', 'no')
return_immediately = pull_params.get('return_immediately', False) return_immediately = pull_params.get('return_immediately', False)
output= [] output= []
pulled = sub.pull(return_immediately=return_immediately, pulled = sub.pull(return_immediately=return_immediately,
max_messages=max_messages) max_messages=max_messages)
@ -237,7 +237,7 @@ def main():
if not HAS_PYTHON26: if not HAS_PYTHON26:
module.fail_json( module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+") msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB: if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.") module.fail_json(msg="Please install google-cloud-pubsub library.")

View file

@ -98,7 +98,7 @@ except ImportError as e:
def list_func(data, member='name'): def list_func(data, member='name'):
"""Used for state=list.""" """Used for state=list."""
return [getattr(x, member) for x in data] return [getattr(x, member) for x in data]
def main(): def main():
module = AnsibleModule(argument_spec=dict( module = AnsibleModule(argument_spec=dict(
@ -112,7 +112,7 @@ def main():
if not HAS_PYTHON26: if not HAS_PYTHON26:
module.fail_json( module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+") msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB: if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.") module.fail_json(msg="Please install google-cloud-pubsub library.")

View file

@ -247,7 +247,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
# - need linode_id (entity) # - need linode_id (entity)
# - need disk_id for linode_id - create disk from distrib # - need disk_id for linode_id - create disk from distrib
# - need config_id for linode_id - create config (need kernel) # - need config_id for linode_id - create config (need kernel)
# Any create step triggers a job that need to be waited for. # Any create step triggers a job that need to be waited for.
if not servers: if not servers:
for arg in (name, plan, distribution, datacenter): for arg in (name, plan, distribution, datacenter):
@ -424,7 +424,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
instance['status'] = 'Restarting' instance['status'] = 'Restarting'
changed = True changed = True
instances.append(instance) instances.append(instance)
elif state in ('absent', 'deleted'): elif state in ('absent', 'deleted'):
for server in servers: for server in servers:
instance = getInstanceDetails(api, server) instance = getInstanceDetails(api, server)

View file

@ -306,7 +306,7 @@ def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork,
# define network parameters # define network parameters
network_net = params.Network(name=vmnetwork) network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
try: try:
conn.vms.add(vmparams) conn.vms.add(vmparams)
except: except:
@ -502,7 +502,7 @@ def main():
else: else:
vm_stop(c, vmname) vm_stop(c, vmname)
module.exit_json(changed=True, msg="VM %s is shutting down" % vmname) module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
if state == 'restart': if state == 'restart':
if vm_status(c, vmname) == 'up': if vm_status(c, vmname) == 'up':
vm_restart(c, vmname) vm_restart(c, vmname)

View file

@ -1038,7 +1038,7 @@ def main():
time.sleep(1) time.sleep(1)
except Exception as e: except Exception as e:
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e )) module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'current': elif state == 'current':
status = {} status = {}
try: try:

View file

@ -421,7 +421,7 @@ class VirtNetwork(object):
def create(self, entryid): def create(self, entryid):
return self.conn.create(entryid) return self.conn.create(entryid)
def modify(self, entryid, xml): def modify(self, entryid, xml):
return self.conn.modify(entryid, xml) return self.conn.modify(entryid, xml)

View file

@ -180,7 +180,7 @@ def _add_gateway_router(neutron, module, router_id, network_id):
module.fail_json(msg = "Error in adding gateway to router: %s" % e.message) module.fail_json(msg = "Error in adding gateway to router: %s" % e.message)
return True return True
def _remove_gateway_router(neutron, module, router_id): def _remove_gateway_router(neutron, module, router_id):
try: try:
neutron.remove_gateway_router(router_id) neutron.remove_gateway_router(router_id)
except Exception as e: except Exception as e:

View file

@ -193,7 +193,7 @@ def _get_port_id(neutron, module, router_id, subnet_id):
module.fail_json( msg = "Error in listing ports: %s" % e.message) module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']: if not ports['ports']:
return None return None
for port in ports['ports']: for port in ports['ports']:
for subnet in port['fixed_ips']: for subnet in port['fixed_ips']:
if subnet['subnet_id'] == subnet_id: if subnet['subnet_id'] == subnet_id:
return port['id'] return port['id']
@ -209,7 +209,7 @@ def _add_interface_router(neutron, module, router_id, subnet_id):
module.fail_json(msg = "Error in adding interface to router: %s" % e.message) module.fail_json(msg = "Error in adding interface to router: %s" % e.message)
return True return True
def _remove_interface_router(neutron, module, router_id, subnet_id): def _remove_interface_router(neutron, module, router_id, subnet_id):
kwargs = { kwargs = {
'subnet_id': subnet_id 'subnet_id': subnet_id
} }

View file

@ -205,7 +205,7 @@ def main():
else: else:
if masters is None: if masters is None:
masters = [] masters = []
pre_update_zone = zone pre_update_zone = zone
changed = _system_state_change(state, email, changed = _system_state_change(state, email,
description, ttl, description, ttl,

View file

@ -346,7 +346,7 @@ def get_hostname_list(module):
_msg = ("If you set count>1, you should only specify one hostname " _msg = ("If you set count>1, you should only specify one hostname "
"with the %d formatter, not a list of hostnames.") "with the %d formatter, not a list of hostnames.")
raise Exception(_msg) raise Exception(_msg)
if (len(hostnames) == 1) and (count > 0): if (len(hostnames) == 1) and (count > 0):
hostname_spec = hostnames[0] hostname_spec = hostnames[0]
count_range = range(count_offset, count_offset + count) count_range = range(count_offset, count_offset + count)
@ -382,7 +382,7 @@ def get_device_id_list(module):
raise Exception("You specified too many devices, max is %d" % raise Exception("You specified too many devices, max is %d" %
MAX_DEVICES) MAX_DEVICES)
return device_ids return device_ids
def create_single_device(module, packet_conn, hostname): def create_single_device(module, packet_conn, hostname):
@ -430,7 +430,7 @@ def wait_for_ips(module, packet_conn, created_devices):
if all_have_public_ip(refreshed): if all_have_public_ip(refreshed):
return refreshed return refreshed
time.sleep(5) time.sleep(5)
raise Exception("Waiting for IP assignment timed out. Hostnames: %s" raise Exception("Waiting for IP assignment timed out. Hostnames: %s"
% [d.hostname for d in created_devices]) % [d.hostname for d in created_devices])

View file

@ -204,7 +204,7 @@ def act_on_sshkeys(target_state, module, packet_conn):
new_key_response = packet_conn.create_ssh_key( new_key_response = packet_conn.create_ssh_key(
newkey['label'], newkey['key']) newkey['label'], newkey['key'])
changed = True changed = True
matching_sshkeys.append(new_key_response) matching_sshkeys.append(new_key_response)
else: else:
# state is 'absent' => delete mathcing keys # state is 'absent' => delete mathcing keys

View file

@ -101,9 +101,9 @@ def rax_facts(module, address, name, server_id):
servers.append(cs.servers.get(server_id)) servers.append(cs.servers.get(server_id))
except Exception as e: except Exception as e:
pass pass
servers[:] = [server for server in servers if server.status != "DELETED"] servers[:] = [server for server in servers if server.status != "DELETED"]
if len(servers) > 1: if len(servers) > 1:
module.fail_json(msg='Multiple servers found matching provided ' module.fail_json(msg='Multiple servers found matching provided '
'search parameters') 'search parameters')

View file

@ -93,7 +93,7 @@ def find_vswitch_by_name(host, vswitch_name):
class VMwareHostVirtualSwitch(object): class VMwareHostVirtualSwitch(object):
def __init__(self, module): def __init__(self, module):
self.host_system = None self.host_system = None
self.content = None self.content = None
@ -132,7 +132,7 @@ class VMwareHostVirtualSwitch(object):
# Source from # Source from
# https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py # https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py
def state_create_vswitch(self): def state_create_vswitch(self):
vss_spec = vim.host.VirtualSwitch.Specification() vss_spec = vim.host.VirtualSwitch.Specification()
vss_spec.numPorts = self.number_of_ports vss_spec.numPorts = self.number_of_ports
@ -146,7 +146,7 @@ class VMwareHostVirtualSwitch(object):
def state_destroy_vswitch(self): def state_destroy_vswitch(self):
config = vim.host.NetworkConfig() config = vim.host.NetworkConfig()
for portgroup in self.host_system.configManager.networkSystem.networkInfo.portgroup: for portgroup in self.host_system.configManager.networkSystem.networkInfo.portgroup:
if portgroup.spec.vswitchName == self.vss.name: if portgroup.spec.vswitchName == self.vss.name:
portgroup_config = vim.host.PortGroup.Config() portgroup_config = vim.host.PortGroup.Config()
@ -158,7 +158,7 @@ class VMwareHostVirtualSwitch(object):
portgroup_config.spec.vswitchName = portgroup.spec.vswitchName portgroup_config.spec.vswitchName = portgroup.spec.vswitchName
portgroup_config.spec.policy = vim.host.NetworkPolicy() portgroup_config.spec.policy = vim.host.NetworkPolicy()
config.portgroup.append(portgroup_config) config.portgroup.append(portgroup_config)
self.host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify") self.host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name) self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name)
self.module.exit_json(changed=True) self.module.exit_json(changed=True)
@ -170,15 +170,15 @@ class VMwareHostVirtualSwitch(object):
host = get_all_objs(self.content, [vim.HostSystem]) host = get_all_objs(self.content, [vim.HostSystem])
if not host: if not host:
self.module.fail_json(msg="Unable to find host") self.module.fail_json(msg="Unable to find host")
self.host_system = host.keys()[0] self.host_system = host.keys()[0]
self.vss = find_vswitch_by_name(self.host_system, self.switch_name) self.vss = find_vswitch_by_name(self.host_system, self.switch_name)
if self.vss is None: if self.vss is None:
return 'absent' return 'absent'
else: else:
return 'present' return 'present'
def main(): def main():
argument_spec = vmware_argument_spec() argument_spec = vmware_argument_spec()

View file

@ -999,7 +999,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
except (KeyError, ValueError): except (KeyError, ValueError):
vsphere_client.disconnect() vsphere_client.disconnect()
module.fail_json(msg="Error in '%s' definition. Size needs to be specified as an integer." % disk) module.fail_json(msg="Error in '%s' definition. Size needs to be specified as an integer." % disk)
# Make sure the new disk size is higher than the current value # Make sure the new disk size is higher than the current value
dev = dev_list[disk_num] dev = dev_list[disk_num]
if disksize < int(dev.capacityInKB): if disksize < int(dev.capacityInKB):

View file

@ -148,7 +148,7 @@ def main():
existing_app = app_map.get(app_name) existing_app = app_map.get(app_name)
result = {} result = {}
# Here's where the real stuff happens # Here's where the real stuff happens
if app_state == 'present': if app_state == 'present':

View file

@ -84,7 +84,7 @@ options:
EXAMPLES = ''' EXAMPLES = '''
# This will also create a default DB user with the same # This will also create a default DB user with the same
# name as the database, and the specified password. # name as the database, and the specified password.
- name: Create a database - name: Create a database
webfaction_db: webfaction_db:
name: "{{webfaction_user}}_db1" name: "{{webfaction_user}}_db1"
@ -145,7 +145,7 @@ def main():
existing_user = user_map.get(db_name) existing_user = user_map.get(db_name)
result = {} result = {}
# Here's where the real stuff happens # Here's where the real stuff happens
if db_state == 'present': if db_state == 'present':
@ -175,16 +175,16 @@ def main():
# If this isn't a dry run... # If this isn't a dry run...
if not module.check_mode: if not module.check_mode:
if not (existing_db or existing_user): if not (existing_db or existing_user):
module.exit_json(changed = False,) module.exit_json(changed = False,)
if existing_db: if existing_db:
# Delete the db if it exists # Delete the db if it exists
result.update( result.update(
webfaction.delete_db(session_id, db_name, db_type) webfaction.delete_db(session_id, db_name, db_type)
) )
if existing_user: if existing_user:
# Delete the default db user if it exists # Delete the default db user if it exists
result.update( result.update(

View file

@ -121,7 +121,7 @@ def main():
existing_domain = domain_map.get(domain_name) existing_domain = domain_map.get(domain_name)
result = {} result = {}
# Here's where the real stuff happens # Here's where the real stuff happens
if domain_state == 'present': if domain_state == 'present':

View file

@ -107,7 +107,7 @@ def main():
existing_mailbox = mailbox_name in mailbox_list existing_mailbox = mailbox_name in mailbox_list
result = {} result = {}
# Here's where the real stuff happens # Here's where the real stuff happens
if site_state == 'present': if site_state == 'present':

View file

@ -53,7 +53,7 @@ options:
required: false required: false
choices: ['present', 'absent'] choices: ['present', 'absent']
default: "present" default: "present"
host: host:
description: description:
- The webfaction host on which the site should be created. - The webfaction host on which the site should be created.
@ -141,7 +141,7 @@ def main():
existing_site = site_map.get(site_name) existing_site = site_map.get(site_name)
result = {} result = {}
# Here's where the real stuff happens # Here's where the real stuff happens
if site_state == 'present': if site_state == 'present':

View file

@ -395,7 +395,7 @@ def parse_service(module):
module.fail_json( msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure the 'port' argument meaning 'service_port'?") module.fail_json( msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure the 'port' argument meaning 'service_port'?")
class ConsulService(): class ConsulService():
def __init__(self, service_id=None, name=None, address=None, port=-1, def __init__(self, service_id=None, name=None, address=None, port=-1,
tags=None, loaded=None): tags=None, loaded=None):

View file

@ -195,7 +195,7 @@ class PrivateKey(object):
} }
return result return result
def main(): def main():

View file

@ -163,7 +163,7 @@ class PublicKey(object):
} }
return result return result
def main(): def main():

View file

@ -161,7 +161,7 @@ def install_plugin(module, plugin_bin, plugin_name, version, url, proxy_host, pr
if rc != 0: if rc != 0:
reason = parse_error(out) reason = parse_error(out)
module.fail_json(msg=reason) module.fail_json(msg=reason)
return True, cmd, out, err return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name): def remove_plugin(module, plugin_bin, plugin_name):
@ -177,7 +177,7 @@ def remove_plugin(module, plugin_bin, plugin_name):
if rc != 0: if rc != 0:
reason = parse_error(out) reason = parse_error(out)
module.fail_json(msg=reason) module.fail_json(msg=reason)
return True, cmd, out, err return True, cmd, out, err
def main(): def main():

View file

@ -181,7 +181,7 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout):
if rc != 0: if rc != 0:
reason = parse_error(out) reason = parse_error(out)
module.fail_json(msg=reason) module.fail_json(msg=reason)
return True, cmd, out, err return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name): def remove_plugin(module, plugin_bin, plugin_name):
@ -191,12 +191,12 @@ def remove_plugin(module, plugin_bin, plugin_name):
if module.check_mode: if module.check_mode:
return True, cmd, "check mode", "" return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd) rc, out, err = module.run_command(cmd)
if rc != 0: if rc != 0:
reason = parse_error(out) reason = parse_error(out)
module.fail_json(msg=reason) module.fail_json(msg=reason)
return True, cmd, out, err return True, cmd, out, err
def main(): def main():

View file

@ -322,7 +322,7 @@ def main():
e = get_exception() e = get_exception()
module.fail_json(msg="unable to connect to database: %s" % e) module.fail_json(msg="unable to connect to database: %s" % e)
try: try:
old_value = r.config_get(name)[name] old_value = r.config_get(name)[name]
except Exception: except Exception:

View file

@ -179,7 +179,7 @@ def main():
ring_size = stats['ring_creation_size'] ring_size = stats['ring_creation_size']
rc, out, err = module.run_command([riak_bin, 'version'] ) rc, out, err = module.run_command([riak_bin, 'version'] )
version = out.strip() version = out.strip()
result = dict(node_name=node_name, result = dict(node_name=node_name,
nodes=nodes, nodes=nodes,
ring_size=ring_size, ring_size=ring_size,

View file

@ -181,7 +181,7 @@ def main():
login_password = module.params['login_password'] login_password = module.params['login_password']
login_host = module.params['login_host'] login_host = module.params['login_host']
login_port = module.params['login_port'] login_port = module.params['login_port']
login_querystring = login_host login_querystring = login_host
if login_port != "1433": if login_port != "1433":
login_querystring = "%s:%s" % (login_host, login_port) login_querystring = "%s:%s" % (login_host, login_port)

View file

@ -178,7 +178,7 @@ def main():
else: else:
if state == "absent": if state == "absent":
changed = ext_delete(cursor, ext) changed = ext_delete(cursor, ext)
elif state == "present": elif state == "present":
changed = ext_create(cursor, ext) changed = ext_create(cursor, ext)
except NotSupportedError: except NotSupportedError:

View file

@ -253,21 +253,21 @@ def main():
if lang_exists(cursor, lang): if lang_exists(cursor, lang):
lang_trusted = lang_istrusted(cursor, lang) lang_trusted = lang_istrusted(cursor, lang)
if (lang_trusted and not trust) or (not lang_trusted and trust): if (lang_trusted and not trust) or (not lang_trusted and trust):
if module.check_mode: if module.check_mode:
changed = True changed = True
else: else:
changed = lang_altertrust(cursor, lang, trust) changed = lang_altertrust(cursor, lang, trust)
else: else:
if module.check_mode: if module.check_mode:
changed = True changed = True
else: else:
changed = lang_add(cursor, lang, trust) changed = lang_add(cursor, lang, trust)
if force_trust: if force_trust:
changed = lang_altertrust(cursor, lang, trust) changed = lang_altertrust(cursor, lang, trust)
else: else:
if lang_exists(cursor, lang): if lang_exists(cursor, lang):
if module.check_mode: if module.check_mode:
changed = True changed = True
kw['lang_dropped'] = True kw['lang_dropped'] = True
else: else:

View file

@@ -688,7 +688,7 @@ def main():
module.fail_json(msg=str(e))
else:
if user_exists(cursor, user):
if module.check_mode:
changed = True
kw['user_removed'] = True
else:

View file

@@ -116,7 +116,7 @@ def get_configuration_facts(cursor, parameter_name=''):
'current_value': row.current_value,
'default_value': row.default_value}
return facts
def check(configuration_facts, parameter_name, current_value):
parameter_key = parameter_name.lower()
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():

View file

@@ -135,7 +135,7 @@ def update_roles(role_facts, cursor, role,
cursor.execute("revoke {0} from {1}".format(assigned_role, role))
for assigned_role in set(required) - set(existing):
cursor.execute("grant {0} to {1}".format(assigned_role, role))
def check(role_facts, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:

View file

@@ -182,7 +182,7 @@ def main():
# patch need an absolute file name
p.src = os.path.abspath(p.src)
changed = False
if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip):
try:

View file

@@ -121,12 +121,12 @@ def build_url(name, apiid, action, meter_id=None, cert_type=None):
elif action == "certificates":
return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type)
elif action == "tags":
return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id)
elif action == "delete":
return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
if meter_id is None:
url = build_url(name, apiid, action)
else:

View file

@@ -123,7 +123,7 @@ def main():
params["application_id"] = module.params["application_id"]
else:
module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]:
if module.params[item]:
params[item] = module.params[item]

View file

@@ -209,7 +209,7 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu
'Content-Type' : 'application/json',
}
request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}
if requester_id:
request_data['requester_id'] = requester_id
else:
@@ -235,7 +235,7 @@ def absent(module, name, user, passwd, token, requester_id, service):
'Content-Type' : 'application/json',
}
request_data = {}
if requester_id:
request_data['requester_id'] = requester_id
else:

View file

@@ -376,10 +376,10 @@ class Host(object):
if host['proxy_hostid'] != proxy_id:
return True
if host['name'] != visible_name:
return True
return False
# link or clear template of the host

View file

@@ -123,7 +123,7 @@ def chain(module):
if state in ('absent') and not config_present:
module.exit_json(changed=False)
if state in ('present'):
response = rest.put('chain[name="%s"]' % name, data={'name': name})
if response.status_code == 204:

View file

@@ -391,7 +391,7 @@ class CloudflareAPI(object):
error_msg += "; Failed to parse API response: {0}".format(content)
# received an error status but no data with details on what failed
if (info['status'] not in [200,304]) and (result is None):
self.module.fail_json(msg=error_msg)
if not result['success']:

View file

@@ -183,14 +183,14 @@ class Default(FactsBase):
return sw_name.text
else:
return ""
def parse_version(self, data):
sw_ver = data.find('./data/system-sw-state/sw-version/sw-version')
if sw_ver is not None:
return sw_ver.text
else:
return ""
def parse_hostname(self, data):
match = re.search(r'hostname\s+(\S+)', data, re.M)
if match:
@@ -224,7 +224,7 @@ class Hardware(FactsBase):
self.facts['cpu_arch'] = self.parse_cpu_arch(xml_data)
data = self.runner.get_command('show processes memory | grep Total')
match = self.parse_memory(data)
if match:
self.facts['memtotal_mb'] = int(match[0]) / 1024
@@ -236,7 +236,7 @@ class Hardware(FactsBase):
return cpu_arch.text
else:
return ""
def parse_memory(self, data):
return re.findall(r'\:\s*(\d+)', data, re.M)
@@ -372,7 +372,7 @@ class Interfaces(FactsBase):
lldp_facts[name].append(fact)
return lldp_facts
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,

View file

@@ -327,7 +327,7 @@ class Interfaces(FactsBase):
return match.group(3)
if flag==1:
return "null"
def parse_type(self, key, properties):
type_val, type_val_next = properties.split('--------- ------- --------------------- --------------------- --------------')
flag=1

View file

@@ -31,25 +31,25 @@ options:
- Account API Key.
required: true
default: null
account_secret:
description:
- Account Secret Key.
required: true
default: null
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution.
required: true
default: null
record_name:
description:
- Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless of the state argument.
required: false
default: null
record_type:
description:
- Record type.
@@ -63,20 +63,20 @@ options:
- "If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl)"
required: false
default: null
record_ttl:
description:
- record's "Time to live". Number of seconds the record remains cached in DNS servers.
required: false
default: 1800
state:
description:
- whether the record should exist or not
required: true
choices: [ 'present', 'absent' ]
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
@@ -89,7 +89,7 @@ options:
notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks.
requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''
@@ -102,7 +102,7 @@ EXAMPLES = '''
domain: my.com
state: present
register: response
# create / ensure the presence of a record
- dnsmadeeasy:
account_key: key
@@ -130,7 +130,7 @@ EXAMPLES = '''
state: present
record_name: test
register: response
# delete a record / ensure it is absent
- dnsmadeeasy:
account_key: key

View file

@@ -283,7 +283,7 @@ def map_config_to_obj(module):
'state': parse_state(out)
}
def map_params_to_obj(module):
obj = {
'http': module.params['http'],
'http_port': module.params['http_port'],
@@ -310,7 +310,7 @@ def collect_facts(module, result):
for each in out[0]['urls']:
intf, url = each.split(' : ')
key = str(intf).strip()
if key not in facts['eos_eapi_urls']:
facts['eos_eapi_urls'][key] = list()
facts['eos_eapi_urls'][key].append(str(url).strip())
result['ansible_facts'] = facts

View file

@@ -38,7 +38,7 @@ EXAMPLES = '''
# Retrieve switch/port information
- name: Gather information from lldp
lldp:
- name: Print each switch/port
debug:
msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}"
@@ -73,7 +73,7 @@ def gather_lldp():
current_dict = current_dict[path_component]
current_dict[final] = value
return output_dict
def main():
module = AnsibleModule({})
@@ -84,7 +84,7 @@ def main():
module.exit_json(ansible_facts=data)
except TypeError:
module.fail_json(msg="lldpctl command failed. is lldpd running?")
# import module snippets
from ansible.module_utils.basic import *

View file

@@ -492,7 +492,7 @@ def main():
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
server_type = module.params['server_type']
global_key = module.params['global_key']
encrypt_type = module.params['encrypt_type']

View file

@@ -428,7 +428,7 @@ def _match_dict(match_list, key_map):
def get_aaa_host_info(module, server_type, address):
aaa_host_info = {}
command = 'show run | inc {0}-server.host.{1}'.format(server_type, address)
body = execute_show_command(command, module, command_type='cli_show_ascii')
if body:
@@ -574,7 +574,7 @@ def main():
results['updates'] = cmds
results['changed'] = changed
results['end_state'] = end_state
module.exit_json(**results)

View file

@@ -612,7 +612,7 @@ def get_custom_string_value(config, arg, module):
elif arg.startswith('dampening'):
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(
PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
if arg == 'dampen_igp_metric' or arg == 'dampening_routemap':
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')

View file

@@ -295,7 +295,7 @@ def main():
supports_check_mode=True)
splitted_ssm_range = module.params['ssm_range'].split('.')
if len(splitted_ssm_range) != 4 and module.params['ssm_range'] != 'none':
module.fail_json(msg="Valid ssm_range values are multicast addresses "
"or the keyword 'none'.")

View file

@@ -495,10 +495,10 @@ def get_interface_mode(interface, intf_type, module):
def get_pim_interface(module, interface):
pim_interface = {}
command = 'show ip pim interface {0}'.format(interface)
body = execute_show_command(command, module,
command_type='cli_show_ascii', text=True)
if body:
if 'not running' not in body[0]:
body = execute_show_command(command, module)
@@ -552,7 +552,7 @@ def get_pim_interface(module, interface):
return {}
command = 'show run interface {0}'.format(interface)
body = execute_show_command(command, module, command_type='cli_show_ascii')
jp_configs = []

View file

@@ -378,7 +378,7 @@ def main():
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
location = module.params['location']
state = module.params['state']

View file

@@ -463,7 +463,7 @@ def main():
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
group = module.params['group'].lower()
state = module.params['state']

View file

@@ -97,7 +97,7 @@ def wakeonlan(module, mac, broadcast, port):
int(mac, 16)
except ValueError:
module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
# Create payload for magic packet
data = ''
padding = ''.join(['FFFFFFFFFFFF', mac * 20])

View file

@@ -142,7 +142,7 @@ def main():
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params

View file

@@ -782,7 +782,7 @@ def get_cache(module):
else:
module.fail_json(msg=str(e))
return cache
def main():
module = AnsibleModule(

View file

@@ -110,7 +110,7 @@ def update_package_db(module):
module.fail_json(msg="could not update package db: %s" % err)
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
@@ -122,7 +122,7 @@ def remove_packages(module, packages):
if rc != 0:
module.fail_json(msg="failed to remove %s: %s" % (package, err))
remove_c += 1
if remove_c > 0:
@@ -162,7 +162,7 @@ def main():
state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
@@ -182,6 +182,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()

View file

@@ -158,7 +158,7 @@ def install_overlay(module, name, list_url=None):
if layman.is_installed(name):
return False
if module.check_mode:
mymsg = 'Would add layman repo \'' + name + '\''
module.exit_json(changed=True, msg=mymsg)
@@ -195,7 +195,7 @@ def uninstall_overlay(module, name):
if not layman.is_installed(name):
return False
if module.check_mode:
mymsg = 'Would remove layman repo \'' + name + '\''
module.exit_json(changed=True, msg=mymsg)

View file

@@ -144,18 +144,18 @@ def main():
saturl = module.params['url']
user = module.params['user']
password = module.params['password']
#initialize connection
client = xmlrpclib.Server(saturl, verbose=0)
session = client.auth.login(user, password)
# get systemid
sys_id = get_systemid(client, session, systname)
# get channels for system
chans = base_channels(client, session, sys_id)
if state == 'present':
if channelname in chans:
module.exit_json(changed=False, msg="Channel %s already exists" % channelname)

View file

@@ -119,10 +119,10 @@ def update_package_db(module):
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db")
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
@@ -135,7 +135,7 @@ def remove_packages(module, packages):
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
@@ -189,7 +189,7 @@ def main():
force = dict(default=True, type='bool'),
no_recommends = dict(default=True, aliases=['no-recommends'], type='bool'),
package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(URPMI_PATH):
module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH))
@@ -212,6 +212,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()

View file

@@ -390,12 +390,12 @@ def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di
except Exception:
e = get_exception()
module.fail_json(msg="Failure talking to yum: %s" % e)
return [ po_to_nevra(p) for p in pkgs ]
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(dis_repos)]
myrepoq.extend(r_cmd)
@@ -442,7 +442,7 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_r
for pkg in pkgs:
if pkg in updates:
retpkgs.append(pkg)
return set([ po_to_nevra(p) for p in retpkgs ])
else:
@@ -455,12 +455,12 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_r
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
rc,out,err = module.run_command(cmd)
if rc == 0:
return set([ p for p in out.split('\n') if p.strip() ])
else:
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return set()
def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, dis_repos=None, installroot='/'):
@@ -725,7 +725,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos, i
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot):
found = True
res['results'].append('package providing %s is already installed' % (spec))
if found:
continue

View file

@@ -373,7 +373,7 @@ def package_present(m, name, want_latest):
def package_update_all(m):
"run update or patch on all available packages"
retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
if m.params['type'] == 'patch':
cmdname = 'patch'

View file

@@ -142,7 +142,7 @@ EXAMPLES = '''
- zypper_repository:
repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
auto_import_keys: yes
# Force refresh of a repository
- zypper_repository:
repo: 'http://my_internal_ci_repo/repo

Some files were not shown because too many files have changed in this diff