Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)
Enable more pylint rules and fix reported issues. (#30539)
* Enable pylint unreachable test.
* Enable pylint suppressed-message test.
* Enable pylint redundant-unittest-assert test.
* Enable pylint bad-open-mode test.
* Enable pylint signature-differs test.
* Enable pylint unnecessary-pass test.
* Enable pylint unnecessary-lambda test.
* Enable pylint raising-bad-type test.
* Enable pylint logging-not-lazy test.
* Enable pylint logging-format-interpolation test.
* Enable pylint useless-else-on-loop test.
This commit is contained in: parent 01563ccd5d, commit 7714dcd04e
36 changed files with 92 additions and 135 deletions
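Most of the hunks below apply pylint's logging-not-lazy and logging-format-interpolation fixes, replacing eager string formatting in log calls with lazy %-style arguments. A minimal runnable sketch of the pattern (the logger name and values are illustrative, not taken from this commit):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("demo")

    instance_id, lb = "i-0123456789abcdef0", "my-elb"

    # Flagged: the message string is built even if DEBUG logging is disabled.
    log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))

    # Preferred: formatting is deferred until the record is actually emitted.
    log.debug("De-registering %s from ELB %s", instance_id, lb)

With lazy arguments the formatting cost is skipped whenever the log level filters the record out, which is why the check is worth enforcing in hot paths.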
@@ -466,35 +466,30 @@ class AosInventory(object):
         except:
             if 'AOS_SERVER' in os.environ.keys():
                 self.aos_server = os.environ['AOS_SERVER']
-            pass

         try:
             self.aos_server_port = config.get('aos', 'port')
         except:
             if 'AOS_PORT' in os.environ.keys():
                 self.aos_server_port = os.environ['AOS_PORT']
-            pass

         try:
             self.aos_username = config.get('aos', 'username')
         except:
             if 'AOS_USERNAME' in os.environ.keys():
                 self.aos_username = os.environ['AOS_USERNAME']
-            pass

         try:
             self.aos_password = config.get('aos', 'password')
         except:
             if 'AOS_PASSWORD' in os.environ.keys():
                 self.aos_password = os.environ['AOS_PASSWORD']
-            pass

         try:
             self.aos_blueprint = config.get('aos', 'blueprint')
         except:
             if 'AOS_BLUEPRINT' in os.environ.keys():
                 self.aos_blueprint = os.environ['AOS_BLUEPRINT']
-            pass

         try:
             if config.get('aos', 'blueprint_interface') in ['false', 'no']:
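The hunk above is the unnecessary-pass fix: `pass` is only needed when a block would otherwise be empty, so a trailing `pass` after real statements is simply deleted. A small runnable sketch of the same shape (the config dict and environment key are illustrative):

    import os

    def read_server(config):
        # Flagged form: the except block already has a body, so a trailing
        # `pass` (shown commented out) adds nothing and is removed.
        try:
            return config['aos']['server']
        except KeyError:
            if 'AOS_SERVER' in os.environ:
                return os.environ['AOS_SERVER']
            # pass  <- unnecessary-pass: delete it, nothing else changes
        return None

    print(read_server({}))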
@@ -212,7 +212,7 @@ class CollinsInventory(object):
                 cur_page += 1
                 num_retries = 0
             except:
-                self.log.error("Error while communicating with Collins, retrying:\n%s" % traceback.format_exc())
+                self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
                 num_retries += 1
         return assets

@@ -281,7 +281,7 @@ class CollinsInventory(object):
        try:
            server_assets = self.find_assets()
        except:
-            self.log.error("Error while locating assets from Collins:\n%s" % traceback.format_exc())
+            self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
            return False

        for asset in server_assets:
@@ -305,7 +305,7 @@ class CollinsInventory(object):
            if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
                asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
            elif 'ADDRESSES' not in asset:
-                self.log.warning("No IP addresses found for asset '%s', skipping" % asset)
+                self.log.warning("No IP addresses found for asset '%s', skipping", asset)
                continue
            elif len(asset['ADDRESSES']) < ip_index + 1:
                self.log.warning(
@@ -301,7 +301,7 @@ class ConfigManager(object):
                except Exception as e:
                    sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
            elif ftype == 'yaml':
-                pass  # FIXME: implement, also , break down key from defs (. notation???)
+                # FIXME: implement, also , break down key from defs (. notation???)
                origin = cfile

        '''
@@ -430,18 +430,18 @@ class TaskExecutor:
            except AnsibleError:
                # loop error takes precedence
                if self._loop_eval_error is not None:
-                    raise self._loop_eval_error
+                    raise self._loop_eval_error  # pylint: disable=raising-bad-type
                # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
                if self._task.action not in ['include', 'include_tasks', 'include_role']:
                    raise

        # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
        if self._loop_eval_error is not None:
-            raise self._loop_eval_error
+            raise self._loop_eval_error  # pylint: disable=raising-bad-type

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
-            raise context_validation_error
+            raise context_validation_error  # pylint: disable=raising-bad-type

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
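The three changes above silence pylint's raising-bad-type check rather than restructure the code: the attributes are only raised inside an explicit `is not None` guard, which is safe at runtime but opaque to static analysis. A rough standalone sketch of the same situation (the class and attribute names are invented for illustration):

    class Runner:
        def __init__(self):
            # May stay None, which is why pylint cannot prove this is an exception.
            self._loop_eval_error = None

        def record_failure(self, message):
            self._loop_eval_error = ValueError(message)

        def finish(self):
            if self._loop_eval_error is not None:
                # Guarded at runtime, so the inline disable is justified.
                raise self._loop_eval_error  # pylint: disable=raising-bad-type

    runner = Runner()
    runner.record_failure("loop evaluation failed")
    try:
        runner.finish()
    except ValueError as exc:
        print("re-raised:", exc)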
@@ -729,8 +729,7 @@ def env_fallback(*args, **kwargs):
    for arg in args:
        if arg in os.environ:
            return os.environ[arg]
-    else:
-        raise AnsibleFallbackNotFound
+    raise AnsibleFallbackNotFound


def _lenient_lowercase(lst):
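The env_fallback hunk above is the useless-else-on-loop fix: an `else:` clause on a loop only makes sense when the loop contains a `break`; otherwise the block always runs and belongs after the loop, dedented. A runnable sketch of the corrected shape (AnsibleFallbackNotFound is replaced by a stand-in exception):

    import os

    class FallbackNotFound(Exception):
        """Stand-in for AnsibleFallbackNotFound."""

    def env_fallback(*args):
        # No `break` in this loop, so a loop-`else` would be useless; the
        # raise simply follows the loop.
        for arg in args:
            if arg in os.environ:
                return os.environ[arg]
        raise FallbackNotFound

    os.environ['DEMO_TOKEN'] = 'secret'
    print(env_fallback('MISSING_VAR', 'DEMO_TOKEN'))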
@@ -72,7 +72,7 @@ class AWSRetry(CloudRetry):
        return error.response['Error']['Code']

    @staticmethod
-    def found(response_code, catch_extra_error_codes):
+    def found(response_code, catch_extra_error_codes=None):
        # This list of failures is based on this API Reference
        # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
        #
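Adding the `=None` default above resolves pylint's signature-differs check, which fires when an override's parameters do not line up with the method it replaces. A minimal sketch with an invented base class (the real CloudRetry API may differ):

    class CloudRetryBase:
        @staticmethod
        def found(response_code, catch_extra_error_codes=None):
            raise NotImplementedError

    class AWSRetryDemo(CloudRetryBase):
        @staticmethod
        def found(response_code, catch_extra_error_codes=None):
            # Matching the parent's default keeps both signatures interchangeable.
            retry_on = ['RequestLimitExceeded', 'Throttling']
            if catch_extra_error_codes:
                retry_on.extend(catch_extra_error_codes)
            return response_code in retry_on

    print(AWSRetryDemo.found('Throttling'))                      # True
    print(AWSRetryDemo.found('AccessDenied', ['AccessDenied']))  # True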
@@ -434,13 +434,11 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
            if x >= retries:
                module.fail_json(msg="Failed while downloading %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
            # otherwise, try again, this may be a transient timeout.
-            pass
        except SSLError as e:  # will ClientError catch SSLError?
            # actually fail on last pass through the loop.
            if x >= retries:
                module.fail_json(msg="s3 download failed: %s." % e, exception=traceback.format_exc())
            # otherwise, try again, this may be a transient timeout.
-            pass


def download_s3str(module, s3, bucket, obj, version=None, validate=True):
@@ -200,8 +200,7 @@ def pipeline_id(client, name):
    for dp in pipelines['pipelineIdList']:
        if dp['name'] == name:
            return dp['id']
-    else:
-        raise DataPipelineNotFound
+    raise DataPipelineNotFound


def pipeline_description(client, dp_id):
@@ -233,8 +232,7 @@ def pipeline_field(client, dp_id, field):
    for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
        if field_key['key'] == field:
            return field_key['stringValue']
-    else:
-        raise KeyError("Field key {0} not found!".format(field))
+    raise KeyError("Field key {0} not found!".format(field))


def run_with_timeout(timeout, func, *func_args, **func_kwargs):
@@ -587,7 +587,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):

    for lb in as_group['LoadBalancerNames']:
        deregister_lb_instances(elb_connection, lb, instance_id)
-        log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
+        log.debug("De-registering %s from ELB %s", instance_id, lb)

    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and count > 0:
@@ -597,7 +597,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
        for i in lb_instances['InstanceStates']:
            if i['InstanceId'] == instance_id and i['State'] == "InService":
                count += 1
                log.debug("{0}: {1}, {2}".format(i['InstanceId'], i['State'], i['Description']))
-                log.debug("{0}: {1}, {2}".format(i['InstanceId'], i['State'], i['Description']))
+                log.debug("%s: %s, %s", i['InstanceId'], i['State'], i['Description'])
        time.sleep(10)

    if wait_timeout <= time.time():
@@ -614,7 +614,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
    for instance, settings in props['instance_facts'].items():
        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
            instances.append(dict(InstanceId=instance))
-    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
+    log.debug("ASG considers the following instances InService and Healthy: %s", instances)
    log.debug("ELB instance status:")
    lb_instances = list()
    for lb in as_group.get('LoadBalancerNames'):
@@ -635,7 +635,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
        for i in lb_instances.get('InstanceStates'):
            if i['State'] == "InService":
                healthy_instances.add(i['InstanceId'])
-            log.debug("ELB Health State {0}: {1}".format(i['InstanceId'], i['State']))
+            log.debug("ELB Health State %s: %s", i['InstanceId'], i['State'])
    return len(healthy_instances)


@@ -648,7 +648,7 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
    for instance, settings in props['instance_facts'].items():
        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
            instances.append(dict(Id=instance))
-    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
+    log.debug("ASG considers the following instances InService and Healthy: %s", instances)
    log.debug("Target Group instance status:")
    tg_instances = list()
    for tg in as_group.get('TargetGroupARNs'):
@@ -669,7 +669,7 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
        for i in tg_instances.get('TargetHealthDescriptions'):
            if i['TargetHealth']['State'] == "healthy":
                healthy_instances.add(i['Target']['Id'])
-            log.debug("Target Group Health State {0}: {1}".format(i['Target']['Id'], i['TargetHealth']['State']))
+            log.debug("Target Group Health State %s: %s", i['Target']['Id'], i['TargetHealth']['State'])
    return len(healthy_instances)

@@ -695,12 +695,12 @@ def wait_for_elb(asg_connection, module, group_name):

        while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
            healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
-            log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
+            log.debug("ELB thinks %s instances are healthy.", healthy_instances)
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
-        log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
+        log.debug("Waiting complete. ELB thinks %s instances are healthy.", healthy_instances)


def wait_for_target_group(asg_connection, module, group_name):
@@ -725,12 +725,12 @@ def wait_for_target_group(asg_connection, module, group_name):

        while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
            healthy_instances = tg_healthy(asg_connection, elbv2_connection, module, group_name)
-            log.debug("Target Group thinks {0} instances are healthy.".format(healthy_instances))
+            log.debug("Target Group thinks %s instances are healthy.", healthy_instances)
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
-        log.debug("Waiting complete. Target Group thinks {0} instances are healthy.".format(healthy_instances))
+        log.debug("Waiting complete. Target Group thinks %s instances are healthy.", healthy_instances)


def suspend_processes(ec2_connection, as_group, module):
@@ -1042,7 +1042,7 @@ def get_chunks(l, n):
def update_size(connection, group, max_size, min_size, dc):

    log.debug("setting ASG sizes")
-    log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size))
+    log.debug("minimum size: %s, desired_capacity: %s, max size: %s", min_size, dc, max_size)
    updated_group = dict()
    updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
    updated_group['MinSize'] = min_size
@@ -1083,7 +1083,7 @@ def replace(connection, module):

    # we don't want to spin up extra instances if not necessary
    if num_new_inst_needed < batch_size:
-        log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
+        log.debug("Overriding batch size to %s", num_new_inst_needed)
        batch_size = num_new_inst_needed

    if not old_instances:
@@ -1143,14 +1143,14 @@ def get_instances_by_lc(props, lc_check, initial_instances):
                old_instances.append(i)

    else:
-        log.debug("Comparing initial instances with current: {0}".format(initial_instances))
+        log.debug("Comparing initial instances with current: %s", initial_instances)
        for i in props['instances']:
            if i not in initial_instances:
                new_instances.append(i)
            else:
                old_instances.append(i)
-    log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
-    log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
+    log.debug("New instances: %s, %s", len(new_instances), new_instances)
+    log.debug("Old instances: %s, %s", len(old_instances), old_instances)

    return new_instances, old_instances

@@ -1192,17 +1192,17 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
    # and they have a non-current launch config
    instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)

-    log.debug("new instances needed: {0}".format(num_new_inst_needed))
-    log.debug("new instances: {0}".format(new_instances))
-    log.debug("old instances: {0}".format(old_instances))
-    log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
+    log.debug("new instances needed: %s", num_new_inst_needed)
+    log.debug("new instances: %s", new_instances)
+    log.debug("old instances: %s", old_instances)
+    log.debug("batch instances: %s", ",".join(instances_to_terminate))

    if num_new_inst_needed == 0:
        decrement_capacity = True
        if as_group['MinSize'] != min_size:
            updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
            update_asg(connection, **updated_params)
-            log.debug("Updating minimum size back to original of {0}".format(min_size))
+            log.debug("Updating minimum size back to original of %s", min_size)
        # if are some leftover old instances, but we are already at capacity with new ones
        # we don't want to decrement capacity
        if leftovers:
@@ -1216,13 +1216,13 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
        instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
        decrement_capacity = False
        break_loop = False
-        log.debug("{0} new instances needed".format(num_new_inst_needed))
+        log.debug("%s new instances needed", num_new_inst_needed)

-    log.debug("decrementing capacity: {0}".format(decrement_capacity))
+    log.debug("decrementing capacity: %s", decrement_capacity)

    for instance_id in instances_to_terminate:
        elb_dreg(connection, module, group_name, instance_id)
-        log.debug("terminating instance: {0}".format(instance_id))
+        log.debug("terminating instance: %s", instance_id)
        terminate_asg_instance(connection, instance_id, decrement_capacity)

    # we wait to make sure the machines we marked as Unhealthy are
@@ -1248,7 +1248,7 @@ def wait_for_term_inst(connection, module, term_instances):
        for i in instances:
            lifecycle = instance_facts[i]['lifecycle_state']
            health = instance_facts[i]['health_status']
-            log.debug("Instance {0} has state of {1},{2}".format(i, lifecycle, health))
+            log.debug("Instance %s has state of %s,%s", i, lifecycle, health)
            if lifecycle == 'Terminating' or health == 'Unhealthy':
                count += 1
        time.sleep(10)
@@ -1263,18 +1263,18 @@ def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size
    # make sure we have the latest stats after that last loop.
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    props = get_properties(as_group, module)
-    log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
+    log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
    # now we make sure that we have enough instances in a viable state
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and desired_size > props[prop]:
-        log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
+        log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
        time.sleep(10)
        as_group = describe_autoscaling_groups(connection, group_name)[0]
        props = get_properties(as_group, module)
    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
-    log.debug("Reached {0}: {1}".format(prop, desired_size))
+    log.debug("Reached %s: %s", prop, desired_size)
    return props

@@ -219,7 +219,6 @@ def main():
                    module.fail_json(msg="timed out while waiting for the key to be re-created")

            changed = True
-            pass

    # if the key doesn't exist, create it now
    else:
@@ -613,7 +613,6 @@ class Wrapper(object):
            except AzureException as e:
                if not str(e).lower().find("temporary redirect") == -1:
                    time.sleep(5)
-                    pass
                else:
                    raise e

@@ -668,8 +668,7 @@ class LxcContainerManagement(object):
            build_command.append(
                '%s %s' % (key, value)
            )
-        else:
-            return build_command
+        return build_command

    def _get_vars(self, variables):
        """Return a dict of all variables as found within the module.
@@ -689,8 +688,7 @@ class LxcContainerManagement(object):
            _var = self.module.params.get(k)
            if _var not in false_values:
                return_dict[v] = _var
-        else:
-            return return_dict
+        return return_dict

    def _run_command(self, build_command, unsafe_shell=False):
        """Return information from running an Ansible Command.
@@ -975,16 +973,15 @@ class LxcContainerManagement(object):
                time.sleep(1)
            else:
                return True
-        else:
-            self.failure(
-                lxc_container=self._container_data(),
-                error='Failed to start container'
-                      ' [ %s ]' % self.container_name,
-                rc=1,
-                msg='The container [ %s ] failed to start. Check to lxc is'
-                    ' available and that the container is in a functional'
-                    ' state.' % self.container_name
-            )
+        self.failure(
+            lxc_container=self._container_data(),
+            error='Failed to start container'
+                  ' [ %s ]' % self.container_name,
+            rc=1,
+            msg='The container [ %s ] failed to start. Check to lxc is'
+                ' available and that the container is in a functional'
+                ' state.' % self.container_name
+        )

    def _check_archive(self):
        """Create a compressed archive of a container.
@@ -1229,6 +1229,7 @@ class RHEV(object):
        self.__get_conn()
        return self.conn.set_VM_Host(vmname, vmhost)

+        # pylint: disable=unreachable
        VM = self.conn.get_VM(vmname)
        HOST = self.conn.get_Host(vmhost)

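In the hunk above the statements after the `return` can never execute; instead of deleting the historical code, the commit marks the dead block with an inline disable for pylint's unreachable check. A tiny sketch of what the check reacts to (the functions are placeholders, not the RHEV API):

    def set_vm_host(vmname, vmhost):
        return {'vm': vmname, 'host': vmhost}

    def setVMHost(vmname, vmhost):
        return set_vm_host(vmname, vmhost)

        # pylint: disable=unreachable
        # Everything below the return is dead code; the disable keeps the old
        # lines in place without tripping the newly enabled sanity check.
        print('never printed', vmname, vmhost)

    print(setVMHost('web01', 'hv03'))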
@@ -122,7 +122,6 @@ def rax_dns(module, comment, email, name, state, ttl):
            domain = dns.find(name=name)
        except pyrax.exceptions.NotFound:
            domain = {}
-            pass
        except Exception as e:
            module.fail_json(msg='%s' % e.message)

@@ -270,7 +270,6 @@ def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
            record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotFound as e:
            record = {}
-            pass
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='%s' % e.message)

@@ -246,6 +246,7 @@ USER_AGENT = "ansible-k8s-module/0.0.1"

def decode_cert_data(module):
    return
+    # pylint: disable=unreachable
    d = module.params.get("certificate_authority_data")
    if d and not d.startswith("-----BEGIN"):
        module.params["certificate_authority_data"] = base64.b64decode(d)
@@ -111,8 +111,7 @@ def main():
        if parts != '':
            return parts

-        else:
-            return ''
+        return ''

    def run_command(command):
        """Runs a monit command, and returns the new status."""
@@ -286,7 +286,6 @@ class ModuleManager(object):
            if status in ['Changes Pending']:
                details = self._get_details_from_resource(resource)
                self._validate_pending_status(details)
-                pass
            elif status in ['Awaiting Initial Sync', 'Not All Devices Synced']:
                pass
            elif status == 'In Sync':
@@ -277,7 +277,7 @@ class MavenDownloader:
        if self.latest_version_found:
            return self.latest_version_found
        path = "/%s/maven-metadata.xml" % (artifact.path(False))
-        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
        v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
        if v:
            self.latest_version_found = v[0]
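Passing `etree.parse` directly is the unnecessary-lambda fix: a lambda that only forwards its argument to another callable can be replaced by that callable. A standalone sketch using only the standard library (the downloader plumbing is simplified away):

    from io import StringIO
    from xml.etree import ElementTree as etree

    def fetch_and_parse(source, parse):
        # `parse` is any callable that accepts a file-like object.
        return parse(source)

    doc = StringIO("<metadata><versioning><release>1.2.3</release></versioning></metadata>")

    # Flagged: fetch_and_parse(doc, lambda r: etree.parse(r)) only forwards its argument.
    # Preferred: pass the function itself.
    tree = fetch_and_parse(doc, etree.parse)
    print(tree.findtext("./versioning/release"))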
@@ -289,7 +289,7 @@ class MavenDownloader:

        if artifact.is_snapshot():
            path = "/%s/maven-metadata.xml" % (artifact.path())
-            xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+            xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
            timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
            buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
            for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
@@ -267,15 +267,15 @@ class NetAppESeriesFlashCache(object):
    @property
    def needs_more_disks(self):
        if len(self.cache_detail['driveRefs']) < self.disk_count:
-            self.debug("needs resize: current disk count %s < requested requested count %s" % (
-                len(self.cache_detail['driveRefs']), self.disk_count))
+            self.debug("needs resize: current disk count %s < requested requested count %s",
+                       len(self.cache_detail['driveRefs']), self.disk_count)
            return True

    @property
    def needs_less_disks(self):
        if len(self.cache_detail['driveRefs']) > self.disk_count:
-            self.debug("needs resize: current disk count %s < requested requested count %s" % (
-                len(self.cache_detail['driveRefs']), self.disk_count))
+            self.debug("needs resize: current disk count %s < requested requested count %s",
+                       len(self.cache_detail['driveRefs']), self.disk_count)
            return True

    @property
@@ -292,8 +292,8 @@ class NetAppESeriesFlashCache(object):
    @property
    def needs_more_capacity(self):
        if self.current_size_bytes < self.requested_size_bytes:
-            self.debug("needs resize: current capacity %sb is less than requested minimum %sb" % (
-                self.current_size_bytes, self.requested_size_bytes))
+            self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
+                       self.current_size_bytes, self.requested_size_bytes)
            return True

    @property
@@ -405,7 +405,7 @@ def main():
    try:
        sp.apply()
    except Exception as e:
-        sp.debug("Exception in apply(): \n%s" % to_native(e))
+        sp.debug("Exception in apply(): \n%s", to_native(e))
        sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e),
                            exception=traceback.format_exc())

@@ -269,7 +269,7 @@ class NetAppESeriesStoragePool(object):
        min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]

        # filter clearly invalid/unavailable drives first
-        drives = select(lambda d: self._is_valid_drive(d), drives)
+        drives = select(self._is_valid_drive, drives)

        if interface_type:
            drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
@@ -390,7 +390,7 @@ class NetAppESeriesStoragePool(object):
                msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
                    (self.ssid, str(err), self.state, rc))

-        self.debug("searching for storage pool '%s'" % storage_pool_name)
+        self.debug("searching for storage pool '%s'", storage_pool_name)

        pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)

@@ -514,7 +514,7 @@ class NetAppESeriesStoragePool(object):
        return needs_migration

    def migrate_raid_level(self):
-        self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
+        self.debug("migrating storage pool to raid level '%s'...", self.raid_level)
        sp_raid_migrate_req = dict(
            raidLevel=self.raid_level
        )
@@ -637,7 +637,7 @@ class NetAppESeriesStoragePool(object):
    def expand_storage_pool(self):
        drives_to_add = self.get_expansion_candidate_drives()

-        self.debug("adding %s drives to storage pool..." % len(drives_to_add))
+        self.debug("adding %s drives to storage pool...", len(drives_to_add))
        sp_expand_req = dict(
            drives=drives_to_add
        )
@@ -723,8 +723,8 @@ class NetAppESeriesStoragePool(object):

        if self.needs_raid_level_migration:
            self.debug(
-                "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
-                    self.pool_detail['raidLevel'], self.raid_level))
+                "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'",
+                self.pool_detail['raidLevel'], self.raid_level)
            changed = True

        # if self.reserved_drive_count_differs:
@@ -813,7 +813,7 @@ def main():
        sp.apply()
    except Exception:
        e = get_exception()
-        sp.debug("Exception in apply(): \n%s" % format_exc(e))
+        sp.debug("Exception in apply(): \n%s", format_exc(e))
        raise

@@ -235,7 +235,7 @@ class NetAppESeriesVolume(object):

            volumes.extend(thinvols)

-        self.debug("searching for volume '%s'" % volume_name)
+        self.debug("searching for volume '%s'", volume_name)
        volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)

        if volume_detail:
@@ -257,7 +257,7 @@ class NetAppESeriesVolume(object):
            self.module.fail_json(
                msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))

-        self.debug("searching for storage pool '%s'" % storage_pool_name)
+        self.debug("searching for storage pool '%s'", storage_pool_name)
        pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)

        if pool_detail:
@@ -277,7 +277,7 @@ class NetAppESeriesVolume(object):
            dataAssuranceEnabled=data_assurance_enabled,
        )

-        self.debug("creating volume '%s'" % name)
+        self.debug("creating volume '%s'", name)
        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
                                 data=json.dumps(volume_add_req), headers=HEADERS, method='POST',
@@ -302,7 +302,7 @@ class NetAppESeriesVolume(object):
            dataAssuranceEnabled=data_assurance_enabled,
        )

-        self.debug("creating thin-volume '%s'" % name)
+        self.debug("creating thin-volume '%s'", name)
        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
                                 data=json.dumps(thin_volume_add_req), headers=HEADERS, method='POST',
@@ -318,7 +318,7 @@ class NetAppESeriesVolume(object):

    def delete_volume(self):
        # delete the volume
-        self.debug("deleting volume '%s'" % self.volume_detail['name'])
+        self.debug("deleting volume '%s'", self.volume_detail['name'])
        try:
            (rc, resp) = request(
                self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
@@ -445,7 +445,7 @@ class NetAppESeriesVolume(object):
            action = resp['action']
            percent_complete = resp['percentComplete']

-            self.debug('expand action %s, %s complete...' % (action, percent_complete))
+            self.debug('expand action %s, %s complete...', action, percent_complete)

            if action == 'none':
                self.debug('expand complete')
@@ -469,11 +469,8 @@ class NetAppESeriesVolume(object):
        elif self.state == 'present':
            # check requested volume size, see if expansion is necessary
            if self.volume_needs_expansion:
-                self.debug(
-                    "CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
-                                                                                             self.size_unit,
-                                                                                             self.volume_detail[
-                                                                                                 'capacity']))
+                self.debug("CHANGED: requested volume size %s%s is larger than current size %sb",
+                           self.size, self.size_unit, self.volume_detail['capacity'])
                changed = True

            if self.volume_properties_changed:
@@ -543,7 +540,7 @@ def main():
        v.apply()
    except Exception:
        e = get_exception()
-        v.debug("Exception in apply(): \n%s" % format_exc(e))
+        v.debug("Exception in apply(): \n%s", format_exc(e))
        v.module.fail_json(msg="Module failed. Error [%s]." % (str(e)))

@@ -306,7 +306,6 @@ class SolidFireVolume(object):
        if changed:
            if self.module.check_mode:
                result_message = "Check mode, skipping changes"
-                pass
            else:
                if self.state == 'present':
                    if not volume_exists:
@@ -126,7 +126,6 @@ def get_snapshot(module, array):
        for s in array.get_volume(module.params['name'], snap='true'):
            if s['name'] == snapname:
                return snapname
-                break
    except:
        return None

@@ -306,7 +306,6 @@ def setInterfaceOption(module, lines, iface, option, raw_value, state):
        module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)

    return changed, lines
-    pass


def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options):

lib/ansible/plugins/cache/__init__.py (vendored): 1 change
@@ -183,7 +183,6 @@ class BaseFileCacheModule(BaseCacheModule):
                return False
            else:
                display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
-                pass

    def delete(self, key):
        try:
@@ -78,22 +78,22 @@ class CallbackModule(CallbackBase):
        self.hostname = socket.gethostname()

    def runner_on_failed(self, host, res, ignore_errors=False):
-        self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
+        self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))

    def runner_on_ok(self, host, res):
-        self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
+        self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))

    def runner_on_skipped(self, host, item=None):
-        self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname, host, 'skipped'))
+        self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped')

    def runner_on_unreachable(self, host, res):
-        self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
+        self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res))

    def runner_on_async_failed(self, host, res, jid):
-        self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
+        self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))

    def playbook_on_import_for_host(self, host, imported_file):
-        self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s' % (self.hostname, host, imported_file))
+        self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file)

    def playbook_on_not_import_for_host(self, host, missing_file):
-        self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s' % (self.hostname, host, missing_file))
+        self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file)

@@ -502,7 +502,6 @@ class Connection(ConnectionBase):
                # unable to save keys, including scenario when key was invalid
                # and caught earlier
                traceback.print_exc()
-                pass
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

        self.ssh.close()
@@ -413,7 +413,7 @@ class PluginLoader:
        for i in self._get_paths():
            all_matches.extend(glob.glob(os.path.join(i, "*.py")))

-        for path in sorted(all_matches, key=lambda match: os.path.basename(match)):
+        for path in sorted(all_matches, key=os.path.basename):
            name = os.path.basename(os.path.splitext(path)[0])

            if '__init__' in name:
@@ -185,9 +185,7 @@ class LookupModule(LookupBase):
            path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
            if path is not None:
                return [path]
-        else:
-            if skip:
-                return []
-            else:
-                raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
-                                         "files are found")
+        if skip:
+            return []
+        raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
+                                 "files are found")
@@ -127,7 +127,6 @@ class StrategyModule(StrategyBase):
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
-                            pass

                        run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
                        if run_once:
@@ -266,7 +266,6 @@ class StrategyModule(StrategyBase):
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
-                            pass
                        display.debug("here goes the callback...")
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        task.name = saved_name
@@ -8,7 +8,6 @@ attribute-defined-outside-init
bad-continuation
bad-indentation
bad-mcs-classmethod-argument
-bad-open-mode
bad-whitespace
bare-except
blacklisted-name
@@ -34,8 +33,6 @@ invalid-encoded-data
invalid-name
line-too-long
locally-disabled
-logging-format-interpolation
-logging-not-lazy
method-hidden
misplaced-comparison-constant
missing-docstring
@@ -52,18 +49,14 @@ old-style-class
pointless-statement
pointless-string-statement
protected-access
-raising-bad-type
redefined-builtin
redefined-outer-name
redefined-variable-type
-redundant-unittest-assert
reimported
relative-import
-signature-differs
simplifiable-if-statement
super-init-not-called
superfluous-parens
-suppressed-message
too-few-public-methods
too-many-ancestors
too-many-arguments
@@ -80,10 +73,7 @@ too-many-statements
undefined-loop-variable
ungrouped-imports
unidiomatic-typecheck
-unnecessary-lambda
-unnecessary-pass
unneeded-not
-unreachable
unsubscriptable-object
unsupported-membership-test
unused-argument
@@ -91,7 +81,6 @@ unused-import
unused-variable
unused-wildcard-import
used-before-assignment
-useless-else-on-loop
wildcard-import
wrong-import-order
wrong-import-position
@@ -87,6 +87,6 @@ class TestAnsibleModuleExitJson(unittest.TestCase):
        for i in self.unparsable_cases:
            self.assertRaises(
                ValueError,
-                lambda data: _filter_non_json_lines(data),
+                _filter_non_json_lines,
                data=i
            )
@@ -106,7 +106,7 @@ class TestManager(unittest.TestCase):
            params = NONE_PARAMS.copy()
            del params['vdirect_ip']
            vdirect_file.VdirectFile(params)
-            self.assertFalse("KeyError was not thrown for missing parameter")
+            self.fail("KeyError was not thrown for missing parameter")
        except KeyError:
            assert True

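`assertFalse("...")` does force a failure here (a non-empty string is truthy), but asserting on a constant is what pylint's redundant-unittest-assert check flags; `self.fail(...)` states the intent directly. A self-contained sketch with a faked module under test:

    import unittest

    def load_config(params):
        return params['vdirect_ip']  # raises KeyError when the key is missing

    class TestMissingParameter(unittest.TestCase):
        def test_missing_ip_raises(self):
            try:
                load_config({})
                # Previously: self.assertFalse("KeyError was not thrown ...")
                self.fail("KeyError was not thrown for missing parameter")
            except KeyError:
                pass

    if __name__ == '__main__':
        unittest.main()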
@@ -134,7 +134,7 @@ class TestManager(unittest.TestCase):
        file = vdirect_file.VdirectFile(NONE_PARAMS)
        try:
            file.upload("missing_file.vm")
-            self.assertFalse("IOException was not thrown for missing file")
+            self.fail("IOException was not thrown for missing file")
        except IOError:
            assert True

@@ -467,7 +467,6 @@ class TestVaultEditor(unittest.TestCase):
        try:
            ve.decrypt_file(v11_file.name)
        except errors.AnsibleError:
-            raise
            error_hit = True

        # verify decrypted content
@@ -493,7 +492,6 @@ class TestVaultEditor(unittest.TestCase):
        try:
            ve.rekey_file(v10_file.name, vault.match_encrypt_secret(new_secrets)[1])
        except errors.AnsibleError:
-            raise
            error_hit = True

        # verify decrypted content
@@ -510,7 +508,6 @@ class TestVaultEditor(unittest.TestCase):
        try:
            dec_data = vl.decrypt(fdata)
        except errors.AnsibleError:
-            raise
            error_hit = True

        os.unlink(v10_file.name)