
[cloud] remove ec2_asg usage of Python base logging - fixes #32476 (#32508)

* replace logging with module.debug in ec2_asg

* Make module global in ec2_asg.

* Fix the debug messages

* pep8
Authored by Sloane Hertel on 2017-11-07 16:46:49 -05:00; committed by Ryan Brown
parent 2b08fd3fb8
commit 680d06d1ab
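In short, the change drops the module-level logger in favour of AnsibleModule's own debug() helper and makes the module object a global assigned in main(), so the helper functions no longer need a module parameter threaded through them. A minimal sketch of that pattern, assuming an illustrative helper name and argument_spec rather than the real ec2_asg ones:

# Sketch of the pattern this commit applies; the helper name and argument_spec are
# illustrative and not taken from ec2_asg itself.
from ansible.module_utils.basic import AnsibleModule

module = None  # assigned in main(); helpers use the global instead of a parameter


def wait_for_capacity(desired, current):
    # logging's lazy "%s" arguments become an eagerly formatted string,
    # because AnsibleModule.debug() accepts a single message.
    module.debug("Waiting for viable_instances = %s, currently %s" % (desired, current))


def main():
    global module
    module = AnsibleModule(argument_spec=dict(desired_capacity=dict(type='int', default=1)))
    wait_for_capacity(module.params['desired_capacity'], 0)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()

Unlike the commented-out log.basicConfig() file handler this commit also removes, module.debug() routes messages through the module's normal logging target (typically syslog/journald) and, in most setups, only when Ansible's debug mode is enabled.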


@@ -375,7 +375,6 @@ viable_instances:
'''
import time
-import logging as log
import traceback
from ansible.module_utils._text import to_native
@@ -387,8 +386,6 @@ try:
except ImportError:
pass # will be detected by imported HAS_BOTO3
-# log.basicConfig(filename='/tmp/ansible_ec2_asg.log', level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
'LoadBalancerNames', 'MaxSize', 'MinSize', 'AutoScalingGroupName', 'PlacementGroup',
@@ -497,7 +494,7 @@ def terminate_asg_instance(connection, instance_id, decrement_capacity):
ShouldDecrementDesiredCapacity=decrement_capacity)
-def enforce_required_arguments(module):
+def enforce_required_arguments():
''' As many arguments are not required for autoscale group deletion
they cannot be mandatory arguments for the module, so we enforce
them here '''
@@ -509,7 +506,7 @@ def enforce_required_arguments(module):
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
-def get_properties(autoscaling_group, module):
+def get_properties(autoscaling_group):
properties = dict()
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
@@ -571,7 +568,7 @@ def get_properties(autoscaling_group, module):
return properties
-def elb_dreg(asg_connection, module, group_name, instance_id):
+def elb_dreg(asg_connection, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
wait_timeout = module.params.get('wait_timeout')
@@ -588,7 +585,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
for lb in as_group['LoadBalancerNames']:
deregister_lb_instances(elb_connection, lb, instance_id)
-log.debug("De-registering %s from ELB %s", instance_id, lb)
+module.debug("De-registering %s from ELB %s" % (instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
@@ -598,7 +595,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
for i in lb_instances['InstanceStates']:
if i['InstanceId'] == instance_id and i['State'] == "InService":
count += 1
-log.debug("%s: %s, %s", i['InstanceId'], i['State'], i['Description'])
+module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description']))
time.sleep(10)
if wait_timeout <= time.time():
@@ -606,17 +603,17 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime()))
-def elb_healthy(asg_connection, elb_connection, module, group_name):
+def elb_healthy(asg_connection, elb_connection, group_name):
healthy_instances = set()
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
-props = get_properties(as_group, module)
+props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(dict(InstanceId=instance))
-log.debug("ASG considers the following instances InService and Healthy: %s", instances)
-log.debug("ELB instance status:")
+module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
+module.debug("ELB instance status:")
lb_instances = list()
for lb in as_group.get('LoadBalancerNames'):
# we catch a race condition that sometimes happens if the instance exists in the ASG
@@ -636,21 +633,21 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
for i in lb_instances.get('InstanceStates'):
if i['State'] == "InService":
healthy_instances.add(i['InstanceId'])
-log.debug("ELB Health State %s: %s", i['InstanceId'], i['State'])
+module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State']))
return len(healthy_instances)
-def tg_healthy(asg_connection, elbv2_connection, module, group_name):
+def tg_healthy(asg_connection, elbv2_connection, group_name):
healthy_instances = set()
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
-props = get_properties(as_group, module)
+props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(dict(Id=instance))
-log.debug("ASG considers the following instances InService and Healthy: %s", instances)
-log.debug("Target Group instance status:")
+module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
+module.debug("Target Group instance status:")
tg_instances = list()
for tg in as_group.get('TargetGroupARNs'):
# we catch a race condition that sometimes happens if the instance exists in the ASG
@@ -670,11 +667,11 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
for i in tg_instances.get('TargetHealthDescriptions'):
if i['TargetHealth']['State'] == "healthy":
healthy_instances.add(i['Target']['Id'])
-log.debug("Target Group Health State %s: %s", i['Target']['Id'], i['TargetHealth']['State'])
+module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State']))
return len(healthy_instances)
-def wait_for_elb(asg_connection, module, group_name):
+def wait_for_elb(asg_connection, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
wait_timeout = module.params.get('wait_timeout')
@@ -683,7 +680,7 @@ def wait_for_elb(asg_connection, module, group_name):
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
-log.debug("Waiting for ELB to consider instances healthy.")
+module.debug("Waiting for ELB to consider instances healthy.")
elb_connection = boto3_conn(module,
conn_type='client',
resource='elb',
@@ -692,19 +689,19 @@ def wait_for_elb(asg_connection, module, group_name):
**aws_connect_params)
wait_timeout = time.time() + wait_timeout
-healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
+healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
-healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
-log.debug("ELB thinks %s instances are healthy.", healthy_instances)
+healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
+module.debug("ELB thinks %s instances are healthy." % healthy_instances)
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
-log.debug("Waiting complete. ELB thinks %s instances are healthy.", healthy_instances)
+module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)
-def wait_for_target_group(asg_connection, module, group_name):
+def wait_for_target_group(asg_connection, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
wait_timeout = module.params.get('wait_timeout')
@@ -713,7 +710,7 @@ def wait_for_target_group(asg_connection, module, group_name):
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
-log.debug("Waiting for Target Group to consider instances healthy.")
+module.debug("Waiting for Target Group to consider instances healthy.")
elbv2_connection = boto3_conn(module,
conn_type='client',
resource='elbv2',
@@ -722,19 +719,19 @@ def wait_for_target_group(asg_connection, module, group_name):
**aws_connect_params)
wait_timeout = time.time() + wait_timeout
-healthy_instances = tg_healthy(asg_connection, elbv2_connection, module, group_name)
+healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
-healthy_instances = tg_healthy(asg_connection, elbv2_connection, module, group_name)
-log.debug("Target Group thinks %s instances are healthy.", healthy_instances)
+healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
+module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
-log.debug("Waiting complete. Target Group thinks %s instances are healthy.", healthy_instances)
+module.debug("Waiting complete. Target Group thinks %s instances are healthy." % healthy_instances)
-def suspend_processes(ec2_connection, as_group, module):
+def suspend_processes(ec2_connection, as_group):
suspend_processes = set(module.params.get('suspend_processes'))
try:
@@ -757,7 +754,7 @@ def suspend_processes(ec2_connection, as_group, module):
@AWSRetry.backoff(tries=3, delay=0.1)
-def create_autoscaling_group(connection, module):
+def create_autoscaling_group(connection):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
target_group_arns = module.params['target_group_arns']
@@ -803,7 +800,7 @@ def create_autoscaling_group(connection, module):
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for
zone in ec2_connection.describe_availability_zones()['AvailabilityZones']]
-enforce_required_arguments(module)
+enforce_required_arguments()
launch_configs = describe_launch_configurations(connection, launch_config_name)
if len(launch_configs['LaunchConfigurations']) == 0:
module.fail_json(msg="No launch config found with name %s" % launch_config_name)
@@ -838,18 +835,18 @@ def create_autoscaling_group(connection, module):
if len(all_ag) == 0:
module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
as_group = all_ag[0]
-suspend_processes(connection, as_group, module)
+suspend_processes(connection, as_group)
if wait_for_instances:
-wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
if load_balancers:
-wait_for_elb(connection, module, group_name)
+wait_for_elb(connection, group_name)
# Wait for target group health if target group(s)defined
if target_group_arns:
-wait_for_target_group(connection, module, group_name)
+wait_for_target_group(connection, group_name)
if notification_topic:
put_notification_config(connection, group_name, notification_topic, notification_types)
as_group = describe_autoscaling_groups(connection, group_name)[0]
-asg_properties = get_properties(as_group, module)
+asg_properties = get_properties(as_group)
changed = True
return changed, asg_properties
except botocore.exceptions.ClientError as e:
@@ -860,10 +857,10 @@ def create_autoscaling_group(connection, module):
exception=traceback.format_exc())
else:
as_group = as_groups['AutoScalingGroups'][0]
-initial_asg_properties = get_properties(as_group, module)
+initial_asg_properties = get_properties(as_group)
changed = False
-if suspend_processes(connection, as_group, module):
+if suspend_processes(connection, as_group):
changed = True
# process tag changes
@@ -993,20 +990,20 @@ def create_autoscaling_group(connection, module):
module.fail_json(msg="Failed to update Autoscaling Group notifications.",
exception=traceback.format_exc())
if wait_for_instances:
-wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
# Wait for ELB health if ELB(s)defined
if load_balancers:
-log.debug('\tWAITING FOR ELB HEALTH')
-wait_for_elb(connection, module, group_name)
+module.debug('\tWAITING FOR ELB HEALTH')
+wait_for_elb(connection, group_name)
# Wait for target group health if target group(s)defined
if target_group_arns:
-log.debug('\tWAITING FOR TG HEALTH')
-wait_for_target_group(connection, module, group_name)
+module.debug('\tWAITING FOR TG HEALTH')
+wait_for_target_group(connection, group_name)
try:
as_group = describe_autoscaling_groups(connection, group_name)[0]
-asg_properties = get_properties(as_group, module)
+asg_properties = get_properties(as_group)
if asg_properties != initial_asg_properties:
changed = True
except botocore.exceptions.ClientError as e:
@@ -1018,7 +1015,7 @@ def create_autoscaling_group(connection, module):
return changed, asg_properties
-def delete_autoscaling_group(connection, module):
+def delete_autoscaling_group(connection):
group_name = module.params.get('name')
notification_topic = module.params.get('notification_topic')
wait_for_instances = module.params.get('wait_for_instances')
@@ -1063,8 +1060,8 @@ def get_chunks(l, n):
def update_size(connection, group, max_size, min_size, dc):
-log.debug("setting ASG sizes")
-log.debug("minimum size: %s, desired_capacity: %s, max size: %s", min_size, dc, max_size)
+module.debug("setting ASG sizes")
+module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
updated_group = dict()
updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
updated_group['MinSize'] = min_size
@@ -1073,7 +1070,7 @@ def update_size(connection, group, max_size, min_size, dc):
update_asg(connection, **updated_group)
-def replace(connection, module):
+def replace(connection):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
@@ -1084,8 +1081,8 @@ def replace(connection, module):
replace_instances = module.params.get('replace_instances')
as_group = describe_autoscaling_groups(connection, group_name)[0]
-wait_for_new_inst(module, connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
-props = get_properties(as_group, module)
+wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
+props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
@@ -1096,16 +1093,16 @@ def replace(connection, module):
if lc_check:
if num_new_inst_needed == 0 and old_instances:
-log.debug("No new instances needed, but old instances are present. Removing old instances")
-terminate_batch(connection, module, old_instances, instances, True)
+module.debug("No new instances needed, but old instances are present. Removing old instances")
+terminate_batch(connection, old_instances, instances, True)
as_group = describe_autoscaling_groups(connection, group_name)[0]
-props = get_properties(as_group, module)
+props = get_properties(as_group)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
-log.debug("Overriding batch size to %s", num_new_inst_needed)
+module.debug("Overriding batch size to %s" % num_new_inst_needed)
batch_size = num_new_inst_needed
if not old_instances:
@@ -1124,30 +1121,30 @@ def replace(connection, module):
as_group = describe_autoscaling_groups(connection, group_name)[0]
update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
-wait_for_new_inst(module, connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
-wait_for_elb(connection, module, group_name)
-wait_for_target_group(connection, module, group_name)
+wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
+wait_for_elb(connection, group_name)
+wait_for_target_group(connection, group_name)
as_group = describe_autoscaling_groups(connection, group_name)[0]
-props = get_properties(as_group, module)
+props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
-log.debug("beginning main loop")
+module.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
-break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
-wait_for_term_inst(connection, module, term_instances)
-wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
-wait_for_elb(connection, module, group_name)
-wait_for_target_group(connection, module, group_name)
+break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
+wait_for_term_inst(connection, term_instances)
+wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
+wait_for_elb(connection, group_name)
+wait_for_target_group(connection, group_name)
as_group = describe_autoscaling_groups(connection, group_name)[0]
if break_early:
-log.debug("breaking loop")
+module.debug("breaking loop")
break
update_size(connection, as_group, max_size, min_size, desired_capacity)
as_group = describe_autoscaling_groups(connection, group_name)[0]
-asg_properties = get_properties(as_group, module)
-log.debug("Rolling update complete.")
+asg_properties = get_properties(as_group)
+module.debug("Rolling update complete.")
changed = True
return(changed, asg_properties)
@@ -1165,14 +1162,14 @@ def get_instances_by_lc(props, lc_check, initial_instances):
old_instances.append(i)
else:
-log.debug("Comparing initial instances with current: %s", initial_instances)
+module.debug("Comparing initial instances with current: %s" % initial_instances)
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
-log.debug("New instances: %s, %s", len(new_instances), new_instances)
-log.debug("Old instances: %s, %s", len(old_instances), old_instances)
+module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
+module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
return new_instances, old_instances
@@ -1194,7 +1191,7 @@ def list_purgeable_instances(props, lc_check, replace_instances, initial_instanc
return instances_to_terminate
-def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
+def terminate_batch(connection, replace_instances, initial_instances, leftovers=False):
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
@@ -1204,7 +1201,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
break_loop = False
as_group = describe_autoscaling_groups(connection, group_name)[0]
-props = get_properties(as_group, module)
+props = get_properties(as_group)
desired_size = as_group['MinSize']
new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
@@ -1214,17 +1211,17 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
-log.debug("new instances needed: %s", num_new_inst_needed)
-log.debug("new instances: %s", new_instances)
-log.debug("old instances: %s", old_instances)
-log.debug("batch instances: %s", ",".join(instances_to_terminate))
+module.debug("new instances needed: %s" % num_new_inst_needed)
+module.debug("new instances: %s" % new_instances)
+module.debug("old instances: %s" % old_instances)
+module.debug("batch instances: %s" % ",".join(instances_to_terminate))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group['MinSize'] != min_size:
updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
update_asg(connection, **updated_params)
-log.debug("Updating minimum size back to original of %s", min_size)
+module.debug("Updating minimum size back to original of %s" % min_size)
# if are some leftover old instances, but we are already at capacity with new ones
# we don't want to decrement capacity
if leftovers:
@@ -1232,19 +1229,19 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
-log.debug("No new instances needed")
+module.debug("No new instances needed")
if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
-log.debug("%s new instances needed", num_new_inst_needed)
+module.debug("%s new instances needed" % num_new_inst_needed)
-log.debug("decrementing capacity: %s", decrement_capacity)
+module.debug("decrementing capacity: %s" % decrement_capacity)
for instance_id in instances_to_terminate:
-elb_dreg(connection, module, group_name, instance_id)
-log.debug("terminating instance: %s", instance_id)
+elb_dreg(connection, group_name, instance_id)
+module.debug("terminating instance: %s" % instance_id)
terminate_asg_instance(connection, instance_id, decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
@@ -1253,24 +1250,24 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
return break_loop, desired_size, instances_to_terminate
-def wait_for_term_inst(connection, module, term_instances):
+def wait_for_term_inst(connection, term_instances):
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
as_group = describe_autoscaling_groups(connection, group_name)[0]
-props = get_properties(as_group, module)
+props = get_properties(as_group)
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
-log.debug("waiting for instances to terminate")
+module.debug("waiting for instances to terminate")
count = 0
as_group = describe_autoscaling_groups(connection, group_name)[0]
-props = get_properties(as_group, module)
+props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = (i for i in instance_facts if i in term_instances)
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
-log.debug("Instance %s has state of %s,%s", i, lifecycle, health)
+module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health))
if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
@@ -1280,23 +1277,23 @@ def wait_for_term_inst(connection, module, term_instances):
module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
-def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
+def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = describe_autoscaling_groups(connection, group_name)[0]
-props = get_properties(as_group, module)
-log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
+props = get_properties(as_group)
+module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
-log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
+module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
time.sleep(10)
as_group = describe_autoscaling_groups(connection, group_name)[0]
-props = get_properties(as_group, module)
+props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
-log.debug("Reached %s: %s", prop, desired_size)
+module.debug("Reached %s: %s" % (prop, desired_size))
return props
@@ -1337,6 +1334,7 @@ def main():
),
)
+global module
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['replace_all_instances', 'replace_instances']]
@@ -1362,12 +1360,12 @@ def main():
changed = create_changed = replace_changed = False
if state == 'present':
-create_changed, asg_properties = create_autoscaling_group(connection, module)
+create_changed, asg_properties = create_autoscaling_group(connection)
elif state == 'absent':
-changed = delete_autoscaling_group(connection, module)
+changed = delete_autoscaling_group(connection)
module.exit_json(changed=changed)
if replace_all_instances or replace_instances:
-replace_changed, asg_properties = replace(connection, module)
+replace_changed, asg_properties = replace(connection)
if create_changed or replace_changed:
changed = True
module.exit_json(changed=changed, **asg_properties)
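A closing note on the "Fix the debug messages" bullet above: logging.debug() interpolates its extra arguments lazily, while AnsibleModule.debug() takes one already-formatted string, which is why every converted call switches to eager "%"-formatting. A small standalone comparison, using the plain logging library and print() only as stand-ins for the module; the instance values are made up:

# logging interpolates lazily; module.debug() needs the string built up front.
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("ec2_asg_demo")

instance_id, state = "i-0123456789abcdef0", "InService"

# old style: the logger performs the "%s" substitution itself
log.debug("ELB Health State %s: %s", instance_id, state)

# new style: format eagerly, then hand the finished message over
message = "ELB Health State %s: %s" % (instance_id, state)
print(message)  # inside the module this would be module.debug(message)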