mirror of
https://github.com/ansible-collections/community.general.git
synced 2024-09-14 20:13:21 +02:00
docker_*: always use client.fail() over module.fail_json(), allow always returning data on failure (#51999)
* Always use client.fail() instead of module.fail_json(). * Allow passing results on through on module failure. * Linting.
This commit is contained in:
parent
ad386290b4
commit
d7fd4d0550
12 changed files with 64 additions and 60 deletions
|
@ -20,7 +20,7 @@ import os
|
|||
import re
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback, jsonify
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
|
||||
|
||||
|
@ -164,7 +164,11 @@ class AnsibleDockerClient(Client):
|
|||
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
|
||||
required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
|
||||
min_docker_api_version=None, option_minimal_versions=None,
|
||||
option_minimal_versions_ignore_params=None):
|
||||
option_minimal_versions_ignore_params=None, fail_results=None):
|
||||
|
||||
# Modules can put information in here which will always be returned
|
||||
# in case client.fail() is called.
|
||||
self.fail_results = fail_results or {}
|
||||
|
||||
merged_arg_spec = dict()
|
||||
merged_arg_spec.update(DOCKER_COMMON_ARGS)
|
||||
|
@ -249,8 +253,9 @@ class AnsibleDockerClient(Client):
|
|||
# else:
|
||||
# log_file.write(msg + u'\n')
|
||||
|
||||
def fail(self, msg):
|
||||
self.module.fail_json(msg=msg)
|
||||
def fail(self, msg, **kwargs):
|
||||
self.fail_results.update(kwargs)
|
||||
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
|
||||
|
||||
@staticmethod
|
||||
def _get_value(param_name, param_value, env_variable, default_value):
|
||||
|
@ -506,7 +511,7 @@ class AnsibleDockerClient(Client):
|
|||
self.log("Inspecting container Id %s" % result['Id'])
|
||||
result = self.inspect_container(container=result['Id'])
|
||||
self.log("Completed container inspection")
|
||||
except NotFound as exc:
|
||||
except NotFound as dummy:
|
||||
return None
|
||||
except Exception as exc:
|
||||
self.fail("Error inspecting container: %s" % exc)
|
||||
|
@ -545,7 +550,7 @@ class AnsibleDockerClient(Client):
|
|||
self.log("Inspecting network Id %s" % id)
|
||||
result = self.inspect_network(id)
|
||||
self.log("Completed network inspection")
|
||||
except NotFound as exc:
|
||||
except NotFound as dummy:
|
||||
return None
|
||||
except Exception as exc:
|
||||
self.fail("Error inspecting network: %s" % exc)
|
||||
|
|
|
@ -30,7 +30,7 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
|
|||
try:
|
||||
info = self.info()
|
||||
except APIError as exc:
|
||||
self.fail(msg="Failed to get node information for %s" % to_native(exc))
|
||||
self.fail("Failed to get node information for %s" % to_native(exc))
|
||||
|
||||
if info:
|
||||
json_str = json.dumps(info, ensure_ascii=False)
|
||||
|
@ -55,7 +55,7 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
|
|||
try:
|
||||
info = self.info()
|
||||
except APIError:
|
||||
self.fail(msg="Failed to get host information.")
|
||||
self.fail("Failed to get host information.")
|
||||
|
||||
if info:
|
||||
json_str = json.dumps(info, ensure_ascii=False)
|
||||
|
@ -92,7 +92,7 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
|
|||
If host is not a swarm manager then Ansible task on this host should end with 'failed' state
|
||||
"""
|
||||
if not self.check_if_swarm_manager():
|
||||
self.fail(msg="Error running docker swarm module: must run on swarm manager node")
|
||||
self.fail("Error running docker swarm module: must run on swarm manager node")
|
||||
|
||||
def check_if_swarm_worker(self):
|
||||
"""
|
||||
|
@ -139,20 +139,20 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
|
|||
node_id = self.get_swarm_node_id()
|
||||
|
||||
if node_id is None:
|
||||
self.fail(msg="Failed to get node information.")
|
||||
self.fail("Failed to get node information.")
|
||||
|
||||
try:
|
||||
node_info = self.inspect_node(node_id=node_id)
|
||||
except APIError as exc:
|
||||
if exc.status_code == 503:
|
||||
self.fail(msg="Cannot inspect node: To inspect node execute module on Swarm Manager")
|
||||
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
|
||||
if exc.status_code == 404:
|
||||
if skip_missing is False:
|
||||
self.fail(msg="Error while reading from Swarm manager: %s" % to_native(exc))
|
||||
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
|
||||
else:
|
||||
return None
|
||||
except Exception as exc:
|
||||
self.module.fail_json(msg="Error inspecting swarm node: %s" % exc)
|
||||
self.fail("Error inspecting swarm node: %s" % exc)
|
||||
|
||||
json_str = json.dumps(node_info, ensure_ascii=False)
|
||||
node_info = json.loads(json_str)
|
||||
|
@ -169,10 +169,10 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
|
|||
node_info = self.nodes()
|
||||
except APIError as exc:
|
||||
if exc.status_code == 503:
|
||||
self.fail(msg="Cannot inspect node: To inspect node execute module on Swarm Manager")
|
||||
self.fail(msg="Error while reading from Swarm manager: %s" % to_native(exc))
|
||||
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
|
||||
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
|
||||
except Exception as exc:
|
||||
self.module.fail_json(msg="Error inspecting swarm node: %s" % exc)
|
||||
self.fail("Error inspecting swarm node: %s" % exc)
|
||||
|
||||
json_str = json.dumps(node_info, ensure_ascii=False)
|
||||
node_info = json.loads(json_str)
|
||||
|
|
|
@ -737,7 +737,7 @@ class ContainerManager(DockerBaseClass):
|
|||
except Exception as exc:
|
||||
fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
|
||||
msg_format="Error starting project %s")
|
||||
self.client.module.fail_json(**fail_reason)
|
||||
self.client.fail(**fail_reason)
|
||||
else:
|
||||
cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
|
||||
|
||||
|
@ -950,7 +950,7 @@ class ContainerManager(DockerBaseClass):
|
|||
except Exception as exc:
|
||||
fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
|
||||
msg_format="Error stopping project %s")
|
||||
self.client.module.fail_json(**fail_reason)
|
||||
self.client.fail(**fail_reason)
|
||||
else:
|
||||
cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
|
||||
return result
|
||||
|
@ -985,7 +985,7 @@ class ContainerManager(DockerBaseClass):
|
|||
except Exception as exc:
|
||||
fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
|
||||
msg_format="Error restarting project %s")
|
||||
self.client.module.fail_json(**fail_reason)
|
||||
self.client.fail(**fail_reason)
|
||||
else:
|
||||
cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
|
||||
return result
|
||||
|
|
|
@ -903,7 +903,7 @@ def is_volume_permissions(input):
|
|||
return True
|
||||
|
||||
|
||||
def parse_port_range(range_or_port, module):
|
||||
def parse_port_range(range_or_port, client):
|
||||
'''
|
||||
Parses a string containing either a single port or a range of ports.
|
||||
|
||||
|
@ -912,13 +912,13 @@ def parse_port_range(range_or_port, module):
|
|||
if '-' in range_or_port:
|
||||
start, end = [int(port) for port in range_or_port.split('-')]
|
||||
if end < start:
|
||||
module.fail_json(msg='Invalid port range: {0}'.format(range_or_port))
|
||||
client.fail('Invalid port range: {0}'.format(range_or_port))
|
||||
return list(range(start, end + 1))
|
||||
else:
|
||||
return [int(range_or_port)]
|
||||
|
||||
|
||||
def split_colon_ipv6(input, module):
|
||||
def split_colon_ipv6(input, client):
|
||||
'''
|
||||
Split string by ':', while keeping IPv6 addresses in square brackets in one component.
|
||||
'''
|
||||
|
@ -933,7 +933,7 @@ def split_colon_ipv6(input, module):
|
|||
break
|
||||
j = input.find(']', i)
|
||||
if j < 0:
|
||||
module.fail_json(msg='Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(input, i + 1))
|
||||
client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(input, i + 1))
|
||||
result.extend(input[start:i].split(':'))
|
||||
k = input.find(':', j)
|
||||
if k < 0:
|
||||
|
@ -1123,7 +1123,7 @@ class TaskParameters(DockerBaseClass):
|
|||
self._process_rate_iops(option=param_name)
|
||||
|
||||
def fail(self, msg):
|
||||
self.client.module.fail_json(msg=msg)
|
||||
self.client.fail(msg)
|
||||
|
||||
@property
|
||||
def update_parameters(self):
|
||||
|
@ -1333,25 +1333,25 @@ class TaskParameters(DockerBaseClass):
|
|||
|
||||
binds = {}
|
||||
for port in self.published_ports:
|
||||
parts = split_colon_ipv6(str(port), self.client.module)
|
||||
parts = split_colon_ipv6(str(port), self.client)
|
||||
container_port = parts[-1]
|
||||
protocol = ''
|
||||
if '/' in container_port:
|
||||
container_port, protocol = parts[-1].split('/')
|
||||
container_ports = parse_port_range(container_port, self.client.module)
|
||||
container_ports = parse_port_range(container_port, self.client)
|
||||
|
||||
p_len = len(parts)
|
||||
if p_len == 1:
|
||||
port_binds = len(container_ports) * [(default_ip,)]
|
||||
elif p_len == 2:
|
||||
port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client.module)]
|
||||
port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
|
||||
elif p_len == 3:
|
||||
# We only allow IPv4 and IPv6 addresses for the bind address
|
||||
if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+\]$', parts[0]):
|
||||
self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
|
||||
'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(parts[0]))
|
||||
if parts[1]:
|
||||
port_binds = [(parts[0], port) for port in parse_port_range(parts[1], self.client.module)]
|
||||
port_binds = [(parts[0], port) for port in parse_port_range(parts[1], self.client)]
|
||||
else:
|
||||
port_binds = len(container_ports) * [(parts[0],)]
|
||||
|
||||
|
@ -1696,7 +1696,7 @@ class Container(DockerBaseClass):
|
|||
self.parameters_map['expected_healthcheck'] = 'healthcheck'
|
||||
|
||||
def fail(self, msg):
|
||||
self.parameters.client.module.fail_json(msg=msg)
|
||||
self.parameters.client.fail(msg)
|
||||
|
||||
@property
|
||||
def exists(self):
|
||||
|
@ -2355,7 +2355,7 @@ class ContainerManager(DockerBaseClass):
|
|||
self.container_remove(container.Id)
|
||||
|
||||
def fail(self, msg, **kwargs):
|
||||
self.client.module.fail_json(msg=msg, **sanitize_result(kwargs))
|
||||
self.client.fail(msg, **kwargs)
|
||||
|
||||
def _output_logs(self, msg):
|
||||
self.client.module.log(msg=msg)
|
||||
|
|
|
@ -218,7 +218,7 @@ class DockerHostManager(DockerBaseClass):
|
|||
try:
|
||||
return self.client.info()
|
||||
except APIError as exc:
|
||||
self.client.fail_json(msg="Error inspecting docker host: %s" % to_native(exc))
|
||||
self.client.fail("Error inspecting docker host: %s" % to_native(exc))
|
||||
|
||||
def get_docker_disk_usage_facts(self):
|
||||
try:
|
||||
|
@ -227,7 +227,7 @@ class DockerHostManager(DockerBaseClass):
|
|||
else:
|
||||
return dict(LayerSize=self.client.df()['LayersSize'])
|
||||
except APIError as exc:
|
||||
self.client.fail_json(msg="Error inspecting docker host: %s" % to_native(exc))
|
||||
self.client.fail("Error inspecting docker host: %s" % to_native(exc))
|
||||
|
||||
def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
|
||||
items = None
|
||||
|
@ -248,8 +248,8 @@ class DockerHostManager(DockerBaseClass):
|
|||
elif docker_object == 'volumes':
|
||||
items = self.client.volumes(filters=filters)
|
||||
except APIError as exc:
|
||||
self.client.fail_json(msg="Error inspecting docker host for object '%s': %s" %
|
||||
(docker_object, to_native(exc)))
|
||||
self.client.fail("Error inspecting docker host for object '%s': %s" %
|
||||
(docker_object, to_native(exc)))
|
||||
|
||||
if self.verbose_output:
|
||||
if docker_object != 'volumes':
|
||||
|
|
|
@ -183,16 +183,16 @@ class SwarmNodeManager(DockerBaseClass):
|
|||
|
||||
def node_update(self):
|
||||
if not (self.client.check_if_swarm_node(node_id=self.parameters.hostname)):
|
||||
self.client.fail(msg="This node is not part of a swarm.")
|
||||
self.client.fail("This node is not part of a swarm.")
|
||||
return
|
||||
|
||||
if self.client.check_if_swarm_node_is_down():
|
||||
self.client.fail(msg="Can not update the node. The node is down.")
|
||||
self.client.fail("Can not update the node. The node is down.")
|
||||
|
||||
try:
|
||||
node_info = self.client.inspect_node(node_id=self.parameters.hostname)
|
||||
except APIError as exc:
|
||||
self.client.fail(msg="Failed to get node information for %s" % to_native(exc))
|
||||
self.client.fail("Failed to get node information for %s" % to_native(exc))
|
||||
|
||||
changed = False
|
||||
node_spec = dict(
|
||||
|
@ -250,7 +250,7 @@ class SwarmNodeManager(DockerBaseClass):
|
|||
self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
|
||||
node_spec=node_spec)
|
||||
except APIError as exc:
|
||||
self.client.fail(msg="Failed to update node : %s" % to_native(exc))
|
||||
self.client.fail("Failed to update node : %s" % to_native(exc))
|
||||
self.results['node_facts'] = self.client.get_node_inspect(node_id=node_info['ID'])
|
||||
self.results['changed'] = changed
|
||||
else:
|
||||
|
|
|
@ -202,7 +202,7 @@ def main():
|
|||
cache_min_version = '3.3.0'
|
||||
if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
|
||||
msg = "Error: docker version is %s. Minimum version required for builds option is %s. Use `pip install --upgrade docker` to upgrade."
|
||||
client.module.fail(msg=(msg % (docker_version, cache_min_version)))
|
||||
client.fail(msg % (docker_version, cache_min_version))
|
||||
|
||||
result = dict()
|
||||
|
||||
|
|
|
@ -332,7 +332,7 @@ class SwarmManager(DockerBaseClass):
|
|||
advertise_addr=self.parameters.advertise_addr, listen_addr=self.parameters.listen_addr,
|
||||
force_new_cluster=self.parameters.force_new_cluster, swarm_spec=self.parameters.spec)
|
||||
except APIError as exc:
|
||||
self.client.fail(msg="Can not create a new Swarm Cluster: %s" % to_native(exc))
|
||||
self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
|
||||
|
||||
self.__isSwarmManager()
|
||||
self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info['ID']))
|
||||
|
@ -391,7 +391,7 @@ class SwarmManager(DockerBaseClass):
|
|||
version=version, swarm_spec=new_spec, rotate_worker_token=self.parameters.rotate_worker_token,
|
||||
rotate_manager_token=self.parameters.rotate_manager_token)
|
||||
except APIError as exc:
|
||||
self.client.fail(msg="Can not update a Swarm Cluster: %s" % to_native(exc))
|
||||
self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
|
||||
return
|
||||
|
||||
self.inspect_swarm()
|
||||
|
@ -416,7 +416,7 @@ class SwarmManager(DockerBaseClass):
|
|||
remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token, listen_addr=self.parameters.listen_addr,
|
||||
advertise_addr=self.parameters.advertise_addr)
|
||||
except APIError as exc:
|
||||
self.client.fail(msg="Can not join the Swarm Cluster: %s" % to_native(exc))
|
||||
self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
|
||||
self.results['actions'].append("New node is added to swarm cluster")
|
||||
self.results['changed'] = True
|
||||
|
||||
|
@ -427,7 +427,7 @@ class SwarmManager(DockerBaseClass):
|
|||
try:
|
||||
self.client.leave_swarm(force=self.parameters.force)
|
||||
except APIError as exc:
|
||||
self.client.fail(msg="This node can not leave the Swarm Cluster: %s" % to_native(exc))
|
||||
self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
|
||||
self.results['actions'].append("Node has left the swarm cluster")
|
||||
self.results['changed'] = True
|
||||
|
||||
|
@ -450,7 +450,7 @@ class SwarmManager(DockerBaseClass):
|
|||
|
||||
def remove(self):
|
||||
if not(self.__isSwarmManager()):
|
||||
self.client.fail(msg="This node is not a manager.")
|
||||
self.client.fail("This node is not a manager.")
|
||||
|
||||
try:
|
||||
status_down = self.__check_node_is_down()
|
||||
|
@ -458,12 +458,12 @@ class SwarmManager(DockerBaseClass):
|
|||
return
|
||||
|
||||
if not(status_down):
|
||||
self.client.fail(msg="Can not remove the node. The status node is ready and not down.")
|
||||
self.client.fail("Can not remove the node. The status node is ready and not down.")
|
||||
|
||||
try:
|
||||
self.client.remove_node(node_id=self.parameters.node_id, force=self.parameters.force)
|
||||
except APIError as exc:
|
||||
self.client.fail(msg="Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
|
||||
self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
|
||||
self.results['actions'].append("Node is removed from swarm cluster.")
|
||||
self.results['changed'] = True
|
||||
|
||||
|
|
|
@ -180,7 +180,7 @@ class DockerSwarmManager(DockerBaseClass):
|
|||
try:
|
||||
return self.client.inspect_swarm()
|
||||
except APIError as exc:
|
||||
self.client.fail_json(msg="Error inspecting docker swarm: %s" % to_native(exc))
|
||||
self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
|
||||
|
||||
def get_docker_items_list(self, docker_object=None, filters=None):
|
||||
items = None
|
||||
|
@ -194,8 +194,8 @@ class DockerSwarmManager(DockerBaseClass):
|
|||
elif docker_object == 'services':
|
||||
items = self.client.services(filters=filters)
|
||||
except APIError as exc:
|
||||
self.client.fail_json(msg="Error inspecting docker swarm for object '%s': %s" %
|
||||
(docker_object, to_native(exc)))
|
||||
self.client.fail("Error inspecting docker swarm for object '%s': %s" %
|
||||
(docker_object, to_native(exc)))
|
||||
|
||||
if self.verbose_output:
|
||||
return items
|
||||
|
|
|
@ -1415,14 +1415,13 @@ class DockerServiceManager(object):
|
|||
resolve=module.params['resolve_image']
|
||||
)
|
||||
except DockerException as e:
|
||||
return module.fail_json(
|
||||
msg="Error looking for an image named %s: %s" % (image, e))
|
||||
self.client.fail(
|
||||
"Error looking for an image named %s: %s" % (image, e))
|
||||
try:
|
||||
current_service = self.get_service(module.params['name'])
|
||||
except Exception as e:
|
||||
return module.fail_json(
|
||||
msg='Error looking for service named %s: %s' %
|
||||
(module.params['name'], e))
|
||||
self.client.fail(
|
||||
"Error looking for service named %s: %s" % (module.params['name'], e))
|
||||
try:
|
||||
new_service = DockerService.from_ansible_params(
|
||||
module.params,
|
||||
|
@ -1430,8 +1429,8 @@ class DockerServiceManager(object):
|
|||
image_digest
|
||||
)
|
||||
except Exception as e:
|
||||
return module.fail_json(
|
||||
msg='Error parsing module parameters: %s' % e)
|
||||
self.client.fail(
|
||||
"Error parsing module parameters: %s" % e)
|
||||
|
||||
changed = False
|
||||
msg = 'noop'
|
||||
|
|
|
@ -152,9 +152,9 @@ class TaskParameters(DockerBaseClass):
|
|||
|
||||
if self.force is not None:
|
||||
if self.recreate != 'never':
|
||||
client.module.fail_json(msg='Cannot use the deprecated "force" '
|
||||
'option when "recreate" is set. Please stop '
|
||||
'using the force option.')
|
||||
client.fail('Cannot use the deprecated "force" '
|
||||
'option when "recreate" is set. Please stop '
|
||||
'using the force option.')
|
||||
client.module.warn('The "force" option of docker_volume has been deprecated '
|
||||
'in Ansible 2.8. Please use the "recreate" '
|
||||
'option, which provides the same functionality as "force".')
|
||||
|
|
|
@ -95,7 +95,7 @@ def get_existing_volume(client, volume_name):
|
|||
except NotFound as dummy:
|
||||
return None
|
||||
except Exception as exc:
|
||||
client.module.fail_json(msg="Error inspecting volume: %s" % exc)
|
||||
client.fail("Error inspecting volume: %s" % exc)
|
||||
|
||||
|
||||
def main():
|
||||
|
|
Loading…
Reference in a new issue