Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)
Update bare exceptions to specify Exception.

This will keep us from accidentally catching program-exiting exceptions like KeyboardInterrupt and SystemExit.
parent 5147e792d3
commit 3fba006207

320 changed files with 659 additions and 656 deletions
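The behavior difference is easy to demonstrate with a minimal standalone sketch (illustrative only, not part of the patch): a bare except catches BaseException subclasses such as SystemExit and KeyboardInterrupt, so sys.exit() or Ctrl-C gets silently swallowed, while except Exception lets them propagate.

import sys

try:
    sys.exit(1)  # raises SystemExit, which derives from BaseException, not Exception
except:
    # bare except: SystemExit is swallowed and the program keeps running
    print("exit request swallowed")

try:
    sys.exit(1)
except Exception:
    # never reached: SystemExit does not derive from Exception,
    # so it propagates and the process exits as intended
    print("not reached")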
@@ -185,7 +185,7 @@ class ConnectionProcess(object):
                     self.sock.close()
                 if self.connection:
                     self.connection.close()
-            except:
+            except Exception:
                 pass
             finally:
                 if os.path.exists(self.socket_path):

@@ -62,7 +62,7 @@ def api_get(link, config):
         result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''),
                           url_password=config.get('auth', 'apipass').replace('\n', ''))
         return json.loads(result.read())
-    except:
+    except Exception:
         return None


@@ -99,7 +99,7 @@ def cache_available(config):

         try:
             existing = os.stat('/'.join([dpath, 'inventory']))
-        except:
+        except Exception:
             # cache doesn't exist or isn't accessible
             return False

@@ -463,38 +463,38 @@ class AosInventory(object):
         # Try to reach all parameters from File, if not available try from ENV
         try:
             self.aos_server = config.get('aos', 'aos_server')
-        except:
+        except Exception:
             if 'AOS_SERVER' in os.environ.keys():
                 self.aos_server = os.environ['AOS_SERVER']

         try:
             self.aos_server_port = config.get('aos', 'port')
-        except:
+        except Exception:
             if 'AOS_PORT' in os.environ.keys():
                 self.aos_server_port = os.environ['AOS_PORT']

         try:
             self.aos_username = config.get('aos', 'username')
-        except:
+        except Exception:
             if 'AOS_USERNAME' in os.environ.keys():
                 self.aos_username = os.environ['AOS_USERNAME']

         try:
             self.aos_password = config.get('aos', 'password')
-        except:
+        except Exception:
             if 'AOS_PASSWORD' in os.environ.keys():
                 self.aos_password = os.environ['AOS_PASSWORD']

         try:
             self.aos_blueprint = config.get('aos', 'blueprint')
-        except:
+        except Exception:
             if 'AOS_BLUEPRINT' in os.environ.keys():
                 self.aos_blueprint = os.environ['AOS_BLUEPRINT']

         try:
             if config.get('aos', 'blueprint_interface') in ['false', 'no']:
                 self.aos_blueprint_int = False
-        except:
+        except Exception:
             pass

     def parse_cli_args(self):
@@ -397,7 +397,7 @@ class AzureRM(object):
         for key in AZURE_CREDENTIAL_ENV_MAPPING:
             try:
                 credentials[key] = config.get(profile, key, raw=True)
-            except:
+            except Exception:
                 pass

         if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:

@@ -921,7 +921,7 @@ class AzureInventory(object):
         try:
             config = cp.ConfigParser()
             config.read(path)
-        except:
+        except Exception:
             pass

         if config is not None:

@@ -929,7 +929,7 @@ class AzureInventory(object):
             for key in AZURE_CONFIG_SETTINGS:
                 try:
                     settings[key] = config.get('azure', key, raw=True)
-                except:
+                except Exception:
                     pass

         return settings

@@ -88,7 +88,7 @@ import json

 try:
     import libbrook
-except:
+except Exception:
     sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook')

@@ -208,7 +208,7 @@ class CollinsInventory(object):
                     break
                 cur_page += 1
                 num_retries = 0
-            except:
+            except Exception:
                 self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
                 num_retries += 1
         return assets

@@ -277,7 +277,7 @@ class CollinsInventory(object):
         # Locates all server assets from Collins.
         try:
             server_assets = self.find_assets()
-        except:
+        except Exception:
             self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
             return False

@@ -288,7 +288,7 @@ class CollinsInventory(object):
             ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
             try:
                 ip_index = int(ip_index)
-            except:
+            except Exception:
                 self.log.error(
                     "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
                     ip_index)

@@ -350,7 +350,7 @@ class CollinsInventory(object):
         try:
             self.write_to_cache(self.cache, self.cache_path_cache)
             self.write_to_cache(self.inventory, self.cache_path_inventory)
-        except:
+        except Exception:
             self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
             return False
         return True

@@ -388,7 +388,7 @@ class CollinsInventory(object):
             json_inventory = cache.read()
             self.inventory = json.loads(json_inventory)
             return True
-        except:
+        except Exception:
             self.log.error("Error while loading inventory:\n%s",
                            traceback.format_exc())
             self.inventory = {}

@@ -402,7 +402,7 @@ class CollinsInventory(object):
             json_cache = cache.read()
             self.cache = json.loads(json_cache)
             return True
-        except:
+        except Exception:
             self.log.error("Error while loading host cache:\n%s",
                            traceback.format_exc())
             self.cache = {}
@@ -335,7 +335,7 @@ class ConsulInventory(object):
                 metadata = json.loads(metadata['Value'])
                 for k, v in metadata.items():
                     self.add_metadata(node_data, k, v)
-            except:
+            except Exception:
                 pass

     def load_groups_from_kv(self, node_data):

@@ -364,7 +364,7 @@ from collections import defaultdict
 for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
     try:
         del sys.path[sys.path.index(path)]
-    except:
+    except Exception:
         pass

 HAS_DOCKER_PY = True

@@ -107,7 +107,7 @@ try:
     from libcloud.compute.types import Provider
     from libcloud.compute.providers import get_driver
     _ = Provider.GCE
-except:
+except Exception:
     sys.exit("GCE inventory script requires libcloud >= 0.13")


@@ -289,7 +289,7 @@ class GceInventory(object):
             args = list(secrets.GCE_PARAMS)
             kwargs = secrets.GCE_KEYWORD_PARAMS
             secrets_found = True
-        except:
+        except Exception:
             pass

         if not secrets_found and secrets_path:

@@ -303,7 +303,7 @@ class GceInventory(object):
                 args = list(getattr(secrets, 'GCE_PARAMS', []))
                 kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                 secrets_found = True
-            except:
+            except Exception:
                 pass

         if not secrets_found:

@@ -88,7 +88,7 @@ try:
     from chube import api as chube_api
     from chube.datacenter import Datacenter
     from chube.linode_obj import Linode
-except:
+except Exception:
     try:
         # remove local paths and other stuff that may
         # cause an import conflict, as chube is sensitive

@@ -29,7 +29,7 @@ import sys
 import json
 try:
     import configparser
-except:
+except Exception:
     from six.moves import configparser

 # Set up defaults
@@ -254,7 +254,7 @@ class NSoTInventory(object):
             obj[group]['vars'] = hostvars
         try:
             assert isinstance(query, string_types)
-        except:
+        except Exception:
             sys.exit('ERR: Group queries must be a single string\n'
                      ' Group: %s\n'
                      ' Query: %s\n' % (group, query)

@@ -69,7 +69,7 @@ def parse_args():
 try:
     # check if rackhd url(ie:10.1.1.45:8080) is specified in the environment
     RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
-except:
+except Exception:
     # use default values
     pass

@@ -81,7 +81,7 @@ if (parse_args().host):
     try:
         nodeids += parse_args().host.split(',')
         RackhdInventory(nodeids)
-    except:
+    except Exception:
         pass
 if (parse_args().list):
     try:

@@ -92,5 +92,5 @@ if (parse_args().list):
             if entry['type'] == 'compute':
                 nodeids.append(entry['id'])
         RackhdInventory(nodeids)
-    except:
+    except Exception:
         pass

@@ -242,7 +242,7 @@ def _list_into_cache(regions):
         # pylint: disable=unexpected-keyword-arg
         ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                           'RAX_ACCESS_IP_VERSION', 4, islist=True))
-    except:
+    except Exception:
         ip_versions = [4]
     else:
         ip_versions = [v for v in ip_versions if v in [4, 6]]

@@ -258,7 +258,7 @@ class RudderInventory(object):

         try:
             response, content = self.conn.request(target.geturl(), method, body, headers)
-        except:
+        except Exception:
             self.fail_with_error('Error connecting to Rudder server')

         try:
@@ -53,7 +53,7 @@ import json

 try:
     import requests
-except:
+except Exception:
     sys.exit('requests package is required for this inventory script')


@@ -40,7 +40,7 @@ def get_hosts(host=None):
         else:
             returned = {'all': set(), '_metadata': {}}
             p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE)
-    except:
+    except Exception:
         sys.exit(1)

     hostvars = {}

@@ -50,7 +50,7 @@ def get_hosts(host=None):

         try:
             k, v = line.split(':', 1)
-        except:
+        except Exception:
             continue

         if k == '':

@@ -67,7 +67,7 @@ def get_hosts(host=None):
                 if 'Value' in ipinfo:
                     a, ip = ipinfo.split(':', 1)
                     hostvars[curname]['ansible_ssh_host'] = ip.strip()
-            except:
+            except Exception:
                 pass

             continue

@@ -45,7 +45,7 @@ except ImportError:

 try:
     from zabbix_api import ZabbixAPI
-except:
+except Exception:
     print("Error: Zabbix API library must be installed: pip install zabbix-api.",
           file=sys.stderr)
     sys.exit(1)

@@ -305,7 +305,7 @@ class AzureRM(object):
         for key in AZURE_CREDENTIAL_ENV_MAPPING:
             try:
                 credentials[key] = config.get(profile, key, raw=True)
-            except:
+            except Exception:
                 pass

         if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:

@@ -571,7 +571,7 @@ class AzureKeyVaultSecret:
         try:
             config = cp.ConfigParser()
             config.read(path)
-        except:
+        except Exception:
             pass

         if config is not None:

@@ -579,7 +579,7 @@ class AzureKeyVaultSecret:
             for key in AZURE_VAULT_SETTINGS:
                 try:
                     settings[key] = config.get('azure_keyvault', key, raw=True)
-                except:
+                except Exception:
                     pass

         return settings
@@ -107,7 +107,7 @@ def opts_docs(cli_class_name, cli_module_name):
     # parse the common options
     try:
         cli.parse()
-    except:
+    except Exception:
         pass

     # base/common cli info

@@ -154,7 +154,7 @@ def opts_docs(cli_class_name, cli_module_name):

         try:
             cli.parse()
-        except:
+        except Exception:
             pass

         # FIXME/TODO: needed?

@@ -221,7 +221,7 @@ def runtest(modfile, argspath, modname, module_style, interpreters):
         print(out)
         print(err)
         results = json.loads(out)
-    except:
+    except Exception:
         print("*" * 35)
         print("INVALID OUTPUT FORMAT")
         print(out)

@@ -62,7 +62,7 @@ ansible_facts = {}
 for fact in facts:
     try:
         ansible_facts[fact] = parsed['ansible_facts']['ansible_' + fact]
-    except:
+    except Exception:
         ansible_facts[fact] = "N/A"

 nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']

@@ -78,7 +78,7 @@ class ConfigCLI(CLI):
                 raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))

             os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
-        except:
+        except Exception:
             if self.action in ['view']:
                 raise
             elif self.action in ['edit', 'update']:

@@ -30,7 +30,7 @@ HAS_PYCRYPTO_ATFORK = False
 try:
     from Crypto.Random import atfork
     HAS_PYCRYPTO_ATFORK = True
-except:
+except Exception:
     # We only need to call atfork if pycrypto is used because it will need to
     # reinitialize its RNG. Since old paramiko could be using pycrypto, we
     # need to take charge of calling it.

@@ -153,7 +153,7 @@ class WorkerProcess(multiprocessing.Process):
                     task_fields=self._task.dump_attrs(),
                 )
                 self._final_q.put(task_result, block=False)
-            except:
+            except Exception:
                 display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
                 display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))

@@ -183,7 +183,7 @@ class GalaxyAPI(object):
             role_name = parts[-1]
             if notify:
                 display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
-        except:
+        except Exception:
             raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)

         url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)

@@ -210,7 +210,7 @@ class GalaxyAPI(object):
                 results += data['results']
                 done = (data.get('next_link', None) is None)
             return results
-        except:
+        except Exception:
             return None

     @g_connect

@@ -60,12 +60,12 @@ class GalaxyLogin(object):

         try:
             self.github_username = input("Github Username: ")
-        except:
+        except Exception:
             pass

         try:
             self.github_password = getpass.getpass("Password for %s: " % self.github_username)
-        except:
+        except Exception:
             pass

         if not self.github_username or not self.github_password:

@@ -100,7 +100,7 @@ class GalaxyRole(object):
             try:
                 f = open(meta_path, 'r')
                 self._metadata = yaml.safe_load(f)
-            except:
+            except Exception:
                 display.vvvvv("Unable to load metadata for %s" % self.name)
                 return False
             finally:

@@ -120,7 +120,7 @@ class GalaxyRole(object):
             try:
                 f = open(info_path, 'r')
                 self._install_info = yaml.safe_load(f)
-            except:
+            except Exception:
                 display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
                 return False
             finally:

@@ -144,7 +144,7 @@ class GalaxyRole(object):
         with open(info_path, 'w+') as f:
             try:
                 self._install_info = yaml.safe_dump(info, f)
-            except:
+            except Exception:
                 return False

         return True

@@ -159,7 +159,7 @@ class GalaxyRole(object):
             try:
                 rmtree(self.path)
                 return True
-            except:
+            except Exception:
                 pass

         return False

@@ -285,7 +285,7 @@ class GalaxyRole(object):
                 else:
                     try:
                         self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
-                    except:
+                    except Exception:
                         raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")

                 # we strip off any higher-level directories for all of the files contained within

@@ -116,7 +116,7 @@ def retry(retries=None, retry_pause=1):
                     raise Exception("Retry limit exceeded: %d" % retries)
                 try:
                     ret = f(*args, **kwargs)
-                except:
+                except Exception:
                     pass
                 if ret:
                     break
@@ -1069,7 +1069,7 @@ class AzureRMAuth(object):
         for key in AZURE_CREDENTIAL_ENV_MAPPING:
             try:
                 credentials[key] = config.get(profile, key, raw=True)
-            except:
+            except Exception:
                 pass

         if credentials.get('subscription_id'):

@@ -553,7 +553,7 @@ def human_to_bytes(number, default_unit=None, isbits=False):
         raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
     try:
         num = float(m.group(1))
-    except:
+    except Exception:
         raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))

     unit = m.group(2)

@@ -566,7 +566,7 @@ def human_to_bytes(number, default_unit=None, isbits=False):
         range_key = unit[0].upper()
         try:
             limit = SIZE_RANGES[range_key]
-        except:
+        except Exception:
             raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))

     # default value

@@ -1028,7 +1028,7 @@ class AnsibleModule(object):
             f = open('/proc/mounts', 'r')
             mount_data = f.readlines()
             f.close()
-        except:
+        except Exception:
             return (False, None)
         path_mount_point = self.find_mount_point(path)
         for line in mount_data:

@@ -1310,7 +1310,7 @@ class AnsibleModule(object):
                 output['attr_flags'] = res[1].replace('-', '').strip()
                 output['version'] = res[0].strip()
                 output['attributes'] = format_attributes(output['attr_flags'])
-            except:
+            except Exception:
                 pass
         return output

@@ -1820,7 +1820,7 @@ class AnsibleModule(object):
         if value.startswith("{"):
             try:
                 return json.loads(value)
-            except:
+            except Exception:
                 (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
                 if exc is not None:
                     raise TypeError('unable to evaluate string as dictionary')

@@ -2163,7 +2163,7 @@ class AnsibleModule(object):
             if not os.access(cwd, os.F_OK | os.R_OK):
                 raise Exception()
             return cwd
-        except:
+        except Exception:
             # we don't have access to the cwd, probably because of sudo.
             # Try and move to a neutral location to prevent errors
             for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:

@@ -2171,7 +2171,7 @@ class AnsibleModule(object):
                     if os.access(cwd, os.F_OK | os.R_OK):
                         os.chdir(cwd)
                         return cwd
-                except:
+                except Exception:
                     pass
         # we won't error here, as it may *not* be a problem,
         # and we don't want to break modules unnecessarily

@@ -28,7 +28,7 @@ def get_distribution():
                     distribution = 'Amazon'
                 else:
                     distribution = 'OtherLinux'
-    except:
+    except Exception:
         # FIXME: MethodMissing, I assume?
         distribution = platform.dist()[0].capitalize()
     return distribution
@@ -49,7 +49,7 @@ try:
     import boto3
     import botocore
     HAS_BOTO3 = True
-except:
+except Exception:
     HAS_BOTO3 = False

 try:

@@ -50,7 +50,7 @@ def api_wrapper(func):
             module.fail_json(msg=e.message)
         except core.exceptions.SystemNotFoundException as e:
             module.fail_json(msg=e.message)
-        except:
+        except Exception:
             raise
     return __wrapper

@@ -135,7 +135,7 @@ def not_in_host_file(self, host):
                 hash.update(host)
                 if hash.digest() == kn_host.decode('base64'):
                     return False
-            except:
+            except Exception:
                 # invalid hashed host key, skip it
                 continue
         else:

@@ -164,7 +164,7 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
         if create_dir:
             try:
                 os.makedirs(user_ssh_dir, int('700', 8))
-            except:
+            except Exception:
                 module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
         else:
             module.fail_json(msg="%s does not exist" % user_ssh_dir)

@@ -81,7 +81,7 @@ try:
     from solidfire.models import Schedule, ScheduleInfo

     HAS_SF_SDK = True
-except:
+except Exception:
     HAS_SF_SDK = False


@@ -124,7 +124,7 @@ def create_sf_connection(module, port=None):
         try:
             return_val = ElementFactory.create(hostname, username, password, port=port)
             return return_val
-        except:
+        except Exception:
             raise Exception("Unable to create SF connection")
     else:
         module.fail_json(msg="the python SolidFire SDK module is required")

@@ -237,7 +237,7 @@ def request(url, data=None, headers=None, method='GET', use_proxy=True,
             data = json.loads(raw_data)
         else:
             raw_data = None
-    except:
+    except Exception:
         if ignore_errors:
             pass
         else:

@@ -6,7 +6,7 @@ try:
     import solidfire.common

     HAS_SF_SDK = True
-except:
+except Exception:
     HAS_SF_SDK = False


@@ -78,7 +78,7 @@ def axapi_call(module, url, post=None):
             data = {"response": {"status": "OK"}}
         else:
             data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
-    except:
+    except Exception:
         module.fail_json(msg="could not read the result from the host")
     finally:
         rsp.close()

@@ -126,7 +126,7 @@ def axapi_call_v3(module, url, method=None, body=None, signature=None):
             data = {"response": {"status": "OK"}}
         else:
             data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
-    except:
+    except Exception:
         module.fail_json(msg="could not read the result from the host")
     finally:
         rsp.close()
@@ -148,7 +148,7 @@ class ACIModule(object):
                 return true
             elif bool_value is False:
                 return false
-        except:
+        except Exception:
             # This provides backward compatibility to Ansible v2.4, deprecate in Ansible v2.8
             if value == true:
                 self.module.deprecate("Boolean value '%s' is no longer valid, please use 'yes' as a boolean value." % value, '2.9')

@@ -164,7 +164,7 @@ class ACIModule(object):
         ''' Return an ACI-compatible ISO8601 formatted time: 2123-12-12T00:00:00.000+00:00 '''
         try:
             return dt.isoformat(timespec='milliseconds')
-        except:
+        except Exception:
             tz = dt.strftime('%z')
             return '%s.%03d%s:%s' % (dt.strftime('%Y-%m-%dT%H:%M:%S'), dt.microsecond / 1000, tz[:3], tz[3:])

@@ -231,7 +231,7 @@ class ACIModule(object):

         try:
             sig_key = load_privatekey(FILETYPE_PEM, open(self.params['private_key'], 'r').read())
-        except:
+        except Exception:
             self.module.fail_json(msg='Cannot load private key %s' % self.params['private_key'])

         # NOTE: ACI documentation incorrectly adds a space between method and path

@@ -183,7 +183,7 @@ class MSCModule(object):
         elif self.status >= 400:
             try:
                 payload = json.loads(resp.read())
-            except:
+            except Exception:
                 payload = json.loads(info['body'])
             if 'code' in payload:
                 self.fail_json(msg='MSC Error {code}: {message}'.format(**payload), data=data, info=info, payload=payload)

@@ -147,7 +147,7 @@ def content_to_dict(module, content):
         if not content_dict:
             raise Exception()

-    except:
+    except Exception:
         module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid")

     # replace the string with the dict

@@ -163,7 +163,7 @@ def do_load_resource(module, collection, name):

     try:
         item = find_collection_item(collection, name, '')
-    except:
+    except Exception:
         module.fail_json(msg="An error occurred while running 'find_collection_item'")

     if item.exists:

@@ -38,7 +38,7 @@ try:
     from ansible.module_utils.network.cnos import cnos_errorcodes
     from ansible.module_utils.network.cnos import cnos_devicerules
     HAS_LIB = True
-except:
+except Exception:
     HAS_LIB = False
 from distutils.cmd import Command
 from ansible.module_utils._text import to_text

@@ -1372,7 +1372,7 @@ def enterEnableModeForDevice(enablePassword, timeout, obj):
             gotit = buff.find("#")
             if(gotit != -1):
                 return retVal
-        except:
+        except Exception:
             retVal = retVal + "\n Error-101"
             flag = True
     if(retVal == ""):

@@ -1396,7 +1396,7 @@ def waitForDeviceResponse(command, prompt, timeout, obj):
             gotit = buff.find(prompt)
             if(gotit != -1):
                 flag = True
-        except:
+        except Exception:
             # debugOutput(prompt)
             if prompt == "(yes/no)?":
                 retVal = retVal
@@ -461,7 +461,7 @@ class Template:
         if value:
             try:
                 return ast.literal_eval(value)
-            except:
+            except Exception:
                 return str(value)
         else:
             return None

@@ -79,7 +79,7 @@ def backup(module, running_config):
     if not os.path.exists(backup_path):
         try:
             os.mkdir(backup_path)
-        except:
+        except Exception:
             module.fail_json(msg="Can't create directory {0} Permission denied ?".format(backup_path))
     tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
     if 0 < len(backup_filename):

@@ -88,7 +88,7 @@ def backup(module, running_config):
         filename = '%s/%s_config.%s' % (backup_path, module.params['host'], tstamp)
     try:
         open(filename, 'w').write(running_config)
-    except:
+    except Exception:
         module.fail_json(msg="Can't create backup file {0} Permission denied ?".format(filename))


@@ -292,7 +292,7 @@ class MerakiModule(object):
                            body=json.loads(to_native(info['body'])))
         try:
             return json.loads(to_native(resp.read()))
-        except:
+        except Exception:
             pass

     def exit_json(self, **kwargs):

@@ -217,7 +217,7 @@ def rax_find_loadbalancer(module, rax_module, loadbalancer):
     clb = rax_module.cloud_loadbalancers
     try:
         found = clb.get(loadbalancer)
-    except:
+    except Exception:
         found = []
     for lb in clb.list():
         if loadbalancer == lb.name:

@@ -35,7 +35,7 @@ class RedfishUtils(object):
         except URLError as e:
             return {'ret': False, 'msg': "URL Error: %s" % e.reason}
         # Almost all errors should be caught above, but just in case
-        except:
+        except Exception:
             return {'ret': False, 'msg': "Unknown error"}
         return {'ret': True, 'data': data}

@@ -53,7 +53,7 @@ class RedfishUtils(object):
         except URLError as e:
             return {'ret': False, 'msg': "URL Error: %s" % e.reason}
         # Almost all errors should be caught above, but just in case
-        except:
+        except Exception:
             return {'ret': False, 'msg': "Unknown error"}
         return {'ret': True, 'resp': resp}

@@ -71,7 +71,7 @@ class RedfishUtils(object):
         except URLError as e:
             return {'ret': False, 'msg': "URL Error: %s" % e.reason}
         # Almost all errors should be caught above, but just in case
-        except:
+        except Exception:
             return {'ret': False, 'msg': "Unknown error"}
         return {'ret': True, 'resp': resp}

@@ -89,7 +89,7 @@ class RedfishUtils(object):
         except URLError as e:
             return {'ret': False, 'msg': "URL Error: %s" % e.reason}
         # Almost all errors should be caught above, but just in case
-        except:
+        except Exception:
             return {'ret': False, 'msg': "Unknown error"}
         return {'ret': True, 'resp': resp}
@@ -31,7 +31,7 @@
 try:
     import ucsmsdk
     HAS_UCSMSDK = True
-except:
+except Exception:
     HAS_UCSMSDK = False

 ucs_argument_spec = dict(

@@ -71,13 +71,13 @@ urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectH
 try:
     from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
     HAS_URLPARSE = True
-except:
+except Exception:
     HAS_URLPARSE = False

 try:
     import ssl
     HAS_SSL = True
-except:
+except Exception:
     HAS_SSL = False

 try:

@@ -436,7 +436,7 @@ def generic_urlparse(parts):
             generic_parts['password'] = password
             generic_parts['hostname'] = hostname
             generic_parts['port'] = port
-        except:
+        except Exception:
             generic_parts['username'] = None
             generic_parts['password'] = None
             generic_parts['hostname'] = parts[1]

@@ -673,7 +673,7 @@ class SSLValidationHandler(urllib_request.BaseHandler):
             (http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
             if int(resp_code) not in valid_codes:
                 raise Exception
-        except:
+        except Exception:
             raise ProxyError('Connection to proxy failed')

     def detect_no_proxy(self, url):

@@ -784,7 +784,7 @@ class SSLValidationHandler(urllib_request.BaseHandler):
             # cleanup the temp file created, don't worry
             # if it fails for some reason
             os.remove(tmp_ca_cert_path)
-        except:
+        except Exception:
             pass

         try:

@@ -792,7 +792,7 @@ class SSLValidationHandler(urllib_request.BaseHandler):
             # if it fails for some reason
             if to_add_ca_cert_path:
                 os.remove(to_add_ca_cert_path)
-        except:
+        except Exception:
             pass

         return req

@@ -1305,7 +1305,7 @@ def fetch_url(module, url, data=None, headers=None, method=None,
         try:
             # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
             info.update(dict((k.lower(), v) for k, v in e.info().items()))
-        except:
+        except Exception:
             pass

         info.update({'msg': to_native(e), 'body': body, 'status': e.code})

@@ -153,7 +153,7 @@ from ansible.module_utils.aws.direct_connect import (DirectConnectError, delete_

 try:
     from botocore.exceptions import BotoCoreError, ClientError
-except:
+except Exception:
     pass
     # handled by imported AnsibleAWSModule

@@ -162,7 +162,7 @@ import time

 try:
     import botocore
-except:
+except Exception:
     pass
     # handled by imported HAS_BOTO3

@@ -201,7 +201,7 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clea
                 kms.put_key_policy(KeyId=keyarn, PolicyName='default', Policy=policy_json_string)
                 # returns nothing, so we have to just assume it didn't throw
                 ret['changed'] = True
-            except:
+            except Exception:
                 raise

     ret['changes_needed'] = changes_needed

@@ -91,7 +91,7 @@ rules:

 try:
     from botocore.exceptions import ClientError, BotoCoreError
-except:
+except Exception:
     # handled by HAS_BOTO3 check in main
     pass

@@ -166,7 +166,7 @@ def create_update_parameter(client, module):

     try:
         existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
-    except:
+    except Exception:
         pass

     if existing_parameter:

@@ -477,7 +477,7 @@ def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
         try:
             stack = get_stack_facts(cfn, stack_name)
             existed.append('yes')
-        except:
+        except Exception:
             # If the stack previously existed, and now can't be found then it's
             # been deleted successfully.
             if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.

@@ -608,7 +608,7 @@ def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
     if isinstance(tags, str):
         try:
             tags = literal_eval(tags)
-        except:
+        except Exception:
             pass

     # if not a string type, convert and make sure it's a text string

@@ -1156,7 +1156,7 @@ def create_autoscaling_group(connection):
         else:
             try:
                 ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
-            except:
+            except Exception:
                 launch_template = as_group['LaunchTemplate']
                 # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
                 ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']}

@@ -516,7 +516,7 @@ class ElbManager(object):
     def get_info(self):
         try:
             check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
-        except:
+        except Exception:
             check_elb = None

         if not check_elb:

@@ -528,11 +528,11 @@ class ElbManager(object):
         else:
             try:
                 lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
-            except:
+            except Exception:
                 lb_cookie_policy = None
             try:
                 app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
-            except:
+            except Exception:
                 app_cookie_policy = None

             info = {

@@ -506,7 +506,7 @@ class Ec2Metadata(object):
                     self._data['%s' % (new_uri)] = content
                     for (key, value) in dict.items():
                         self._data['%s:%s' % (new_uri, key.lower())] = value
-                except:
+                except Exception:
                     self._data['%s' % (new_uri)] = content  # not a stringifed JSON string

     def fix_invalid_varnames(self, data):

@@ -124,7 +124,7 @@ from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dic

 try:
     from botocore.exceptions import BotoCoreError, ClientError
-except:
+except Exception:
     pass  # Handled by AnsibleAWSModule


@@ -306,7 +306,7 @@ def run(ecr, params, verbosity):
                 ecr.set_repository_policy(
                     registry_id, name, policy_text, force_set_policy)
                 result['changed'] = True
-            except:
+            except Exception:
                 # Some failure w/ the policy. It's helpful to know what the
                 # policy is.
                 result['policy'] = policy_text

@@ -512,7 +512,7 @@ class ElbManager(object):
     def get_info(self):
         try:
             check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
-        except:
+        except Exception:
             check_elb = None

         if not check_elb:

@@ -524,11 +524,11 @@ class ElbManager(object):
         else:
             try:
                 lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
-            except:
+            except Exception:
                 lb_cookie_policy = None
             try:
                 app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
-            except:
+            except Exception:
                 app_cookie_policy = None

             info = {
@@ -134,7 +134,7 @@ from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn

 try:
     from botocore.exceptions import ClientError
-except:
+except Exception:
     pass  # will be protected by AnsibleAWSModule

@@ -224,7 +224,7 @@ def set_queue_attribute(queue, attribute, value, check_mode=False):

     try:
         existing_value = queue.get_attributes(attributes=attribute)[attribute]
-    except:
+    except Exception:
         existing_value = ''

     # convert dict attributes to JSON strings (sort keys for comparing)
@@ -68,7 +68,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureHttpError
-except:
+except Exception:
     # handled in azure_rm_common
     pass

@@ -121,7 +121,7 @@ appserviceplans:
 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass
@@ -74,7 +74,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # handled in azure_rm_common
     pass

@@ -120,7 +120,7 @@ try:
     from azure.mgmt.cdn.models import ErrorResponseException
     from azure.common import AzureHttpError
     from azure.mgmt.cdn import CdnManagementClient
-except:
+except Exception:
     # handled in azure_rm_common
     pass
@@ -590,7 +590,7 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
                 )
                 for op in self._get_failed_nested_operations(operations)
             ]
-        except:
+        except Exception:
             # If we fail here, the original error gets lost and user receives wrong error message/stacktrace
             pass
         self.log(dict(failed_deployment_operations=results), pretty_print=True)
@@ -98,7 +98,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass

@@ -80,7 +80,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase

 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass
@@ -100,7 +100,7 @@ azure_functionapps:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass

@@ -167,7 +167,7 @@ images:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass
@@ -86,7 +86,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase
 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureHttpError
-except:
+except Exception:
     # handled in azure_rm_common
     pass

@@ -79,7 +79,7 @@ from ansible.module_utils.azure_rm_common import AzureRMModuleBase

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # handled in azure_rm_common
     pass
@@ -108,7 +108,7 @@ azure_networkinterfaces:
 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass

@@ -165,7 +165,7 @@ class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
         item = None
         try:
             item = self.network_client.network_interfaces.get(self.resource_group, self.name)
-        except:
+        except Exception:
             pass

         if item and self.has_tags(item.tags, self.tags):
@@ -77,7 +77,7 @@ azure_publicipaddresses:
 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass
@@ -258,7 +258,7 @@ class AzureRMResource(AzureRMModuleBase):
                 try:
                     response = json.loads(original.text)
                     needs_update = (dict_merge(response, self.body) != response)
-                except:
+                except Exception:
                     pass

             if needs_update:
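The needs_update test above relies on a recursive dictionary merge: if overlaying the desired body onto the current resource leaves it unchanged, the resource is already in the requested state. A simplified stand-in for dict_merge from ansible.module_utils.common.dict_transformations, for illustration only:

    def dict_merge(a, b):
        # Recursively overlay b onto a without mutating either input.
        merged = dict(a)
        for key, value in b.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = dict_merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    current = {'location': 'eastus', 'tags': {'env': 'dev', 'team': 'a'}}
    desired = {'tags': {'env': 'dev'}}
    print(dict_merge(current, desired) != current)  # False: already in sync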
@@ -266,7 +266,7 @@ class AzureRMResource(AzureRMModuleBase):
         if self.state == 'present':
             try:
                 response = json.loads(response.text)
-            except:
+            except Exception:
                 response = response.text
         else:
             response = None

@@ -198,7 +198,7 @@ class AzureRMResourceFacts(AzureRMModuleBase):
                 self.results['response'] = response
             else:
                 self.results['response'] = [response]
-        except:
+        except Exception:
             self.results['response'] = []

         return self.results
@@ -77,7 +77,7 @@ azure_resourcegroups:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass

@@ -98,7 +98,7 @@ routes:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass

@@ -190,7 +190,7 @@ azure_securitygroups:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass
@@ -93,7 +93,7 @@ azure_storageaccounts:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass

@@ -141,7 +141,7 @@ from ansible.module_utils.common.dict_transformations import (
 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureHttpError
-except:
+except Exception:
     # handled in azure_rm_common
     pass
@@ -210,7 +210,7 @@ from ansible.module_utils.common.dict_transformations import _camel_to_snake
 try:
     from msrestazure.azure_exceptions import CloudError
     from azure.common import AzureHttpError
-except:
+except Exception:
     # handled in azure_rm_common
     pass

@@ -190,7 +190,7 @@ vms:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass
@@ -240,7 +240,7 @@ import re

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # handled in azure_rm_common
     pass
@@ -308,7 +308,7 @@ class AzureRMVirtualMachineScaleSetFacts(AzureRMModuleBase):
                 subnet_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0]
                              ['properties']['ipConfigurations'][0]['properties']['subnet']['id'])
                 subnet_name = re.sub('.*subnets\\/', '', subnet_id)
-            except:
+            except Exception:
                 self.log('Could not extract subnet name')

             try:

@@ -316,13 +316,13 @@ class AzureRMVirtualMachineScaleSetFacts(AzureRMModuleBase):
                                             ['properties']['ipConfigurations'][0]['properties']['loadBalancerBackendAddressPools'][0]['id'])
                 load_balancer_name = re.sub('\\/backendAddressPools.*', '', re.sub('.*loadBalancers\\/', '', backend_address_pool_id))
                 virtual_network_name = re.sub('.*virtualNetworks\\/', '', re.sub('\\/subnets.*', '', subnet_id))
-            except:
+            except Exception:
                 self.log('Could not extract load balancer / virtual network name')

             try:
                 ssh_password_enabled = (not vmss['properties']['virtualMachineProfile']['osProfile'],
                                         ['linuxConfiguration']['disablePasswordAuthentication'])
-            except:
+            except Exception:
                 self.log('Could not extract SSH password enabled')

             data_disks = vmss['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])
@@ -90,7 +90,7 @@ azure_vmimages:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass

@@ -86,7 +86,7 @@ azure_virtualnetworks:

 try:
     from msrestazure.azure_exceptions import CloudError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass
@@ -150,7 +150,7 @@ try:
     from msrestazure.azure_exceptions import CloudError
     from msrestazure.azure_operation import AzureOperationPoller
     from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except:
+except Exception:
     # This is handled in azure_rm_common
     pass
@@ -165,7 +165,7 @@ class CloudStackFacts(object):
         try:
             # this data come form users, we try what we can to parse it...
             return yaml.safe_load(self._fetch(CS_USERDATA_BASE_URL))
-        except:
+        except Exception:
             return None

     def _fetch(self, path):
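The CloudStack hunk above parses arbitrary user-supplied data and treats any parse failure as "no usable user data". A standalone sketch, assuming PyYAML is installed and substituting a local string for the metadata-server fetch:

    import yaml

    def parse_userdata(raw):
        try:
            return yaml.safe_load(raw)  # user-supplied; may be any format
        except Exception:
            return None                 # unparseable user data is ignored

    print(parse_userdata('key: value'))  # {'key': 'value'}
    print(parse_userdata('{'))           # None: invalid YAML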
@@ -890,7 +890,7 @@ try:
     else:
         from docker.utils.types import Ulimit, LogConfig
         from docker.errors import APIError, NotFound
-except Exception as dummy:
+except Exception:
     # missing docker-py handled in ansible.module_utils.docker
     pass

@@ -263,7 +263,7 @@ try:
     from docker.errors import NotFound
     if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
         from docker.types import IPAMPool, IPAMConfig
-except Exception as dummy:
+except Exception:
     # missing docker-py handled in ansible.module_utils.docker_common
     pass

@@ -471,7 +471,7 @@ from ansible.module_utils._text import to_text
 try:
     from distutils.version import LooseVersion
     from docker import types
-except Exception as dummy:
+except Exception:
     # missing docker-py handled in ansible.module_utils.docker
     pass
@@ -846,7 +846,7 @@ class DockerService(DockerBaseClass):
             network_id = None
             try:
                 network_id = list(filter(lambda n: n['name'] == network_name, docker_networks))[0]['id']
-            except Exception as dummy:
+            except Exception:
                 pass
             if network_id:
                 networks.append({'Target': network_id})
@@ -330,11 +330,11 @@ def get_instance_info(inst):

     try:
         netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
-    except:
+    except Exception:
         netname = None
     try:
         subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
-    except:
+    except Exception:
         subnetname = None
     if 'disks' in inst.extra:
         disk_names = [disk_info['source'].split('/')[-1]
@@ -239,7 +239,7 @@ def main():
                 zone, node_name = node.split('/')
                 nodes.append(gce.ex_get_node(node_name, zone))
                 output_nodes.append(node)
-            except:
+            except Exception:
                 # skip nodes that are badly formatted or don't exist
                 pass
         try:
@@ -188,7 +188,7 @@ def main():
                 is_attached = True
                 json_output['attached_mode'] = d['mode']
                 json_output['attached_to_instance'] = inst.name
-        except:
+        except Exception:
             pass

         # find disk if it already exists
|
@ -210,7 +210,7 @@ def main():
|
||||||
size_gb = int(round(float(size_gb)))
|
size_gb = int(round(float(size_gb)))
|
||||||
if size_gb < 1:
|
if size_gb < 1:
|
||||||
raise Exception
|
raise Exception
|
||||||
except:
|
except Exception:
|
||||||
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
|
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
|
||||||
changed=False)
|
changed=False)
|
||||||
|
|
||||||
|
|
|
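The size_gb hunk above funnels both parse failures and out-of-range values into a single error path by raising inside the try block. The same validate-or-fail shape outside of Ansible; parse_size_gb is an illustrative name:

    def parse_size_gb(raw):
        try:
            size_gb = int(round(float(raw)))
            if size_gb < 1:
                raise ValueError  # route range errors into the same handler
            return size_gb
        except Exception:
            raise SystemExit("Must supply a size_gb larger than 1 GB")

    print(parse_size_gb('10.4'))  # 10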
@@ -203,7 +203,7 @@ def _validate_params(params):
     try:
         check_params(params, fields)
         _validate_backend_params(params['backends'])
-    except:
+    except Exception:
         raise

     return (True, '')

@@ -233,7 +233,7 @@ def _validate_backend_params(backends):
     for backend in backends:
         try:
             check_params(backend, fields)
-        except:
+        except Exception:
             raise

     if 'max_rate' in backend and 'max_rate_per_instance' in backend:
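In the two validators above, and in the GCP helpers that follow, the handler body is just `raise`: a bare raise inside an except block re-raises the active exception with its original traceback, so widening `except:` to `except Exception:` is behaviour-preserving for everything except process-exiting exceptions. A small demonstration; do_request is a stub, not a GCPUtils call:

    def do_request():
        raise RuntimeError("simulated API failure")

    def get_resource():
        try:
            return do_request()
        except Exception:
            raise  # re-raise unchanged, original traceback intact

    try:
        get_resource()
    except RuntimeError as err:
        print(err)  # simulated API failure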
@@ -178,7 +178,7 @@ def get_global_forwarding_rule(client, name, project_id=None):
         req = client.globalForwardingRules().get(
             project=project_id, forwardingRule=name)
         return GCPUtils.execute_api_client_req(req, raise_404=False)
-    except:
+    except Exception:
         raise

@@ -204,7 +204,7 @@ def create_global_forwarding_rule(client, params, project_id):
             name=params['forwarding_rule_name'],
             project_id=project_id)
         return (True, return_data)
-    except:
+    except Exception:
         raise

@@ -229,7 +229,7 @@ def delete_global_forwarding_rule(client, name, project_id):
             project=project_id, forwardingRule=name)
         return_data = GCPUtils.execute_api_client_req(req, client)
         return (True, return_data)
-    except:
+    except Exception:
         raise

@@ -270,7 +270,7 @@ def update_global_forwarding_rule(client, forwarding_rule, params, name, project
         return_data = GCPUtils.execute_api_client_req(
             req, client=client, raw=False)
         return (True, return_data)
-    except:
+    except Exception:
         raise
@@ -261,7 +261,7 @@ def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
         args = {'project': project_id, entity_name: name}
         req = resource.get(**args)
         return GCPUtils.execute_api_client_req(req, raise_404=False)
-    except:
+    except Exception:
         raise

@@ -289,7 +289,7 @@ def create_healthcheck(client, params, project_id, resource_type='HTTP'):
             name=params['healthcheck_name'],
             project_id=project_id)
         return (True, return_data)
-    except:
+    except Exception:
         raise

@@ -315,7 +315,7 @@ def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
         req = resource.delete(**args)
         return_data = GCPUtils.execute_api_client_req(req, client)
         return (True, return_data)
-    except:
+    except Exception:
         raise

@@ -356,7 +356,7 @@ def update_healthcheck(client, healthcheck, params, name, project_id,
         return_data = GCPUtils.execute_api_client_req(
             req, client=client, raw=False)
         return (True, return_data)
-    except:
+    except Exception:
         raise
@@ -161,7 +161,7 @@ def create_target_http_proxy(client, params, project_id):
             name=params['target_proxy_name'],
             project_id=project_id)
         return (True, return_data)
-    except:
+    except Exception:
         raise

@@ -186,7 +186,7 @@ def delete_target_http_proxy(client, name, project_id):
             project=project_id, targetHttpProxy=name)
         return_data = GCPUtils.execute_api_client_req(req, client)
         return (True, return_data)
-    except:
+    except Exception:
         raise

@@ -227,7 +227,7 @@ def update_target_http_proxy(client, target_proxy, params, name, project_id):
         return_data = GCPUtils.execute_api_client_req(
             req, client=client, raw=False)
         return (True, return_data)
-    except:
+    except Exception:
         raise
Some files were not shown because too many files have changed in this diff.