
Fix invalid string escape sequences.

commit 9735a70059 (parent 6ac9d05de6)
Author: Matt Clay
Date:   2017-11-21 10:24:37 -08:00

49 changed files with 81 additions and 81 deletions
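
Background on the change: a backslash followed by a character that Python does not recognize as an escape (for example \d, \s or \w inside a plain string literal) is an "invalid escape sequence". CPython 3.6 began emitting a DeprecationWarning for these at compile time (later releases escalate it to SyntaxWarning), while still leaving the backslash in the string, so prefixing the literal with r silences the warning without changing the characters the re module receives. A minimal sketch of both points (any CPython 3.6 or newer; the exact warning category depends on the interpreter version):

import re
import warnings

# An unrecognized escape is left in the string unchanged, so the raw-string
# rewrite in this commit is behavior-preserving: both literals hold the same
# two characters, a backslash and a "d".
assert '\d' == r'\d'

# Compiling source that still uses the non-raw form records a warning
# (DeprecationWarning on 3.6-3.11, SyntaxWarning on newer interpreters).
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    compile(r"PATTERN = '^(\S+)/(sg-\S+)/(\S+)'", '<example>', 'exec')
print([str(w.message) for w in caught])

# The raw-string form compiles silently and matches exactly as before.
print(re.match(r'^(\S+)/(sg-\S+)/(\S+)', '123456789012/sg-0123456789abcdef0/example').groups())

The pattern is the FOREIGN_SECURITY_GROUP_REGEX case from one of the hunks below; the account id, group id and name in the sample string are made up.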

View file

@@ -11,7 +11,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
 'supported_by': 'community'}
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
 ---
 module: ec2_ami_find
 version_added: '2.0'

View file

@@ -11,7 +11,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
 'supported_by': 'community'}
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
 ---
 module: cloudwatchevent_rule
 short_description: Manage CloudWatch Event rules and targets

View file

@@ -368,7 +368,7 @@ def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
 group_id or a non-None ip range.
 """
-FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
+FOREIGN_SECURITY_GROUP_REGEX = r'^(\S+)/(sg-\S+)/(\S+)'
 group_id = None
 group_name = None
 ip = None

View file

@@ -169,9 +169,9 @@ class AnsibleSubnetSearchException(AnsibleRouteTableException):
 pass
-CIDR_RE = re.compile('^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$')
-SUBNET_RE = re.compile('^subnet-[A-z0-9]+$')
-ROUTE_TABLE_RE = re.compile('^rtb-[A-z0-9]+$')
+CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
+SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$')
+ROUTE_TABLE_RE = re.compile(r'^rtb-[A-z0-9]+$')
 def find_subnets(vpc_conn, vpc_id, identified_subnets):

View file

@@ -238,7 +238,7 @@ def get_account_id(module, region=None, endpoint=None, **aws_connect_kwargs):
 except ClientError as e:
 if (e.response['Error']['Code'] == 'AccessDenied'):
 except_msg = to_native(e.message)
-account_id = except_msg.search("arn:aws:iam::([0-9]{12,32}):\w+/").group(1)
+account_id = except_msg.search(r"arn:aws:iam::([0-9]{12,32}):\w+/").group(1)
 if account_id is None:
 module.fail_json_aws(e, msg="getting account information")
 except Exception as e:

View file

@@ -235,7 +235,7 @@ def validate_params(module, aws):
 function_name = module.params['function_name']
 # validate function name
-if not re.search('^[\w\-:]+$', function_name):
+if not re.search(r'^[\w\-:]+$', function_name):
 module.fail_json(
 msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
 )

View file

@@ -222,7 +222,7 @@ def validate_params(module, aws):
 function_name = module.params['lambda_function_arn']
 # validate function name
-if not re.search('^[\w\-:]+$', function_name):
+if not re.search(r'^[\w\-:]+$', function_name):
 module.fail_json(
 msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
 )

View file

@@ -361,7 +361,7 @@ def main():
 # validate function_name if present
 function_name = module.params['function_name']
 if function_name:
-if not re.search("^[\w\-:]+$", function_name):
+if not re.search(r"^[\w\-:]+$", function_name):
 module.fail_json(
 msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
 )

View file

@@ -188,7 +188,7 @@ def validate_params(module):
 # validate function name
 if function_name.startswith('arn:'):
-if not re.search('^[\w\-]+$', function_name):
+if not re.search(r'^[\w\-]+$', function_name):
 module.fail_json(
 msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(
 function_name)
@@ -197,7 +197,7 @@ def validate_params(module):
 module.fail_json(
 msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
 else:
-if not re.search('^[\w\-:]+$', function_name):
+if not re.search(r'^[\w\-:]+$', function_name):
 module.fail_json(
 msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
 )

View file

@@ -613,8 +613,8 @@ AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
 def extract_names_from_blob_uri(blob_uri, storage_suffix):
 # HACK: ditch this once python SDK supports get by URI
-m = re.match('^https://(?P<accountname>[^\.]+)\.blob\.{0}/'
-'(?P<containername>[^/]+)/(?P<blobname>.+)$'.format(storage_suffix), blob_uri)
+m = re.match(r'^https://(?P<accountname>[^.]+)\.blob\.{0}/'
+r'(?P<containername>[^/]+)/(?P<blobname>.+)$'.format(storage_suffix), blob_uri)
 if not m:
 raise Exception("unable to parse blob uri '%s'" % blob_uri)
 extracted_names = m.groupdict()

View file

@@ -13,7 +13,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
 'supported_by': 'community'}
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
 ---
 module: proxmox_kvm
 short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.

View file

@@ -271,8 +271,8 @@ except ImportError:
 from ansible.module_utils.basic import AnsibleModule
-NAME_RE = '({0}|{0}{1}*{0})'.format('[a-zA-Z0-9]', '[a-zA-Z0-9\-]')
-HOSTNAME_RE = '({0}\.)*{0}$'.format(NAME_RE)
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
 MAX_DEVICES = 100
 PACKET_DEVICE_STATES = (
@@ -403,7 +403,7 @@ def get_hostname_list(module):
 if (len(hostnames) == 1) and (count > 0):
 hostname_spec = hostnames[0]
 count_range = range(count_offset, count_offset + count)
-if re.search("%\d{0,2}d", hostname_spec):
+if re.search(r"%\d{0,2}d", hostname_spec):
 hostnames = [hostname_spec % i for i in count_range]
 elif count > 1:
 hostname_spec = '%s%%02d' % hostname_spec

View file

@@ -215,7 +215,7 @@ LOCATIONS = ['us/las',
 'de/fkb']
 uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
 def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

View file

@@ -95,7 +95,7 @@ LOCATIONS = ['us/las',
 'de/fkb']
 uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
 def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

View file

@@ -98,7 +98,7 @@ from ansible.module_utils.basic import AnsibleModule
 uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
 def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

View file

@@ -147,7 +147,7 @@ from ansible.module_utils._text import to_native
 uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
 def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

View file

@@ -95,7 +95,7 @@ from ansible.module_utils.basic import AnsibleModule
 uuid_match = re.compile(
-'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
 def _wait_for_completion(profitbricks, promise, wait_timeout, msg):

View file

@@ -145,7 +145,7 @@ class Imgadm(object):
 # Helper method to massage stderr
 def errmsg(self, stderr):
-match = re.match('^imgadm .*?: error \(\w+\): (.*): .*', stderr)
+match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
 if match:
 return match.groups()[0]
 else:
@@ -236,7 +236,7 @@ class Imgadm(object):
 if rc != 0:
 self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
-regex = 'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
+regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
 if re.match(regex, stdout):
 self.changed = False

View file

@@ -181,7 +181,7 @@ def alter_retention_policy(module, client, retention_policy):
 duration = module.params['duration']
 replication = module.params['replication']
 default = module.params['default']
-duration_regexp = re.compile('(\d+)([hdw]{1})|(^INF$){1}')
+duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}')
 changed = False
 duration_lookup = duration_regexp.search(duration)

View file

@@ -126,7 +126,7 @@ from ansible.module_utils._text import to_native
 def db_exists(cursor, db):
-res = cursor.execute("SHOW DATABASES LIKE %s", (db.replace("_", "\_"),))
+res = cursor.execute("SHOW DATABASES LIKE %s", (db.replace("_", r"\_"),))
 return bool(res)

View file

@@ -503,7 +503,7 @@ def get_database_privileges(cursor, user, db):
 datacl = cursor.fetchone()[0]
 if datacl is None:
 return set()
-r = re.search('%s\\\\?\"?=(C?T?c?)/[^,]+\,?' % user, datacl)
+r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
 if r is None:
 return set()
 o = set()

View file

@@ -287,7 +287,7 @@ def main():
 age = None
 else:
 # convert age to seconds:
-m = re.match("^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
+m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
 seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
 if m:
 age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
@@ -298,7 +298,7 @@ def main():
 size = None
 else:
 # convert size to bytes:
-m = re.match("^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
+m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
 bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
 if m:
 size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)

View file

@@ -179,7 +179,7 @@ def main():
 module.fail_json(msg="%s needs a key parameter" % state)
 # All xattr must begin in user namespace
-if key is not None and not re.match('^user\.', key):
+if key is not None and not re.match(r'^user\.', key):
 key = 'user.%s' % key
 if (state == 'present' or value is not None):

View file

@@ -274,7 +274,7 @@ from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode
 from ansible.module_utils.six import iteritems, string_types
 from ansible.module_utils._text import to_bytes, to_native
-_IDENT = "[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
 _NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
 # Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
 # strings wrapped by the other delimiter' XPath trick, especially as simple XPath.

View file

@@ -12,7 +12,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
 'supported_by': 'core'}
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
 ---
 module: add_host
 short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory

View file

@@ -75,7 +75,7 @@ def main():
 def monit_version():
 rc, out, err = module.run_command('%s -V' % MONIT, check_rc=True)
 version_line = out.split('\n')[0]
-version = re.search("[0-9]+\.[0-9]+", version_line).group().split('.')
+version = re.search(r"[0-9]+\.[0-9]+", version_line).group().split('.')
 # Use only major and minor even if there are more these should be enough
 return int(version[0]), int(version[1])

View file

@@ -206,7 +206,7 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
 motd += irc.recv(1024)
 # The server might send back a shorter nick than we specified (due to NICKLEN),
 # so grab that and use it from now on (assuming we find the 00[1-4] response).
-match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
 if match:
 nick = match.group('nick')
 break
@@ -223,7 +223,7 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
 start = time.time()
 while 1:
 join += irc.recv(1024)
-if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M):
+if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M):
 break
 elif time.time() - start > timeout:
 raise Exception('Timeout waiting for IRC JOIN response')

View file

@@ -137,7 +137,7 @@ from ansible.module_utils.basic import AnsibleModule
 def parse_out(string):
-return re.sub("\s+", " ", string).strip()
+return re.sub(r"\s+", " ", string).strip()
 def has_changed(string):

View file

@@ -220,7 +220,7 @@ class Npm(object):
 if dep:
 # node.js v0.10.22 changed the `npm outdated` module separator
 # from "@" to " ". Split on both for backwards compatibility.
-pkg, other = re.split('\s|@', dep, 1)
+pkg, other = re.split(r'\s|@', dep, 1)
 outdated.append(pkg)
 return outdated

View file

@@ -150,7 +150,7 @@ from ansible.module_utils.basic import AnsibleModule
 def parse_for_packages(stdout):
 packages = []
 data = stdout.split('\n')
-regex = re.compile('^\(\d+/\d+\)\s+\S+\s+(\S+)')
+regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
 for l in data:
 p = regex.search(l)
 if p:

View file

@@ -208,8 +208,8 @@ class SourcesList(object):
 return s
 # Drop options and protocols.
-line = re.sub('\[[^\]]+\]', '', line)
-line = re.sub('\w+://', '', line)
+line = re.sub(r'\[[^\]]+\]', '', line)
+line = re.sub(r'\w+://', '', line)
 # split line into valid keywords
 parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]

View file

@@ -235,7 +235,7 @@ def package_present(names, pkg_spec, module):
 # "file:/local/package/directory/ is empty" message on stderr
 # while still installing the package, so we need to look for
 # for a message like "packagename-1.0: ok" just in case.
-match = re.search("\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
+match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
 if match:
 # It turns out we were able to install the package.
@@ -286,7 +286,7 @@ def package_latest(names, pkg_spec, module):
 pkg_spec[name]['changed'] = False
 for installed_name in pkg_spec[name]['installed_names']:
 module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
-match = re.search("\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
+match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
 if match:
 module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
@@ -502,7 +502,7 @@ def upgrade_packages(pkg_spec, module):
 # Try to find any occurrence of a package changing version like:
 # "bzip2-1.0.6->1.0.6p0: ok".
-match = re.search("\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
 if match:
 pkg_spec['*']['changed'] = True

View file

@@ -194,7 +194,7 @@ def upgrade(module, pacman_path):
 }
 if rc == 0:
-regex = re.compile('([\w-]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
+regex = re.compile(r'([\w-]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
 for p in data:
 m = regex.search(p)
 packages.append(m.group(1))
@@ -419,11 +419,11 @@ def main():
 for i, pkg in enumerate(pkgs):
 if not pkg: # avoid empty strings
 continue
-elif re.match(".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
+elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
 # The package given is a filename, extract the raw pkg name from
 # it and store the filename
 pkg_files.append(pkg)
-pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
+pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1])
 else:
 pkg_files.append(None)

View file

@@ -95,8 +95,8 @@ def main():
 # Try to spot where this has happened and fix it.
 for fragment in params['name']:
 if (
-re.search('^\d+(?:\.\d+)*', fragment)
-and packages and re.search('@[^,]*$', packages[-1])
+re.search(r'^\d+(?:\.\d+)*', fragment)
+and packages and re.search(r'@[^,]*$', packages[-1])
 ):
 packages[-1] += ',' + fragment
 else:

View file

@@ -231,7 +231,7 @@ def codex_list(module):
 if rc != 0:
 module.fail_json(msg="unable to list grimoire collection, fix your Codex")
-rex = re.compile("^\s*\[\d+\] : (?P<grim>[\w\-\+\.]+) : [\w\-\+\./]+(?: : (?P<ver>[\w\-\+\.]+))?\s*$")
+rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
 # drop 4-line header and empty trailing line
 for line in stdout.splitlines()[4:-1]:

View file

@@ -102,7 +102,7 @@ def query_package(module, name, depot=None):
 else:
 rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
 if rc == 0:
-version = re.sub("\s\s+|\t" , " ", stdout).strip().split()[1]
+version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
 else:
 version = None

View file

@@ -231,7 +231,7 @@ def main():
 for cpu, details in memory_details_summary.items():
 cpu_total_memory_size = details.get('total_memory_size')
 if cpu_total_memory_size:
-ram = re.search('(\d+)\s+(\w+)', cpu_total_memory_size)
+ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
 if ram:
 if ram.group(2) == 'GB':
 facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))

View file

@@ -144,20 +144,20 @@ def parse_lv(data):
 name = None
 for line in data.splitlines():
-match = re.search("LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
+match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
 if match is not None:
 name = match.group(1)
 vg = match.group(2)
 continue
-match = re.search("LPs:\s+(\d+).*PPs", line)
+match = re.search(r"LPs:\s+(\d+).*PPs", line)
 if match is not None:
 lps = int(match.group(1))
 continue
-match = re.search("PP SIZE:\s+(\d+)", line)
+match = re.search(r"PP SIZE:\s+(\d+)", line)
 if match is not None:
 pp_size = int(match.group(1))
 continue
-match = re.search("INTER-POLICY:\s+(\w+)", line)
+match = re.search(r"INTER-POLICY:\s+(\w+)", line)
 if match is not None:
 policy = match.group(1)
 continue
@@ -174,22 +174,22 @@ def parse_vg(data):
 for line in data.splitlines():
-match = re.search("VOLUME GROUP:\s+(\w+)", line)
+match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
 if match is not None:
 name = match.group(1)
 continue
-match = re.search("TOTAL PP.*\((\d+)", line)
+match = re.search(r"TOTAL PP.*\((\d+)", line)
 if match is not None:
 size = int(match.group(1))
 continue
-match = re.search("PP SIZE:\s+(\d+)", line)
+match = re.search(r"PP SIZE:\s+(\d+)", line)
 if match is not None:
 pp_size = int(match.group(1))
 continue
-match = re.search("FREE PP.*\((\d+)", line)
+match = re.search(r"FREE PP.*\((\d+)", line)
 if match is not None:
 free = int(match.group(1))
 continue

View file

@@ -404,7 +404,7 @@ def parsekey(module, raw_key, rank=None):
 type_index = None # index of keytype in key string|list
 # remove comment yaml escapes
-raw_key = raw_key.replace('\#', '#')
+raw_key = raw_key.replace(r'\#', '#')
 # split key safely
 lex = shlex.shlex(raw_key)

View file

@@ -74,7 +74,7 @@ def activate(module):
 def is_policy_enabled(module, name):
 cmd = "%s list" % (AWALL_PATH)
 rc, stdout, stderr = module.run_command(cmd)
-if re.search("^%s\s+enabled" % name, stdout, re.MULTILINE):
+if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
 return True
 return False

View file

@@ -261,7 +261,7 @@ def get_quotas(name, nofail):
 out = run_gluster(['volume', 'quota', name, 'list'])
 for row in out.split('\n'):
 if row[:1] == '/':
-q = re.split('\s+', row)
+q = re.split(r'\s+', row)
 quotas[q[0]] = q[1]
 return quotas

View file

@@ -161,14 +161,14 @@ def optionDict(line, iface, option, value):
 def getValueFromLine(s):
-spaceRe = re.compile('\s+')
+spaceRe = re.compile(r'\s+')
 for m in spaceRe.finditer(s):
 pass
 valueEnd = m.start()
 option = s.split()[0]
 optionStart = s.find(option)
 optionLen = len(option)
-valueStart = re.search('\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
+valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
 return s[valueStart:valueEnd]
@@ -286,7 +286,7 @@ def setInterfaceOption(module, lines, iface, option, raw_value, state):
 old_value = target_option['value']
 prefix_start = old_line.find(option)
 optionLen = len(option)
-old_value_position = re.search("\s+".join(old_value.split()), old_line[prefix_start + optionLen:])
+old_value_position = re.search(r"\s+".join(old_value.split()), old_line[prefix_start + optionLen:])
 start = old_value_position.start() + prefix_start + optionLen
 end = old_value_position.end() + prefix_start + optionLen
 line = old_line[:start] + value + old_line[end:]

View file

@@ -66,7 +66,7 @@ class Blacklist(object):
 return False
 def get_pattern(self):
-return '^blacklist\s*' + self.module + '$'
+return r'^blacklist\s*' + self.module + '$'
 def readlines(self):
 f = open(self.filename, 'r')

View file

@@ -70,10 +70,10 @@ def is_available(name, ubuntuMode):
 * if the locale is present in /etc/locales.gen
 * or if the locale is present in /usr/share/i18n/SUPPORTED"""
 if ubuntuMode:
-__regexp = '^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+__regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
 __locales_available = '/usr/share/i18n/SUPPORTED'
 else:
-__regexp = '^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+__regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
 __locales_available = '/etc/locale.gen'
 re_compiled = re.compile(__regexp)
@@ -117,11 +117,11 @@ def replace_line(existing_line, new_line):
 def set_locale(name, enabled=True):
 """ Sets the state of the locale. Defaults to enabled. """
-search_string = '#{0,1}\s*%s (?P<charset>.+)' % name
+search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
 if enabled:
-new_string = '%s \g<charset>' % (name)
+new_string = r'%s \g<charset>' % (name)
 else:
-new_string = '# %s \g<charset>' % (name)
+new_string = r'# %s \g<charset>' % (name)
 try:
 f = open("/etc/locale.gen", "r")
 lines = [re.sub(search_string, new_string, line) for line in f]

View file

@@ -226,7 +226,7 @@ def get_lvm_version(module):
 rc, out, err = module.run_command("%s version" % (ver_cmd))
 if rc != 0:
 return None
-m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
+m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
 if not m:
 return None
 return mkversion(m.group(1), m.group(2), m.group(3))

View file

@@ -169,11 +169,11 @@ class Sv(object):
 else:
 self.full_state = out
-m = re.search('\(pid (\d+)\)', out)
+m = re.search(r'\(pid (\d+)\)', out)
 if m:
 self.pid = m.group(1)
-m = re.search(' (\d+)s', out)
+m = re.search(r' (\d+)s', out)
 if m:
 self.duration = m.group(1)

View file

@@ -672,10 +672,10 @@ class LinuxService(Service):
 initpath = '/etc/init'
 if self.upstart_version >= LooseVersion('0.6.7'):
-manreg = re.compile('^manual\s*$', re.M | re.I)
+manreg = re.compile(r'^manual\s*$', re.M | re.I)
 config_line = 'manual\n'
 else:
-manreg = re.compile('^start on manual\s*$', re.M | re.I)
+manreg = re.compile(r'^start on manual\s*$', re.M | re.I)
 config_line = 'start on manual\n'
 conf_file_name = "%s/%s.conf" % (initpath, self.name)
 override_file_name = "%s/%s.override" % (initpath, self.name)
@@ -1308,7 +1308,7 @@ class SunOSService(Service):
 # Support for synchronous restart/refresh is only supported on
 # Oracle Solaris >= 11.2
 for line in open('/etc/release', 'r').readlines():
-m = re.match('\s+Oracle Solaris (\d+\.\d+).*', line.rstrip())
+m = re.match(r'\s+Oracle Solaris (\d+\.\d+).*', line.rstrip())
 if m and m.groups()[0] >= 11.2:
 return True

View file

@@ -180,11 +180,11 @@ class Svc(object):
 else:
 self.full_state = out
-m = re.search('\(pid (\d+)\)', out)
+m = re.search(r'\(pid (\d+)\)', out)
 if m:
 self.pid = m.group(1)
-m = re.search('(\d+) seconds', out)
+m = re.search(r'(\d+) seconds', out)
 if m:
 self.duration = m.group(1)

View file

@@ -517,7 +517,7 @@ class SmartOSTimezone(Timezone):
 # sm-set-timezone knows no state and will always set the timezone.
 # XXX: https://github.com/joyent/smtools/pull/2
-m = re.match('^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
 if not (m and m.groups()[-1] == value):
 self.module.fail_json(msg='Failed to set timezone')
 else:
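
Not part of the commit, but a small self-contained way to audit a source tree for the literals this change targets: compile every .py file with all warnings recorded and report the ones about invalid escape sequences. This is a hypothetical helper, not an Ansible sanity test; it only assumes that CPython's warning text contains the phrase "invalid escape sequence", which has been stable since 3.6.

import os
import sys
import warnings

def scan(root):
    """Yield "path:line: message" for every invalid escape sequence under root."""
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith('.py'):
                continue
            path = os.path.join(dirpath, name)
            with open(path, 'rb') as handle:
                source = handle.read()
            # Compile with every warning recorded; the invalid-escape warnings
            # are emitted at compile time, before any module code runs.
            with warnings.catch_warnings(record=True) as caught:
                warnings.simplefilter('always')
                try:
                    compile(source, path, 'exec')
                except SyntaxError:
                    continue  # real syntax errors are reported by the test suite
            for entry in caught:
                if 'invalid escape sequence' in str(entry.message):
                    yield '%s:%s: %s' % (path, entry.lineno, entry.message)

if __name__ == '__main__':
    for hit in scan(sys.argv[1] if len(sys.argv) > 1 else '.'):
        print(hit)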