2013-10-31 21:52:37 +01:00
|
|
|
# This code is part of Ansible, but is an independent component.
|
|
|
|
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
|
|
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
|
|
# still belong to the author of the module, and may assign their own license
|
|
|
|
# to the complete work.
|
|
|
|
#
|
|
|
|
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
|
|
|
# All rights reserved.
|
|
|
|
#
|
|
|
|
# Redistribution and use in source and binary forms, with or without modification,
|
|
|
|
# are permitted provided that the following conditions are met:
|
|
|
|
#
|
|
|
|
# * Redistributions of source code must retain the above copyright
|
|
|
|
# notice, this list of conditions and the following disclaimer.
|
|
|
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
|
|
# this list of conditions and the following disclaimer in the documentation
|
|
|
|
# and/or other materials provided with the distribution.
|
|
|
|
#
|
|
|
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
|
|
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
|
|
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
|
|
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
|
|
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
#
|
|
|
|
|
|
|
|
# == BEGIN DYNAMICALLY INSERTED CODE ==
|
|
|
|
|
2014-06-10 19:29:15 +02:00
|
|
|
# The "<<...>>" placeholders below are substituted by the Ansible module
# builder when this common snippet is embedded into a module.
ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"

# Raw key=value argument string and complex (JSON) argument payload,
# injected at module-build time.
MODULE_ARGS = "<<INCLUDE_ANSIBLE_MODULE_ARGS>>"
MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"

# Values accepted as booleans in module arguments (string and int forms).
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE

# Comma-separated list of filesystem types that need special selinux
# context handling (e.g. NFS); consumed by is_special_selinux_path().
SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
|
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
# ansible modules can be written in any language. To simplify
|
|
|
|
# development of Python modules, the functions available here
|
|
|
|
# can be inserted in any module source automatically by including
|
|
|
|
# #<<INCLUDE_ANSIBLE_MODULE_COMMON>> on a blank line by itself inside
|
|
|
|
# of an ansible module. The source of this common code lives
|
|
|
|
# in lib/ansible/module_common.py
|
|
|
|
|
2014-05-19 17:26:06 +02:00
|
|
|
import locale
|
2013-10-31 21:52:37 +01:00
|
|
|
import os
|
|
|
|
import re
|
2014-03-06 20:33:18 +01:00
|
|
|
import pipes
|
2013-10-31 21:52:37 +01:00
|
|
|
import shlex
|
|
|
|
import subprocess
|
|
|
|
import sys
|
|
|
|
import syslog
|
|
|
|
import types
|
|
|
|
import time
|
2014-08-04 22:32:41 +02:00
|
|
|
import select
|
2013-10-31 21:52:37 +01:00
|
|
|
import shutil
|
|
|
|
import stat
|
2014-03-20 11:12:58 +01:00
|
|
|
import tempfile
|
2013-10-31 21:52:37 +01:00
|
|
|
import traceback
|
|
|
|
import grp
|
|
|
|
import pwd
|
|
|
|
import platform
|
|
|
|
import errno
|
2014-03-10 22:06:52 +01:00
|
|
|
import tempfile
|
2013-10-31 21:52:37 +01:00
|
|
|
|
|
|
|
# ---- optional / version-dependent imports --------------------------------
# json is stdlib on python >= 2.6; fall back to simplejson on older
# interpreters, and abort with a clear message when neither is usable.
try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        sys.stderr.write('Error: ansible requires a json module, none found!')
        sys.exit(1)
except SyntaxError:
    sys.stderr.write('SyntaxError: probably due to json and python being for different versions')
    sys.exit(1)

# selinux python bindings are optional; callers check HAVE_SELINUX first.
HAVE_SELINUX=False
try:
    import selinux
    HAVE_SELINUX=True
except ImportError:
    pass

# Hashing: prefer hashlib (python >= 2.5), fall back to the legacy
# sha/md5 modules on older interpreters.
HAVE_HASHLIB=False
try:
    from hashlib import sha1 as _sha1
    HAVE_HASHLIB=True
except ImportError:
    from sha import sha as _sha1

try:
    from hashlib import md5 as _md5
except ImportError:
    try:
        from md5 import md5 as _md5
    except ImportError:
        # MD5 unavailable.  Possibly FIPS mode
        _md5 = None

# sha256 may be absent on very old pythons; consumers must cope.
try:
    from hashlib import sha256 as _sha256
except ImportError:
    pass

# Prefer logging to the systemd journal when the bindings exist,
# otherwise fall back to syslog; has_journal records which is in use.
try:
    from systemd import journal
    has_journal = True
except ImportError:
    import syslog
    has_journal = False
|
2013-10-31 21:52:37 +01:00
|
|
|
|
2014-04-08 16:05:07 +02:00
|
|
|
# literal_eval: use stdlib ast on python >= 2.6; otherwise install a
# python-2.4-compatible fallback built on the old `compiler` package.
try:
    from ast import literal_eval as _literal_eval
except ImportError:
    # a replacement for literal_eval that works with python 2.4. from:
    # https://mail.python.org/pipermail/python-list/2009-September/551880.html
    # which is essentially a cut/past from an earlier (2.6) version of python's
    # ast.py
    from compiler import parse
    from compiler.ast import *
    def _literal_eval(node_or_string):
        """
        Safely evaluate an expression node or a string containing a Python
        expression.  The string or node provided may only consist of the following
        Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
        and None.
        """
        _safe_names = {'None': None, 'True': True, 'False': False}
        if isinstance(node_or_string, basestring):
            node_or_string = parse(node_or_string, mode='eval')
        if isinstance(node_or_string, Expression):
            node_or_string = node_or_string.node
        def _convert(node):
            # Only literal node types are converted; any other node falls
            # through to the ValueError below.
            if isinstance(node, Const) and isinstance(node.value, (basestring, int, float, long, complex)):
                return node.value
            elif isinstance(node, Tuple):
                return tuple(map(_convert, node.nodes))
            elif isinstance(node, List):
                return list(map(_convert, node.nodes))
            elif isinstance(node, Dict):
                return dict((_convert(k), _convert(v)) for k, v in node.items)
            elif isinstance(node, Name):
                if node.name in _safe_names:
                    return _safe_names[node.name]
            elif isinstance(node, UnarySub):
                # Negative numeric literals parse as unary minus.
                return -_convert(node.expr)
            raise ValueError('malformed string')
        return _convert(node_or_string)
|
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
# Argument spec fragment shared by every module that manipulates files;
# merged into a module's own spec when add_file_common_args=True.
FILE_COMMON_ARGUMENTS=dict(
    src = dict(),
    mode = dict(),
    owner = dict(),
    group = dict(),
    # selinux context components
    seuser = dict(),
    serole = dict(),
    selevel = dict(),
    setype = dict(),
    follow = dict(type='bool', default=False),
    # not taken by the file module, but other modules call file so it must ignore them.
    content = dict(no_log=True),
    backup = dict(),
    force = dict(),
    remote_src = dict(), # used by assemble
    regexp = dict(), # used by assemble
    delimiter = dict(), # used by assemble
    directory_mode = dict(), # used by copy
)

# Matches argument names that look password-like (pass, password, passwd,
# with 0-2 leading dashes); used to keep secrets out of logs.
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
|
2014-03-10 22:06:52 +01:00
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
def get_platform():
    """Return the generic platform name (e.g. 'Linux') as reported by the interpreter."""
    system_name = platform.system()
    return system_name
|
|
|
|
|
|
|
|
def get_distribution():
    ''' return the distribution name '''
    # Only meaningful on Linux; every other platform returns None.
    if platform.system() == 'Linux':
        try:
            distribution = platform.linux_distribution()[0].capitalize()
            if not distribution and os.path.isfile('/etc/system-release'):
                # Distros unknown to platform (e.g. Amazon Linux) publish
                # /etc/system-release; re-probe using that file.
                distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
                if 'Amazon' in distribution:
                    distribution = 'Amazon'
                else:
                    distribution = 'OtherLinux'
        except:
            # FIXME: MethodMissing, I assume?
            # NOTE(review): bare except deliberately swallows any failure of
            # linux_distribution() and falls back to the older platform.dist().
            distribution = platform.dist()[0].capitalize()
    else:
        distribution = None
    return distribution
|
|
|
|
|
2014-06-16 15:06:50 +02:00
|
|
|
def get_distribution_version():
    ''' return the distribution version '''
    # Mirrors get_distribution() but returns field [1] (the version string);
    # None on non-Linux platforms.
    if platform.system() == 'Linux':
        try:
            distribution_version = platform.linux_distribution()[1]
            if not distribution_version and os.path.isfile('/etc/system-release'):
                # Fallback for distros that only ship /etc/system-release.
                distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
        except:
            # FIXME: MethodMissing, I assume?
            distribution_version = platform.dist()[1]
    else:
        distribution_version = None
    return distribution_version
|
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
def load_platform_subclass(cls, *args, **kwargs):
    '''
    used by modules like User to have different implementations based on detected platform. See User
    module for an example.
    '''

    this_platform = get_platform()
    this_distribution = get_distribution()
    chosen = None

    # Most specific match first: platform AND distribution must both agree.
    # There is deliberately no break, so the *last* matching subclass wins,
    # exactly as in the original implementation.
    if this_distribution is not None:
        for candidate in cls.__subclasses__():
            if (candidate.distribution is not None
                    and candidate.distribution == this_distribution
                    and candidate.platform == this_platform):
                chosen = candidate

    # Next best: a platform-only subclass (distribution left as None).
    if chosen is None:
        for candidate in cls.__subclasses__():
            if candidate.platform == this_platform and candidate.distribution is None:
                chosen = candidate

    # No specialization found: instantiate the generic base class itself.
    if chosen is None:
        chosen = cls

    return super(cls, chosen).__new__(chosen)
|
|
|
|
|
2014-10-08 20:30:36 +02:00
|
|
|
|
|
|
|
def json_dict_unicode_to_bytes(d):
    ''' Recursively convert dict keys and values to byte str

        Specialized for json return because this only handles, lists, tuples,
        and dict container types (the containers that the json module returns)
    '''
    # Python 2 only: relies on the `unicode` builtin and dict.iteritems().
    if isinstance(d, unicode):
        return d.encode('utf-8')
    elif isinstance(d, dict):
        # iteritems() yields (key, value) tuples; the tuple branch below
        # recurses into both members, so keys and values are both encoded.
        return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
    elif isinstance(d, list):
        return list(map(json_dict_unicode_to_bytes, d))
    elif isinstance(d, tuple):
        return tuple(map(json_dict_unicode_to_bytes, d))
    else:
        # ints, floats, bools, None and already-byte strings pass through.
        return d
|
|
|
|
|
2015-01-27 05:37:20 +01:00
|
|
|
def json_dict_bytes_to_unicode(d):
    ''' Recursively convert dict keys and values from byte str to unicode

        Specialized for json return because this only handles, lists, tuples,
        and dict container types (the containers that the json module returns)
    '''
    # Python 2 only: relies on the `unicode` builtin and dict.iteritems().
    if isinstance(d, str):
        return unicode(d, 'utf-8')
    elif isinstance(d, dict):
        # iteritems() yields (key, value) tuples; the tuple branch below
        # recurses into both members, so keys and values are both decoded.
        return dict(map(json_dict_bytes_to_unicode, d.iteritems()))
    elif isinstance(d, list):
        return list(map(json_dict_bytes_to_unicode, d))
    elif isinstance(d, tuple):
        return tuple(map(json_dict_bytes_to_unicode, d))
    else:
        # non-string scalars pass through unchanged.
        return d
|
|
|
|
|
2015-02-09 19:13:13 +01:00
|
|
|
def heuristic_log_sanitize(data):
    ''' Remove strings that look like passwords from log messages '''
    # Currently filters:
    # user:pass@foo/whatever and http://username:pass@wherever/foo
    # This code has false positives and consumes parts of logs that are
    # not passwds

    # begin: start of a passwd containing string
    # end: end of a passwd containing string
    # sep: char between user and passwd
    # prev_begin: where in the overall string to start a search for
    #   a passwd
    # sep_search_end: where in the string to end a search for the sep
    #
    # Strategy: scan right-to-left for '@' (end of a credential), then look
    # back for '://' or start-of-string (begin) and a ':' separator; replace
    # the password portion with '********'.  Pieces are accumulated into
    # `output` front-first and joined at the end.
    output = []
    begin = len(data)
    prev_begin = begin
    sep = 1
    while sep:
        # Find the potential end of a passwd
        try:
            end = data.rindex('@', 0, begin)
        except ValueError:
            # No passwd in the rest of the data
            output.insert(0, data[0:begin])
            break

        # Search for the beginning of a passwd
        sep = None
        sep_search_end = end
        while not sep:
            # URL-style username+password
            try:
                begin = data.rindex('://', 0, sep_search_end)
            except ValueError:
                # No url style in the data, check for ssh style in the
                # rest of the string
                begin = 0
            # Search for separator
            try:
                sep = data.index(':', begin + 3, end)
            except ValueError:
                # No separator; choices:
                if begin == 0:
                    # Searched the whole string so there's no password
                    # here.  Return the remaining data
                    # NOTE(review): begin == 0 at this point, so this inserts
                    # an empty string and the text before `end` is dropped
                    # from the sanitized output -- verify this is intended.
                    output.insert(0, data[0:begin])
                    break
                # Search for a different beginning of the password field.
                sep_search_end = begin
                continue
        if sep:
            # Password was found; remove it.
            output.insert(0, data[end:prev_begin])
            output.insert(0, '********')
            output.insert(0, data[begin:sep + 1])
            prev_begin = begin

    return ''.join(output)
|
|
|
|
|
2014-10-08 20:30:36 +02:00
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
class AnsibleModule(object):
|
|
|
|
|
|
|
|
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
    check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
    required_one_of=None, add_file_common_args=False, supports_check_mode=False):

    '''
    common code for quickly building an ansible module in Python
    (although you can write modules in anything that can return JSON)
    see library/* for examples
    '''

    # NOTE: the set-up below is order-sensitive: params must be loaded
    # before aliases are resolved, and defaults must be set before the
    # required/value/type checks run.
    self.argument_spec = argument_spec
    self.supports_check_mode = supports_check_mode
    self.check_mode = False
    self.no_log = no_log
    # files registered here are removed when the module exits
    self.cleanup_files = []

    self.aliases = {}

    # merge in the shared file-attribute arguments without clobbering
    # any spec entries the module defined itself
    if add_file_common_args:
        for k, v in FILE_COMMON_ARGUMENTS.iteritems():
            if k not in self.argument_spec:
                self.argument_spec[k] = v

    # check the locale as set by the current environment, and
    # reset to LANG=C if it's an invalid/unavailable locale
    self._check_locale()

    (self.params, self.args) = self._load_params()

    # internal arguments that are always accepted in addition to the spec
    self._legal_inputs = ['CHECKMODE', 'NO_LOG']

    self.aliases = self._handle_aliases()

    if check_invalid_arguments:
        self._check_invalid_arguments()
    self._check_for_check_mode()
    self._check_for_no_log()

    # check exclusive early
    if not bypass_checks:
        self._check_mutually_exclusive(mutually_exclusive)

    self._set_defaults(pre=True)

    if not bypass_checks:
        self._check_required_arguments()
        self._check_argument_values()
        self._check_argument_types()
        self._check_required_together(required_together)
        self._check_required_one_of(required_one_of)

    self._set_defaults(pre=False)
    if not self.no_log:
        self._log_invocation()

    # finally, make sure we're in a sane working dir
    self._set_cwd()
|
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
def load_file_common_arguments(self, params):
    '''
    many modules deal with files, this encapsulates common
    options that the file module accepts such that it is directly
    available to all modules and they can share code.
    '''

    # Either 'path' or 'dest' names the target file; without one there is
    # nothing to extract.
    path = params.get('path', params.get('dest', None))
    if path is None:
        return {}
    else:
        path = os.path.expanduser(path)

    # if the path is a symlink, and we're following links, get
    # the target of the link instead for testing
    if params.get('follow', False) and os.path.islink(path):
        path = os.path.realpath(path)

    mode = params.get('mode', None)
    owner = params.get('owner', None)
    group = params.get('group', None)

    # selinux related options
    seuser = params.get('seuser', None)
    serole = params.get('serole', None)
    setype = params.get('setype', None)
    selevel = params.get('selevel', None)
    secontext = [seuser, serole, setype]

    # the level component only exists on MLS-aware policies
    if self.selinux_mls_enabled():
        secontext.append(selevel)

    # substitute policy defaults for any component given as '_default'
    default_secontext = self.selinux_default_context(path)
    for i in range(len(default_secontext)):
        # NOTE(review): 'i is not None' is always true (i is a loop index);
        # the guard was probably meant to be on secontext[i], though the
        # equality test below makes it harmless either way.
        if i is not None and secontext[i] == '_default':
            secontext[i] = default_secontext[i]

    return dict(
        path=path, mode=mode, owner=owner, group=group,
        seuser=seuser, serole=serole, setype=setype,
        selevel=selevel, secontext=secontext,
    )
|
|
|
|
|
|
|
|
|
|
|
|
# Detect whether using selinux that is MLS-aware.
|
|
|
|
# While this means you can set the level/range with
|
|
|
|
# selinux.lsetfilecon(), it may or may not mean that you
|
|
|
|
# will get the selevel as part of the context returned
|
|
|
|
# by selinux.lgetfilecon().
|
|
|
|
|
|
|
|
def selinux_mls_enabled(self):
    """Return True when the loaded SELinux policy is MLS-aware, else False."""
    # Without the python bindings we cannot ask, so report not-MLS.
    if not HAVE_SELINUX:
        return False
    # is_selinux_mls_enabled() returns 1 when MLS is active.
    return selinux.is_selinux_mls_enabled() == 1
|
|
|
|
|
|
|
|
def selinux_enabled(self):
    """Return True if SELinux is enabled on this host, else False.

    When the python bindings are missing but SELinux appears active
    (the selinuxenabled binary exits 0), abort via fail_json instead of
    silently mis-reporting.
    """
    if not HAVE_SELINUX:
        # No bindings: probe with the selinuxenabled binary, exit 0 == enabled.
        probe = self.get_bin_path('selinuxenabled')
        if probe is not None:
            (rc, _out, _err) = self.run_command(probe)
            if rc == 0:
                self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
        return False
    return selinux.is_selinux_enabled() == 1
|
|
|
|
|
|
|
|
# Determine whether we need a placeholder for selevel/mls
|
|
|
|
def selinux_initial_context(self):
    """Return a placeholder context list: three None slots, four when MLS is on."""
    slots = 4 if self.selinux_mls_enabled() else 3
    return [None] * slots
|
|
|
|
|
|
|
|
def _to_filesystem_str(self, path):
    '''Returns filesystem path as a str, if it wasn't already.

    Used in selinux interactions because it cannot accept unicode
    instances, and specifying complex args in a playbook leaves
    you with unicode instances.  This method currently assumes
    that your filesystem encoding is UTF-8.

    '''
    # Python 2 only: `unicode` is the py2 text type; byte strings pass
    # through untouched.
    if isinstance(path, unicode):
        path = path.encode("utf-8")
    return path
|
|
|
|
|
|
|
|
# If selinux fails to find a default, return an array of None
|
|
|
|
def selinux_default_context(self, path, mode=0):
    """Look up the policy-default SELinux context for *path* via matchpathcon.

    Returns a list of context components; every slot is None when SELinux
    is unavailable or no default could be determined.
    """
    context = self.selinux_initial_context()
    if not HAVE_SELINUX or not self.selinux_enabled():
        return context
    try:
        result = selinux.matchpathcon(self._to_filesystem_str(path), mode)
    except OSError:
        # Lookup failed: keep the all-None placeholder.
        return context
    status, raw_context = result[0], result[1]
    if status == -1:
        return context
    # Limit split to 4 because the selevel, the last in the list,
    # may contain ':' characters
    return raw_context.split(':', 3)
|
|
|
|
|
|
|
|
def selinux_context(self, path):
    # Return the current SELinux context of *path* as a list of
    # components; all-None placeholder when SELinux is unavailable.
    context = self.selinux_initial_context()
    if not HAVE_SELINUX or not self.selinux_enabled():
        return context
    try:
        ret = selinux.lgetfilecon_raw(self._to_filesystem_str(path))
    except OSError, e:
        # Distinguish "file missing" from any other bindings failure.
        if e.errno == errno.ENOENT:
            self.fail_json(path=path, msg='path %s does not exist' % path)
        else:
            self.fail_json(path=path, msg='failed to retrieve selinux context')
    if ret[0] == -1:
        return context
    # Limit split to 4 because the selevel, the last in the list,
    # may contain ':' characters
    context = ret[1].split(':', 3)
    return context
|
|
|
|
|
|
|
|
def user_and_group(self, filename):
    """Return the (uid, gid) owning *filename*; symlinks are not followed."""
    expanded = os.path.expanduser(filename)
    info = os.lstat(expanded)
    return (info.st_uid, info.st_gid)
|
|
|
|
|
2014-04-17 23:16:54 +02:00
|
|
|
def find_mount_point(self, path):
    """Walk up from *path* (after ~ and $VAR expansion) to its mount point."""
    current = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
    # os.path.ismount is True at a filesystem boundary (and at '/'),
    # so this loop always terminates.
    while not os.path.ismount(current):
        current = os.path.dirname(current)
    return current
|
|
|
|
|
2015-05-14 16:50:22 +02:00
|
|
|
def is_special_selinux_path(self, path):
    """
    Returns a tuple containing (True, selinux_context) if the given path is on a
    NFS or other 'special' fs mount point, otherwise the return will be (False, None).
    """
    # Best effort: if /proc/mounts cannot be read, treat the path as normal.
    try:
        mounts_file = open('/proc/mounts', 'r')
        mount_lines = mounts_file.readlines()
        mounts_file.close()
    except:
        return (False, None)

    mount_point_of_path = self.find_mount_point(path)
    for entry in mount_lines:
        # /proc/mounts format: device mountpoint fstype options dump/pass
        (_device, mount_point, fstype, _options, _rest) = entry.split(' ', 4)
        if mount_point_of_path == mount_point:
            # SELINUX_SPECIAL_FS is a comma-separated list of fs names.
            for special_fs in SELINUX_SPECIAL_FS.split(','):
                if special_fs in fstype:
                    return (True, self.selinux_context(mount_point_of_path))

    return (False, None)
|
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
def set_default_selinux_context(self, path, changed):
    """Reset *path* to its policy-default SELinux context; return the changed flag."""
    if not HAVE_SELINUX or not self.selinux_enabled():
        return changed
    default_context = self.selinux_default_context(path)
    return self.set_context_if_different(path, default_context, False)
|
|
|
|
|
|
|
|
def set_context_if_different(self, path, context, changed):
    # Apply the requested SELinux *context* components to *path* when they
    # differ from the current context; returns the updated changed flag.
    if not HAVE_SELINUX or not self.selinux_enabled():
        return changed
    cur_context = self.selinux_context(path)
    new_context = list(cur_context)
    # Iterate over the current context instead of the
    # argument context, which may have selevel.

    # Paths on 'special' filesystems (e.g. NFS) take the mount point's
    # context wholesale instead of a per-component merge.
    (is_special_se, sp_context) = self.is_special_selinux_path(path)
    if is_special_se:
        new_context = sp_context
    else:
        for i in range(len(cur_context)):
            if len(context) > i:
                # A requested component overrides; a None component keeps
                # the current value.
                if context[i] is not None and context[i] != cur_context[i]:
                    new_context[i] = context[i]
                if context[i] is None:
                    new_context[i] = cur_context[i]

    if cur_context != new_context:
        try:
            if self.check_mode:
                return True
            rc = selinux.lsetfilecon(self._to_filesystem_str(path),
                                     str(':'.join(new_context)))
        except OSError:
            self.fail_json(path=path, msg='invalid selinux context', new_context=new_context, cur_context=cur_context, input_was=context)
        if rc != 0:
            self.fail_json(path=path, msg='set selinux context failed')
        changed = True
    return changed
|
|
|
|
|
|
|
|
def set_owner_if_different(self, path, owner, changed):
    """Chown *path* to *owner* (user name or uid) if needed; return the changed flag."""
    real_path = os.path.expanduser(path)
    if owner is None:
        return changed
    current_uid, _current_gid = self.user_and_group(real_path)
    try:
        wanted_uid = int(owner)
    except ValueError:
        # Not numeric: resolve the user name via the passwd database.
        try:
            wanted_uid = pwd.getpwnam(owner).pw_uid
        except KeyError:
            self.fail_json(path=real_path, msg='chown failed: failed to look up user %s' % owner)
    if current_uid != wanted_uid:
        if self.check_mode:
            return True
        try:
            # -1 leaves the group untouched; lchown avoids following symlinks.
            os.lchown(real_path, wanted_uid, -1)
        except OSError:
            self.fail_json(path=real_path, msg='chown failed')
        changed = True
    return changed
|
|
|
|
|
|
|
|
def set_group_if_different(self, path, group, changed):
    """Chgrp *path* to *group* (group name or gid) if needed; return the changed flag."""
    real_path = os.path.expanduser(path)
    if group is None:
        return changed
    _current_uid, current_gid = self.user_and_group(real_path)
    try:
        wanted_gid = int(group)
    except ValueError:
        # Not numeric: resolve the group name via the group database.
        try:
            wanted_gid = grp.getgrnam(group).gr_gid
        except KeyError:
            self.fail_json(path=real_path, msg='chgrp failed: failed to look up group %s' % group)
    if current_gid != wanted_gid:
        if self.check_mode:
            return True
        try:
            # -1 leaves the owner untouched; lchown avoids following symlinks.
            os.lchown(real_path, -1, wanted_gid)
        except OSError:
            self.fail_json(path=real_path, msg='chgrp failed')
        changed = True
    return changed
|
|
|
|
|
|
|
|
def set_mode_if_different(self, path, mode, changed):
    # Apply *mode* (int, octal string, or symbolic spec like 'u+x') to
    # *path* when it differs from the current mode; returns the updated
    # changed flag.
    path = os.path.expanduser(path)
    path_stat = os.lstat(path)

    if mode is None:
        return changed

    if not isinstance(mode, int):
        try:
            # First interpretation: an octal string such as '0644'.
            mode = int(mode, 8)
        except Exception:
            try:
                # Otherwise: a symbolic mode such as 'u+rwx,g-w'.
                mode = self._symbolic_mode_to_octal(path_stat, mode)
            except Exception, e:
                self.fail_json(path=path,
                               msg="mode must be in octal or symbolic form",
                               details=str(e))

    prev_mode = stat.S_IMODE(path_stat.st_mode)

    if prev_mode != mode:
        if self.check_mode:
            return True
        # FIXME: comparison against string above will cause this to be executed
        # every time
        try:
            if hasattr(os, 'lchmod'):
                os.lchmod(path, mode)
            else:
                if not os.path.islink(path):
                    os.chmod(path, mode)
                else:
                    # Attempt to set the perms of the symlink but be
                    # careful not to change the perms of the underlying
                    # file while trying
                    underlying_stat = os.stat(path)
                    os.chmod(path, mode)
                    new_underlying_stat = os.stat(path)
                    if underlying_stat.st_mode != new_underlying_stat.st_mode:
                        # chmod followed the link; restore the target's mode.
                        os.chmod(path, stat.S_IMODE(underlying_stat.st_mode))
                    # NOTE(review): q_stat is never used afterwards; this
                    # final stat looks vestigial.
                    q_stat = os.stat(path)
        except OSError, e:
            if os.path.islink(path) and e.errno == errno.EPERM:  # Can't set mode on symbolic links
                pass
            elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
                pass
            else:
                raise e
        except Exception, e:
            self.fail_json(path=path, msg='chmod failed', details=str(e))

        # Re-stat to confirm whether the mode actually changed on disk.
        path_stat = os.lstat(path)
        new_mode = stat.S_IMODE(path_stat.st_mode)

        if new_mode != prev_mode:
            changed = True
    return changed
|
|
|
|
|
2013-12-07 03:06:35 +01:00
|
|
|
def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
    # Translate a comma-separated symbolic mode spec (e.g. 'u+x,go-w')
    # into an integer mode, starting from the file's current permissions.
    # Raises ValueError for any clause that does not match the grammar.
    new_mode = stat.S_IMODE(path_stat.st_mode)

    # users: any of u/g/o/a; operator: one of -, +, =; perms: rwxXst bits
    # or a single other class (u/g/o) whose bits are copied.
    mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst]*|[ugo])$')
    for mode in symbolic_mode.split(','):
        match = mode_re.match(mode)
        if match:
            users = match.group('users')
            operator = match.group('operator')
            perms = match.group('perms')

            # 'a' is shorthand for all three classes.
            if users == 'a': users = 'ugo'

            # Apply the clause once per user class, accumulating into new_mode.
            for user in users:
                mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
                new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
        else:
            raise ValueError("bad symbolic permission for mode: %s" % mode)
    return new_mode
|
|
|
|
|
|
|
|
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
    # Apply one symbolic-mode clause (=, + or -) for a single user class
    # ('u', 'g' or 'o'; 'a' is expanded by the caller) and return the result.
    if operator == '=':
        # '=' replaces the class's bits, including its special bit
        # (setuid/setgid/sticky respectively).
        if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID
        elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID
        elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX

        # mask out u, g, or o permissions from current_mode and apply new permissions
        # (07777 is a python-2 octal literal covering all permission bits)
        inverse_mask = mask ^ 07777
        new_mode = (current_mode & inverse_mask) | mode_to_apply
    elif operator == '+':
        new_mode = current_mode | mode_to_apply
    elif operator == '-':
        new_mode = current_mode - (current_mode & mode_to_apply)
    return new_mode
|
|
|
|
|
|
|
|
def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
    # Convert the perms part of one symbolic clause (e.g. 'rwX' or 'g')
    # into the octal bits for the given user class, relative to the
    # file's current mode (needed for 'X' and for copying another class).
    prev_mode = stat.S_IMODE(path_stat.st_mode)

    # 'X' only grants execute on directories or files that already have
    # some execute bit set.
    is_directory = stat.S_ISDIR(path_stat.st_mode)
    has_x_permissions = (prev_mode & 00111) > 0
    apply_X_permission = is_directory or has_x_permissions

    # Permission bits constants documented at:
    # http://docs.python.org/2/library/stat.html#stat.S_ISUID
    if apply_X_permission:
        X_perms = {
            'u': {'X': stat.S_IXUSR},
            'g': {'X': stat.S_IXGRP},
            'o': {'X': stat.S_IXOTH}
        }
    else:
        X_perms = {
            'u': {'X': 0},
            'g': {'X': 0},
            'o': {'X': 0}
        }

    # Per-class lookup: each symbolic letter maps to the octal bits it
    # contributes for that class; 'u'/'g'/'o' entries copy another
    # class's current bits, shifted into position.
    user_perms_to_modes = {
        'u': {
            'r': stat.S_IRUSR,
            'w': stat.S_IWUSR,
            'x': stat.S_IXUSR,
            's': stat.S_ISUID,
            't': 0,
            'u': prev_mode & stat.S_IRWXU,
            'g': (prev_mode & stat.S_IRWXG) << 3,
            'o': (prev_mode & stat.S_IRWXO) << 6 },
        'g': {
            'r': stat.S_IRGRP,
            'w': stat.S_IWGRP,
            'x': stat.S_IXGRP,
            's': stat.S_ISGID,
            't': 0,
            'u': (prev_mode & stat.S_IRWXU) >> 3,
            'g': prev_mode & stat.S_IRWXG,
            'o': (prev_mode & stat.S_IRWXO) << 3 },
        'o': {
            'r': stat.S_IROTH,
            'w': stat.S_IWOTH,
            'x': stat.S_IXOTH,
            's': 0,
            't': stat.S_ISVTX,
            'u': (prev_mode & stat.S_IRWXU) >> 6,
            'g': (prev_mode & stat.S_IRWXG) >> 3,
            'o': prev_mode & stat.S_IRWXO }
    }

    # Insert X_perms into user_perms_to_modes
    for key, value in X_perms.items():
        user_perms_to_modes[key].update(value)

    # OR together the bits for every letter in perms (py2 builtin reduce).
    or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
    return reduce(or_reduce, perms, 0)
|
|
|
|
|
2014-03-14 04:07:35 +01:00
|
|
|
def set_fs_attributes_if_different(self, file_args, changed):
    """Apply selinux context, ownership, group and mode from *file_args*;
    each helper threads the changed flag through, which is returned."""
    target = file_args['path']
    changed = self.set_context_if_different(target, file_args['secontext'], changed)
    changed = self.set_owner_if_different(target, file_args['owner'], changed)
    changed = self.set_group_if_different(target, file_args['group'], changed)
    changed = self.set_mode_if_different(target, file_args['mode'], changed)
    return changed
|
|
|
|
|
|
|
|
def set_directory_attributes_if_different(self, file_args, changed):
|
2014-03-14 04:07:35 +01:00
|
|
|
return self.set_fs_attributes_if_different(file_args, changed)
|
|
|
|
|
|
|
|
def set_file_attributes_if_different(self, file_args, changed):
|
|
|
|
return self.set_fs_attributes_if_different(file_args, changed)
|
2013-10-31 21:52:37 +01:00
|
|
|
|
|
|
|
    def add_path_info(self, kwargs):
        '''
        for results that are files, supplement the info about the file
        in the return path with stats about the file path.
        '''
        # 'path' wins over 'dest' when both are present
        path = kwargs.get('path', kwargs.get('dest', None))
        if path is None:
            return kwargs
        if os.path.exists(path):
            (uid, gid) = self.user_and_group(path)
            kwargs['uid'] = uid
            kwargs['gid'] = gid
            # fall back to the numeric ids when no passwd/group entry exists
            try:
                user = pwd.getpwuid(uid)[0]
            except KeyError:
                user = str(uid)
            try:
                group = grp.getgrgid(gid)[0]
            except KeyError:
                group = str(gid)
            kwargs['owner'] = user
            kwargs['group'] = group
            # lstat: report on the link itself, not its target
            st = os.lstat(path)
            kwargs['mode'] = oct(stat.S_IMODE(st[stat.ST_MODE]))
            # secontext not yet supported
            if os.path.islink(path):
                kwargs['state'] = 'link'
            elif os.path.isdir(path):
                kwargs['state'] = 'directory'
            elif os.stat(path).st_nlink > 1:
                kwargs['state'] = 'hard'
            else:
                kwargs['state'] = 'file'
            if HAVE_SELINUX and self.selinux_enabled():
                kwargs['secontext'] = ':'.join(self.selinux_context(path))
            kwargs['size'] = st[stat.ST_SIZE]
        else:
            kwargs['state'] = 'absent'
        return kwargs
2014-05-19 17:26:06 +02:00
|
|
|
    def _check_locale(self):
        '''
        Uses the locale module to test the currently set locale
        (per the LANG and LC_CTYPE environment settings)
        '''
        try:
            # setting the locale to '' uses the default locale
            # as it would be returned by locale.getdefaultlocale()
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error, e:
            # fallback to the 'C' locale, which may cause unicode
            # issues but is preferable to simply failing because
            # of an unknown locale
            locale.setlocale(locale.LC_ALL, 'C')
            # also export to children spawned later (e.g. run_command)
            os.environ['LANG'] = 'C'
            os.environ['LC_CTYPE'] = 'C'
        except Exception, e:
            # anything other than a locale.Error is unexpected; abort the module
            self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
|
|
    def _handle_aliases(self):
        '''Register every spec key and its aliases as legal inputs and
        canonicalize any alias found in self.params onto its real key.
        Returns a dict mapping alias -> canonical name.'''
        aliases_results = {} #alias:canon
        for (k,v) in self.argument_spec.iteritems():
            self._legal_inputs.append(k)
            aliases = v.get('aliases', None)
            default = v.get('default', None)
            required = v.get('required', False)
            if default is not None and required:
                # not alias specific but this is a good place to check this
                self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k)
            if aliases is None:
                continue
            if type(aliases) != list:
                self.fail_json(msg='internal error: aliases must be a list')
            for alias in aliases:
                self._legal_inputs.append(alias)
                aliases_results[alias] = k
                # copy the alias's value onto the canonical key
                if alias in self.params:
                    self.params[k] = self.params[alias]

        return aliases_results
|
|
|
    def _check_for_check_mode(self):
        '''Enable check mode if the special CHECKMODE parameter was injected;
        exit early (skipped) when the module does not support it.'''
        for (k,v) in self.params.iteritems():
            if k == 'CHECKMODE':
                if not self.supports_check_mode:
                    self.exit_json(skipped=True, msg="remote module does not support check mode")
                if self.supports_check_mode:
                    self.check_mode = True
2014-01-31 23:09:10 +01:00
|
|
|
    def _check_for_no_log(self):
        '''Honor the injected NO_LOG parameter by toggling self.no_log.'''
        for (k,v) in self.params.iteritems():
            if k == 'NO_LOG':
                self.no_log = self.boolean(v)
2013-10-31 21:52:37 +01:00
|
|
|
    def _check_invalid_arguments(self):
        '''Fail the module if any supplied parameter is not a declared
        argument or alias (see _handle_aliases).'''
        for (k,v) in self.params.iteritems():
            # these should be in legal inputs already
            #if k in ('CHECKMODE', 'NO_LOG'):
            #    continue
            if k not in self._legal_inputs:
                self.fail_json(msg="unsupported parameter for module: %s" % k)
|
|
|
def _count_terms(self, check):
|
|
|
|
count = 0
|
|
|
|
for term in check:
|
2013-11-01 00:47:05 +01:00
|
|
|
if term in self.params:
|
|
|
|
count += 1
|
2013-10-31 21:52:37 +01:00
|
|
|
return count
|
|
|
|
|
|
|
|
def _check_mutually_exclusive(self, spec):
|
|
|
|
if spec is None:
|
|
|
|
return
|
|
|
|
for check in spec:
|
|
|
|
count = self._count_terms(check)
|
|
|
|
if count > 1:
|
|
|
|
self.fail_json(msg="parameters are mutually exclusive: %s" % check)
|
|
|
|
|
|
|
|
def _check_required_one_of(self, spec):
|
|
|
|
if spec is None:
|
|
|
|
return
|
|
|
|
for check in spec:
|
|
|
|
count = self._count_terms(check)
|
|
|
|
if count == 0:
|
|
|
|
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
|
|
|
|
|
|
|
|
def _check_required_together(self, spec):
|
|
|
|
if spec is None:
|
|
|
|
return
|
|
|
|
for check in spec:
|
|
|
|
counts = [ self._count_terms([field]) for field in check ]
|
|
|
|
non_zero = [ c for c in counts if c > 0 ]
|
|
|
|
if len(non_zero) > 0:
|
|
|
|
if 0 in counts:
|
|
|
|
self.fail_json(msg="parameters are required together: %s" % check)
|
|
|
|
|
|
|
|
    def _check_required_arguments(self):
        ''' ensure all required arguments are present '''
        missing = []
        for (k,v) in self.argument_spec.iteritems():
            required = v.get('required', False)
            if required and k not in self.params:
                missing.append(k)
        # report all missing arguments at once, not just the first
        if len(missing) > 0:
            self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
|
|
|
    def _check_argument_values(self):
        ''' ensure all arguments have the requested values, and there are no stray arguments '''
        for (k,v) in self.argument_spec.iteritems():
            choices = v.get('choices',None)
            if choices is None:
                continue
            if type(choices) == list:
                if k in self.params:
                    if self.params[k] not in choices:
                        choices_str=",".join([str(c) for c in choices])
                        msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
                        self.fail_json(msg=msg)
            else:
                # only a list of choices is supported in the spec
                self.fail_json(msg="internal error: do not know how to interpret argument_spec")
2013-10-31 23:44:13 +01:00
|
|
|
    def safe_eval(self, str, locals=None, include_exceptions=False):
        '''Evaluate a string as a python literal via ast.literal_eval,
        refusing method calls and imports.  Non-evaluable input is returned
        unchanged.  When include_exceptions is true, returns a
        (result, exception-or-None) tuple instead of the bare result.
        NOTE: the parameter deliberately shadows the builtin ``str``;
        kept as-is for API compatibility.'''

        # do not allow method calls to modules
        if not isinstance(str, basestring):
            # already templated to a datastructure, perhaps?
            if include_exceptions:
                return (str, None)
            return str
        if re.search(r'\w\.\w+\(', str):
            if include_exceptions:
                return (str, None)
            return str
        # do not allow imports
        if re.search(r'import \w+', str):
            if include_exceptions:
                return (str, None)
            return str
        try:
            result = None
            if not locals:
                result = _literal_eval(str)
            else:
                result = _literal_eval(str, None, locals)
            if include_exceptions:
                return (result, None)
            else:
                return result
        except Exception, e:
            # evaluation failed: hand the original string back (optionally
            # with the exception so the caller can decide what to do)
            if include_exceptions:
                return (str, e)
            return str
2013-10-31 21:52:37 +01:00
|
|
|
    def _check_argument_types(self):
        ''' ensure all arguments have the requested type '''
        for (k, v) in self.argument_spec.iteritems():
            wanted = v.get('type', None)
            if wanted is None:
                continue
            if k not in self.params:
                continue

            value = self.params[k]
            is_invalid = False

            if wanted == 'str':
                if not isinstance(value, basestring):
                    self.params[k] = str(value)
            elif wanted == 'list':
                if not isinstance(value, list):
                    # comma-separated string -> list of strings
                    if isinstance(value, basestring):
                        self.params[k] = value.split(",")
                    elif isinstance(value, int) or isinstance(value, float):
                        self.params[k] = [ str(value) ]
                    else:
                        is_invalid = True
            elif wanted == 'dict':
                if not isinstance(value, dict):
                    if isinstance(value, basestring):
                        # try JSON first, then a python-literal eval fallback
                        if value.startswith("{"):
                            try:
                                self.params[k] = json.loads(value)
                            except:
                                (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
                                if exc is not None:
                                    self.fail_json(msg="unable to evaluate dictionary for %s" % k)
                                self.params[k] = result
                        elif '=' in value:
                            # k1=v1,k2=v2 shorthand (split on '=' only once per pair)
                            self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")])
                        else:
                            self.fail_json(msg="dictionary requested, could not parse JSON or key=value")
                    else:
                        is_invalid = True
            elif wanted == 'bool':
                if not isinstance(value, bool):
                    if isinstance(value, basestring):
                        self.params[k] = self.boolean(value)
                    else:
                        is_invalid = True
            elif wanted == 'int':
                if not isinstance(value, int):
                    if isinstance(value, basestring):
                        self.params[k] = int(value)
                    else:
                        is_invalid = True
            elif wanted == 'float':
                if not isinstance(value, float):
                    if isinstance(value, basestring):
                        self.params[k] = float(value)
                    else:
                        is_invalid = True
            else:
                self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))

            if is_invalid:
                self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted))
|
|
|
    def _set_defaults(self, pre=True):
        '''Fill self.params from the spec defaults.  With pre=True only
        non-None defaults are applied (run before required-argument checks);
        with pre=False every still-missing key is set, even to None.'''
        for (k,v) in self.argument_spec.iteritems():
            default = v.get('default', None)
            if pre == True:
                # this prevents setting defaults on required items
                if default is not None and k not in self.params:
                    self.params[k] = default
            else:
                # make sure things without a default still get set None
                if k not in self.params:
                    self.params[k] = default
|
|
|
    def _load_params(self):
        ''' read the input and return a dictionary and the arguments string '''
        # MODULE_ARGS / MODULE_COMPLEX_ARGS are substituted into the module
        # at template time by the Ansible framework
        args = MODULE_ARGS
        items = shlex.split(args)
        params = {}
        for x in items:
            try:
                (k, v) = x.split("=",1)
            except Exception, e:
                self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
            if k in params:
                self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
            params[k] = v
        params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
        # key=value arguments win over the complex-args JSON
        params2.update(params)
        return (params2, args)
|
|
|
    def _log_invocation(self):
        ''' log that ansible ran the module '''
        # TODO: generalize a separate log function and make log_invocation use it
        # Sanitize possible password argument when logging.
        log_args = dict()
        passwd_keys = ['password', 'login_password']

        for param in self.params:
            # look up the spec via the canonical name in case param is an alias
            canon = self.aliases.get(param, param)
            arg_opts = self.argument_spec.get(canon, {})
            no_log = arg_opts.get('no_log', False)

            if self.boolean(no_log):
                log_args[param] = 'NOT_LOGGING_PARAMETER'
            elif param in passwd_keys:
                log_args[param] = 'NOT_LOGGING_PASSWORD'
            else:
                # coerce to a byte string before sanitizing
                param_val = self.params[param]
                if not isinstance(param_val, basestring):
                    param_val = str(param_val)
                elif isinstance(param_val, unicode):
                    param_val = param_val.encode('utf-8')
                log_args[param] = heuristic_log_sanitize(param_val)

        module = 'ansible-%s' % os.path.basename(__file__)
        msg = []
        for arg in log_args:
            arg_val = log_args[arg]
            if not isinstance(arg_val, basestring):
                arg_val = str(arg_val)
            elif isinstance(arg_val, unicode):
                arg_val = arg_val.encode('utf-8')
            msg.append('%s=%s ' % (arg, arg_val))
        if msg:
            msg = 'Invoked with %s' % ''.join(msg)
        else:
            msg = 'Invoked'

        # 6655 - allow for accented characters
        if isinstance(msg, unicode):
            # We should never get here as msg should be type str, not unicode
            msg = msg.encode('utf-8')

        if (has_journal):
            journal_args = [("MODULE", os.path.basename(__file__))]
            for arg in log_args:
                journal_args.append((arg.upper(), str(log_args[arg])))
            try:
                journal.send("%s %s" % (module, msg), **dict(journal_args))
            except IOError, e:
                # fall back to syslog since logging to journal failed
                syslog.openlog(str(module), 0, syslog.LOG_USER)
                syslog.syslog(syslog.LOG_NOTICE, msg) #1
        else:
            syslog.openlog(str(module), 0, syslog.LOG_USER)
            syslog.syslog(syslog.LOG_NOTICE, msg) #2
2014-03-18 16:17:44 +01:00
|
|
|
    def _set_cwd(self):
        '''Make sure the process has a readable working directory, falling
        back to $HOME or the temp dir when the current one is inaccessible
        (e.g. after sudo).  Returns the usable cwd, or None.'''
        try:
            cwd = os.getcwd()
            if not os.access(cwd, os.F_OK|os.R_OK):
                # bare raise with no active exception is deliberate: it
                # triggers an error that the bare except below catches
                raise
            return cwd
        except:
            # we don't have access to the cwd, probably because of sudo.
            # Try and move to a neutral location to prevent errors
            for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
                try:
                    if os.access(cwd, os.F_OK|os.R_OK):
                        os.chdir(cwd)
                        return cwd
                except:
                    pass
        # we won't error here, as it may *not* be a problem,
        # and we don't want to break modules unnecessarily
        return None
2013-10-31 21:52:37 +01:00
|
|
|
def get_bin_path(self, arg, required=False, opt_dirs=[]):
|
|
|
|
'''
|
|
|
|
find system executable in PATH.
|
|
|
|
Optional arguments:
|
|
|
|
- required: if executable is not found and required is true, fail_json
|
|
|
|
- opt_dirs: optional list of directories to search in addition to PATH
|
|
|
|
if found return full path; otherwise return None
|
|
|
|
'''
|
|
|
|
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
|
|
|
|
paths = []
|
|
|
|
for d in opt_dirs:
|
|
|
|
if d is not None and os.path.exists(d):
|
|
|
|
paths.append(d)
|
|
|
|
paths += os.environ.get('PATH', '').split(os.pathsep)
|
|
|
|
bin_path = None
|
|
|
|
# mangle PATH to include /sbin dirs
|
|
|
|
for p in sbin_paths:
|
|
|
|
if p not in paths and os.path.exists(p):
|
|
|
|
paths.append(p)
|
|
|
|
for d in paths:
|
|
|
|
path = os.path.join(d, arg)
|
|
|
|
if os.path.exists(path) and self.is_executable(path):
|
|
|
|
bin_path = path
|
|
|
|
break
|
|
|
|
if required and bin_path is None:
|
|
|
|
self.fail_json(msg='Failed to find required executable %s' % arg)
|
|
|
|
return bin_path
|
|
|
|
|
|
|
|
def boolean(self, arg):
|
|
|
|
''' return a bool for the arg '''
|
|
|
|
if arg is None or type(arg) == bool:
|
|
|
|
return arg
|
|
|
|
if type(arg) in types.StringTypes:
|
|
|
|
arg = arg.lower()
|
|
|
|
if arg in BOOLEANS_TRUE:
|
|
|
|
return True
|
|
|
|
elif arg in BOOLEANS_FALSE:
|
|
|
|
return False
|
|
|
|
else:
|
|
|
|
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
|
|
|
|
|
|
|
|
    def jsonify(self, data):
        '''Serialize data to JSON, trying several encodings before failing.'''
        for encoding in ("utf-8", "latin-1", "unicode_escape"):
            try:
                return json.dumps(data, encoding=encoding)
            # Old systems using simplejson module does not support encoding keyword.
            except TypeError, e:
                return json.dumps(data)
            except UnicodeDecodeError, e:
                # try the next encoding
                continue
        self.fail_json(msg='Invalid unicode encoding encountered')
|
|
|
def from_json(self, data):
|
|
|
|
return json.loads(data)
|
|
|
|
|
2014-05-13 20:52:38 +02:00
|
|
|
def add_cleanup_file(self, path):
|
|
|
|
if path not in self.cleanup_files:
|
|
|
|
self.cleanup_files.append(path)
|
|
|
|
|
|
|
|
def do_cleanup_files(self):
|
|
|
|
for path in self.cleanup_files:
|
|
|
|
self.cleanup(path)
|
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
    def exit_json(self, **kwargs):
        ''' return from the module, without error '''
        self.add_path_info(kwargs)
        # every result must carry a 'changed' key
        if not 'changed' in kwargs:
            kwargs['changed'] = False
        self.do_cleanup_files()
        # the JSON on stdout is the module's result channel
        print self.jsonify(kwargs)
        sys.exit(0)
|
|
|
    def fail_json(self, **kwargs):
        ''' return from the module, with an error message '''
        self.add_path_info(kwargs)
        assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
        kwargs['failed'] = True
        self.do_cleanup_files()
        # the JSON on stdout is the module's result channel
        print self.jsonify(kwargs)
        sys.exit(1)
|
|
|
def is_executable(self, path):
|
|
|
|
'''is the given path executable?'''
|
|
|
|
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
|
|
|
|
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
|
|
|
|
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
|
|
|
|
|
|
|
|
def digest_from_file(self, filename, digest_method):
|
|
|
|
''' Return hex digest of local file for a given digest_method, or None if file is not present. '''
|
|
|
|
if not os.path.exists(filename):
|
|
|
|
return None
|
|
|
|
if os.path.isdir(filename):
|
|
|
|
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
|
|
|
|
digest = digest_method
|
|
|
|
blocksize = 64 * 1024
|
|
|
|
infile = open(filename, 'rb')
|
|
|
|
block = infile.read(blocksize)
|
|
|
|
while block:
|
|
|
|
digest.update(block)
|
|
|
|
block = infile.read(blocksize)
|
|
|
|
infile.close()
|
|
|
|
return digest.hexdigest()
|
|
|
|
|
|
|
|
    def md5(self, filename):
        ''' Return MD5 hex digest of local file using digest_from_file().

        Do not use this function unless you have no other choice for:
            1) Optional backwards compatibility
            2) Compatibility with a third party protocol

        This function will not work on systems complying with FIPS-140-2.

        Most uses of this function can use the module.sha1 function instead.
        '''
        # _md5 is None when hashlib refused to provide md5 (FIPS mode)
        if not _md5:
            raise ValueError('MD5 not available. Possibly running in FIPS mode')
        return self.digest_from_file(filename, _md5())
2014-11-07 06:28:04 +01:00
|
|
|
    def sha1(self, filename):
        ''' Return SHA1 hex digest of local file using digest_from_file(). '''
        return self.digest_from_file(filename, _sha1())
2013-10-31 21:52:37 +01:00
|
|
|
    def sha256(self, filename):
        ''' Return SHA-256 hex digest of local file using digest_from_file(). '''
        if not HAVE_HASHLIB:
            self.fail_json(msg="SHA-256 checksums require hashlib, which is available in Python 2.5 and higher")
        return self.digest_from_file(filename, _sha256())
|
|
|
    def backup_local(self, fn):
        '''make a date-marked backup of the specified file, return True or False on success or failure'''
        # NOTE: despite the docstring, this actually returns the backup path
        # ('' when fn does not exist); callers rely on the path

        backupdest = ''
        if os.path.exists(fn):
            # backups named basename-YYYY-MM-DD@HH:MM:SS~
            ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
            backupdest = '%s.%s' % (fn, ext)

            try:
                shutil.copy2(fn, backupdest)
            except (shutil.Error, IOError), e:
                self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))

        return backupdest
2014-05-13 20:52:38 +02:00
|
|
|
    def cleanup(self, tmpfile):
        '''Best-effort removal of tmpfile; failures only warn on stderr.'''
        if os.path.exists(tmpfile):
            try:
                os.unlink(tmpfile)
            except OSError, e:
                sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
|
|
|
    def atomic_move(self, src, dest):
        '''atomically move src to dest, copying attributes from dest, returns true on success
        it uses os.rename to ensure this as it is an atomic operation, rest of the function is
        to work around limitations, corner cases and ensure selinux context is saved if possible'''
        context = None
        dest_stat = None
        if os.path.exists(dest):
            try:
                # copy mode/ownership of the existing destination onto src
                # so the rename does not change them
                dest_stat = os.stat(dest)
                os.chmod(src, dest_stat.st_mode & 07777)
                os.chown(src, dest_stat.st_uid, dest_stat.st_gid)
            except OSError, e:
                # EPERM is tolerated (unprivileged user); anything else is fatal
                if e.errno != errno.EPERM:
                    raise
            if self.selinux_enabled():
                context = self.selinux_context(dest)
        else:
            if self.selinux_enabled():
                context = self.selinux_default_context(dest)

        creating = not os.path.exists(dest)

        try:
            login_name = os.getlogin()
        except OSError:
            # not having a tty can cause the above to fail, so
            # just get the LOGNAME environment variable instead
            login_name = os.environ.get('LOGNAME', None)

        # if the original login_name doesn't match the currently
        # logged-in user, or if the SUDO_USER environment variable
        # is set, then this user has switched their credentials
        switched_user = login_name and login_name != pwd.getpwuid(os.getuid())[0] or os.environ.get('SUDO_USER')

        try:
            # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
            os.rename(src, dest)
        except (IOError,OSError), e:
            # only try workarounds for errno 18 (cross device), 1 (not permitted) and 13 (permission denied)
            if e.errno != errno.EPERM and e.errno != errno.EXDEV and e.errno != errno.EACCES:
                self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))

            # fall back: stage a temp file next to dest, then rename in-place
            dest_dir = os.path.dirname(dest)
            dest_file = os.path.basename(dest)
            try:
                tmp_dest = tempfile.NamedTemporaryFile(
                    prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file)
            except (OSError, IOError), e:
                self.fail_json(msg='The destination directory (%s) is not writable by the current user.' % dest_dir)

            try: # leaves tmp file behind when sudo and not root
                if switched_user and os.getuid() != 0:
                    # cleanup will happen by 'rm' of tempdir
                    # copy2 will preserve some metadata
                    shutil.copy2(src, tmp_dest.name)
                else:
                    shutil.move(src, tmp_dest.name)
                if self.selinux_enabled():
                    self.set_context_if_different(
                        tmp_dest.name, context, False)
                try:
                    # restore the original ownership on the staged copy
                    tmp_stat = os.stat(tmp_dest.name)
                    if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
                        os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid)
                except OSError, e:
                    if e.errno != errno.EPERM:
                        raise
                # same-filesystem rename: atomic replacement of dest
                os.rename(tmp_dest.name, dest)
            except (shutil.Error, OSError, IOError), e:
                self.cleanup(tmp_dest.name)
                self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))

        if creating:
            # make sure the file has the correct permissions
            # based on the current value of umask
            umask = os.umask(0)
            os.umask(umask)
            os.chmod(dest, 0666 & ~umask)
            if switched_user:
                os.chown(dest, os.getuid(), os.getgid())

        if self.selinux_enabled():
            # rename might not preserve context
            self.set_context_if_different(dest, context, False)
2014-11-11 06:41:50 +01:00
|
|
|
    def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None):
        '''
        Execute a command, returns rc, stdout, and stderr.
        args is the command to run
        If args is a list, the command will be run with shell=False.
        If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
        If args is a string and use_unsafe_shell=True it run with shell=True.
        Other arguments:
        - check_rc (boolean)    Whether to call fail_json in case of
                                non zero RC.  Default is False.
        - close_fds (boolean)   See documentation for subprocess.Popen().
                                Default is True.
        - executable (string)   See documentation for subprocess.Popen().
                                Default is None.
        - prompt_regex (string) A regex string (not a compiled regex) which
                                can be used to detect prompts in the stdout
                                which would otherwise cause the execution
                                to hang (especially if no input data is
                                specified)
        '''

        shell = False
        if isinstance(args, list):
            if use_unsafe_shell:
                # quote each element so the joined string is shell-safe
                args = " ".join([pipes.quote(x) for x in args])
                shell = True
        elif isinstance(args, basestring) and use_unsafe_shell:
            shell = True
        elif isinstance(args, basestring):
            args = shlex.split(args.encode('utf-8'))
        else:
            msg = "Argument 'args' to run_command must be list or string"
            self.fail_json(rc=257, cmd=args, msg=msg)

        prompt_re = None
        if prompt_regex:
            try:
                prompt_re = re.compile(prompt_regex, re.MULTILINE)
            except re.error:
                self.fail_json(msg="invalid prompt regular expression given to run_command")

        # expand things like $HOME and ~
        if not shell:
            args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ]

        rc = 0
        msg = None
        st_in = None

        # Set a temporary env path if a prefix is passed
        env=os.environ
        if path_prefix:
            env['PATH']="%s:%s" % (path_prefix, env['PATH'])

        # create a printable version of the command for use
        # in reporting later, which strips out things like
        # passwords from the args list
        if isinstance(args, basestring):
            if isinstance(args, unicode):
                b_args = args.encode('utf-8')
            else:
                b_args = args
            to_clean_args = shlex.split(b_args)
            del b_args
        else:
            to_clean_args = args

        clean_args = []
        is_passwd = False
        for arg in to_clean_args:
            if is_passwd:
                # this token is the value following a password flag
                is_passwd = False
                clean_args.append('********')
                continue
            if PASSWD_ARG_RE.match(arg):
                sep_idx = arg.find('=')
                if sep_idx > -1:
                    clean_args.append('%s=********' % arg[:sep_idx])
                    continue
                else:
                    is_passwd = True
            clean_args.append(heuristic_log_sanitize(arg))
        clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)

        if data:
            st_in = subprocess.PIPE

        kwargs = dict(
            executable=executable,
            shell=shell,
            close_fds=close_fds,
            stdin=st_in,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )

        if path_prefix:
            kwargs['env'] = env
        if cwd and os.path.isdir(cwd):
            kwargs['cwd'] = cwd

        # store the pwd
        prev_dir = os.getcwd()

        # make sure we're in the right working directory
        if cwd and os.path.isdir(cwd):
            try:
                os.chdir(cwd)
            except (OSError, IOError), e:
                self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))

        try:
            cmd = subprocess.Popen(args, **kwargs)

            # the communication logic here is essentially taken from that
            # of the _communicate() function in ssh.py

            stdout = ''
            stderr = ''
            rpipes = [cmd.stdout, cmd.stderr]

            if data:
                if not binary_data:
                    data += '\n'
                cmd.stdin.write(data)
                cmd.stdin.close()

            # select-based read loop: drain both pipes until the child exits
            while True:
                rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
                if cmd.stdout in rfd:
                    dat = os.read(cmd.stdout.fileno(), 9000)
                    stdout += dat
                    if dat == '':
                        rpipes.remove(cmd.stdout)
                if cmd.stderr in rfd:
                    dat = os.read(cmd.stderr.fileno(), 9000)
                    stderr += dat
                    if dat == '':
                        rpipes.remove(cmd.stderr)
                # if we're checking for prompts, do it now
                if prompt_re:
                    if prompt_re.search(stdout) and not data:
                        return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
                # only break out if no pipes are left to read or
                # the pipes are completely read and
                # the process is terminated
                if (not rpipes or not rfd) and cmd.poll() is not None:
                    break
                # No pipes are left to read but process is not yet terminated
                # Only then it is safe to wait for the process to be finished
                # NOTE: Actually cmd.poll() is always None here if rpipes is empty
                elif not rpipes and cmd.poll() == None:
                    cmd.wait()
                    # The process is terminated. Since no pipes to read from are
                    # left, there is no need to call select() again.
                    break

            cmd.stdout.close()
            cmd.stderr.close()

            rc = cmd.returncode
        except (OSError, IOError), e:
            self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args)
        except:
            self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)

        if rc != 0 and check_rc:
            msg = heuristic_log_sanitize(stderr.rstrip())
            self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)

        # reset the pwd
        os.chdir(prev_dir)

        return (rc, stdout, stderr)
2014-03-12 15:55:54 +01:00
|
|
|
def append_to_file(self, filename, str):
|
|
|
|
filename = os.path.expandvars(os.path.expanduser(filename))
|
|
|
|
fh = open(filename, 'a')
|
|
|
|
fh.write(str)
|
|
|
|
fh.close()
|
|
|
|
|
2013-10-31 21:52:37 +01:00
|
|
|
def pretty_bytes(self,size):
|
|
|
|
ranges = (
|
|
|
|
(1<<70L, 'ZB'),
|
|
|
|
(1<<60L, 'EB'),
|
|
|
|
(1<<50L, 'PB'),
|
|
|
|
(1<<40L, 'TB'),
|
|
|
|
(1<<30L, 'GB'),
|
|
|
|
(1<<20L, 'MB'),
|
|
|
|
(1<<10L, 'KB'),
|
|
|
|
(1, 'Bytes')
|
|
|
|
)
|
|
|
|
for limit, suffix in ranges:
|
|
|
|
if size >= limit:
|
|
|
|
break
|
|
|
|
return '%.2f %s' % (float(size)/ limit, suffix)
|
|
|
|
|
2014-03-19 15:30:10 +01:00
|
|
|
def get_module_path():
    '''Return the directory holding this module file, with symlinks resolved.'''
    resolved = os.path.realpath(__file__)
    return os.path.dirname(resolved)