mirror of
https://github.com/ansible-collections/community.general.git
synced 2024-09-14 20:13:21 +02:00
PythonRunner: a command runner for python (#8289)
* PythonRunner: a command runner for python * add changelog frag * Update changelogs/fragments/8289-python-runner.yml Co-authored-by: Felix Fontein <felix@fontein.de> --------- Co-authored-by: Felix Fontein <felix@fontein.de>
This commit is contained in:
parent
fc2024d837
commit
7051fe3449
4 changed files with 263 additions and 0 deletions
4
.github/BOTMETA.yml
vendored
4
.github/BOTMETA.yml
vendored
|
@ -294,6 +294,8 @@ files:
|
|||
labels: module_utils
|
||||
$module_utils/btrfs.py:
|
||||
maintainers: gnfzdz
|
||||
$module_utils/cmd_runner.py:
|
||||
maintainers: russoz
|
||||
$module_utils/deps.py:
|
||||
maintainers: russoz
|
||||
$module_utils/gconftool2.py:
|
||||
|
@ -339,6 +341,8 @@ files:
|
|||
$module_utils/pipx.py:
|
||||
labels: pipx
|
||||
maintainers: russoz
|
||||
$module_utils/python_runner.py:
|
||||
maintainers: russoz
|
||||
$module_utils/puppet.py:
|
||||
labels: puppet
|
||||
maintainers: russoz
|
||||
|
|
2
changelogs/fragments/8289-python-runner.yml
Normal file
2
changelogs/fragments/8289-python-runner.yml
Normal file
|
@ -0,0 +1,2 @@
|
|||
minor_changes:
|
||||
- PythonRunner module utils - specialisation of ``CmdRunner`` to execute Python scripts (https://github.com/ansible-collections/community.general/pull/8289).
|
34
plugins/module_utils/python_runner.py
Normal file
34
plugins/module_utils/python_runner.py
Normal file
|
@ -0,0 +1,34 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2024, Alexei Znamensky <russoz@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, _ensure_list
|
||||
|
||||
|
||||
class PythonRunner(CmdRunner):
    """Specialisation of ``CmdRunner`` for executing Python scripts.

    The command passed by the caller is prefixed with a Python interpreter.
    If ``python`` is given as a path (absolute, or containing ``/``) it is used
    verbatim.  Otherwise, when ``venv`` is provided, the interpreter is resolved
    inside the virtual environment: ``<venv>/bin`` becomes the search prefix and
    ``PATH`` / ``VIRTUAL_ENV`` are set for the spawned process.
    """

    def __init__(self, module, command, arg_formats=None, default_args_order=(),
                 check_rc=False, force_lang="C", path_prefix=None, environ_update=None,
                 python="python", venv=None):
        self.python = python
        self.venv = venv
        self.has_venv = venv is not None

        # A python given as a path is used as-is; only a bare interpreter name
        # is subject to venv resolution.  (The original also re-assigned
        # self.python in the path case, which was a no-op.)
        if not (os.path.isabs(python) or '/' in python) and self.has_venv:
            # NOTE: this deliberately overrides any caller-supplied path_prefix
            # so the interpreter is looked up inside the virtual environment.
            path_prefix = os.path.join(venv, "bin")
            # Copy before updating so the caller's dict is never mutated.
            environ_update = dict(environ_update or {})
            # .get() guards against a parent environment with no PATH at all.
            environ_update["PATH"] = "%s:%s" % (path_prefix, os.environ.get("PATH", ""))
            environ_update["VIRTUAL_ENV"] = venv

        # Final command: interpreter followed by the script/arguments.
        python_cmd = [self.python] + _ensure_list(command)

        super(PythonRunner, self).__init__(module, python_cmd, arg_formats, default_args_order,
                                           check_rc, force_lang, path_prefix, environ_update)
|
223
tests/unit/plugins/module_utils/test_python_runner.py
Normal file
223
tests/unit/plugins/module_utils/test_python_runner.py
Normal file
|
@ -0,0 +1,223 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2024, Alexei Znamensky <russoz@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, PropertyMock
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
|
||||
from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner
|
||||
|
||||
|
||||
# Test-case table for PythonRunner. Each entry is a 3-tuple:
# (runner construction input, mocked command execution, expected results).
TC_RUNNER = dict(
    # SAMPLE: This shows all possible elements of a test case. It does not actually run.
    #
    # testcase_name=(
    #     # input
    #     dict(
    #         args_bundle = dict(
    #             param1=dict(
    #                 type="int",
    #                 value=11,
    #                 fmt_func=cmd_runner_fmt.as_opt_eq_val,
    #                 fmt_arg="--answer",
    #             ),
    #             param2=dict(
    #                 fmt_func=cmd_runner_fmt.as_bool,
    #                 fmt_arg="--bb-here",
    #             )
    #         ),
    #         runner_init_args = dict(
    #             command="testing",
    #             default_args_order=(),
    #             check_rc=False,
    #             force_lang="C",
    #             path_prefix=None,
    #             environ_update=None,
    #         ),
    #         runner_ctx_args = dict(
    #             args_order=['aa', 'bb'],
    #             output_process=None,
    #             ignore_value_none=True,
    #         ),
    #     ),
    #     # command execution
    #     dict(
    #         runner_ctx_run_args = dict(bb=True),
    #         rc = 0,
    #         out = "",
    #         err = "",
    #     ),
    #     # expected
    #     dict(
    #         results=(),
    #         run_info=dict(
    #             cmd=['/mock/bin/testing', '--answer=11', '--bb-here'],
    #             environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
    #         ),
    #         exc=None,
    #     ),
    # ),
    #
    # default interpreter name ("python"), no venv: resolved via get_bin_path mock
    aa_bb=(
        dict(
            args_bundle=dict(
                aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"),
                bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"),
            ),
            runner_init_args=dict(command="testing"),
            runner_ctx_args=dict(args_order=['aa', 'bb']),
        ),
        dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""),
        dict(
            run_info=dict(
                cmd=['/mock/bin/python', 'testing', '--answer=11', '--bb-here'],
                environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
                args_order=('aa', 'bb'),
            ),
        ),
    ),
    # explicit interpreter name "python3"
    aa_bb_py3=(
        dict(
            args_bundle=dict(
                aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"),
                bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"),
            ),
            runner_init_args=dict(command="toasting", python="python3"),
            runner_ctx_args=dict(args_order=['aa', 'bb']),
        ),
        dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""),
        dict(
            run_info=dict(
                cmd=['/mock/bin/python3', 'toasting', '--answer=11', '--bb-here'],
                environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
                args_order=('aa', 'bb'),
            ),
        ),
    ),
    # absolute interpreter path: used verbatim, bypassing lookup
    aa_bb_abspath=(
        dict(
            args_bundle=dict(
                aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"),
                bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"),
            ),
            runner_init_args=dict(command="toasting", python="/crazy/local/bin/python3"),
            runner_ctx_args=dict(args_order=['aa', 'bb']),
        ),
        dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""),
        dict(
            run_info=dict(
                cmd=['/crazy/local/bin/python3', 'toasting', '--answer=11', '--bb-here'],
                environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
                args_order=('aa', 'bb'),
            ),
        ),
    ),
    # venv: interpreter resolved in <venv>/bin, PATH and VIRTUAL_ENV set
    aa_bb_venv=(
        dict(
            args_bundle=dict(
                aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"),
                bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"),
            ),
            runner_init_args=dict(command="toasting", venv="/venv"),
            runner_ctx_args=dict(args_order=['aa', 'bb']),
        ),
        dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""),
        dict(
            run_info=dict(
                cmd=['/venv/bin/python', 'toasting', '--answer=11', '--bb-here'],
                environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C', 'VIRTUAL_ENV': '/venv', 'PATH': '/venv/bin'},
                args_order=('aa', 'bb'),
            ),
        ),
    ),
)
TC_RUNNER_IDS = sorted(TC_RUNNER.keys())
|
||||
|
||||
|
||||
@pytest.mark.parametrize('runner_input, cmd_execution, expected',
                         (TC_RUNNER[tc] for tc in TC_RUNNER_IDS),
                         ids=TC_RUNNER_IDS)
def test_runner_context(runner_input, cmd_execution, expected):
    """Build a PythonRunner against a mocked AnsibleModule and verify run_info/results."""
    bundle = runner_input['args_bundle']
    # Each mapping only gets entries whose bundle item declares the relevant key(s).
    arg_spec = {name: {'type': spec['type']} for name, spec in bundle.items() if 'type' in spec}
    params = {name: spec['value'] for name, spec in bundle.items() if 'value' in spec}
    arg_formats = {name: spec['fmt_func'](spec['fmt_arg'])
                   for name, spec in bundle.items()
                   if 'fmt_func' in spec and 'fmt_arg' in spec}

    orig_results = (cmd_execution['rc'], cmd_execution['out'], cmd_execution['err'])

    print("arg_spec={0}\nparams={1}\narg_formats={2}\n".format(
        arg_spec,
        params,
        arg_formats,
    ))

    # Mocked AnsibleModule: fixed bin path lookup and canned run_command result.
    module = MagicMock()
    type(module).argument_spec = PropertyMock(return_value=arg_spec)
    type(module).params = PropertyMock(return_value=params)
    init_args = runner_input["runner_init_args"]
    module.get_bin_path.return_value = os.path.join(
        init_args.get("venv", "/mock"),
        "bin",
        init_args.get("python", "python")
    )
    module.run_command.return_value = orig_results

    runner = PythonRunner(
        module=module,
        arg_formats=arg_formats,
        **init_args
    )

    def _split_off_path(info):
        # Pull PATH out of environ_update (if present) so it can be checked as
        # a set of entries rather than compared verbatim.
        env_path = info.get("environ_update", {}).get("PATH")
        if env_path is not None:
            info["environ_update"] = {k: v
                                      for k, v in info["environ_update"].items()
                                      if k != "PATH"}
        return info, env_path

    def _check_path_contains(actual, wanted):
        entries = set(actual.split(":"))
        assert wanted in entries, "Missing expected path {0} in output PATH: {1}".format(wanted, actual)

    def _check_run_info(actual, expected_info):
        picked = {k: actual[k] for k in expected_info.keys()}
        picked, actual_path = _split_off_path(picked)
        expected_info, wanted_path = _split_off_path(expected_info)
        if wanted_path is not None:
            _check_path_contains(actual_path, wanted_path)
        assert picked == expected_info, "{0}".format(picked)

    def _check_run(run_ctx, results):
        # `expected` and `orig_results` are captured from the enclosing scope.
        _check_run_info(run_ctx.run_info, expected['run_info'])
        assert results == expected.get('results', orig_results)

    exc = expected.get("exc")
    if exc:
        with pytest.raises(exc):
            with runner.context(**runner_input['runner_ctx_args']) as run_ctx:
                results = run_ctx.run(**cmd_execution['runner_ctx_run_args'])
                _check_run(run_ctx, results)

    else:
        with runner.context(**runner_input['runner_ctx_args']) as run_ctx:
            results = run_ctx.run(**cmd_execution['runner_ctx_run_args'])
            _check_run(run_ctx, results)
|
Loading…
Reference in a new issue