Mirror of https://github.com/ansible-collections/community.general.git, synced 2024-09-14 20:13:21 +02:00
Use lint/junit output for compile test. (#22395)
* Refactor sanity classes for use in all tests.
* Use lint/junit output for compile test.
* Add missing options for compile test.
* Fix early bailout on requirements install.
parent a8910e78ca, commit 3ab344e8bd
6 changed files with 489 additions and 321 deletions
@@ -14,11 +14,14 @@ from lib.executor import (
     IntegrationConfig,
     SubprocessError,
     ShellConfig,
-    TestConfig,
     SanityConfig,
     create_shell_command,
 )
 
+from lib.test import (
+    TestConfig,
+)
+
 from lib.core_ci import (
     AnsibleCoreCI,
 )
@@ -3,6 +3,7 @@
 from __future__ import absolute_import, print_function
 
 import os
+import re
 import tempfile
 import time
 import textwrap
@@ -42,6 +43,10 @@ from lib.util import (
     is_shippable,
 )
 
+from lib.test import (
+    TestConfig,
+)
+
 from lib.ansible_util import (
     ansible_environment,
 )
@@ -70,6 +75,13 @@ from lib.classification import (
     categorize_changes,
 )
 
+from lib.test import (
+    TestMessage,
+    TestSuccess,
+    TestFailure,
+    TestSkipped,
+)
+
 SUPPORTED_PYTHON_VERSIONS = (
     '2.6',
     '2.7',
@@ -124,18 +136,18 @@ def install_command_requirements(args):
     if not args.requirements:
         return
 
-    cmd = generate_pip_install(args.command)
-
-    if not cmd:
-        return
+    packages = []
 
     if isinstance(args, TestConfig):
         if args.coverage:
-            cmd += ['coverage']
+            packages.append('coverage')
 
     if isinstance(args, SanityConfig):
         if args.junit:
-            cmd += ['junit-xml']
+            packages.append('junit-xml')
 
+    cmd = generate_pip_install(args.command, packages)
+
+    if not cmd:
+        return
+
     try:
         run_command(args, cmd)
@@ -163,18 +175,27 @@ def generate_egg_info(args):
     run_command(args, ['python', 'setup.py', 'egg_info'], capture=args.verbosity < 3)
 
 
-def generate_pip_install(command):
+def generate_pip_install(command, packages=None):
     """
     :type command: str
+    :type packages: list[str] | None
     :rtype: list[str] | None
     """
     constraints = 'test/runner/requirements/constraints.txt'
     requirements = 'test/runner/requirements/%s.txt' % command
 
-    if not os.path.exists(requirements) or not os.path.getsize(requirements):
+    options = []
+
+    if os.path.exists(requirements) and os.path.getsize(requirements):
+        options += ['-r', requirements]
+
+    if packages:
+        options += packages
+
+    if not options:
         return None
 
-    return ['pip', 'install', '--disable-pip-version-check', '-r', requirements, '-c', constraints]
+    return ['pip', 'install', '--disable-pip-version-check', '-c', constraints] + options
 
 
 def command_shell(args):
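For context on the requirements change above: the old helper returned None whenever the per-command requirements file was missing or empty, so extras such as junit-xml were never requested, while the reworked version folds requested packages into the options list before deciding whether to bail out. A standalone sketch of the new behaviour (the call and its printed result are illustrative, assuming no compile requirements file exists in the working directory):

import os

def generate_pip_install(command, packages=None):
    # standalone mirror of the reworked helper, for illustration only
    constraints = 'test/runner/requirements/constraints.txt'
    requirements = 'test/runner/requirements/%s.txt' % command

    options = []

    if os.path.exists(requirements) and os.path.getsize(requirements):
        options += ['-r', requirements]

    if packages:
        options += packages

    if not options:
        return None

    return ['pip', 'install', '--disable-pip-version-check', '-c', constraints] + options

# with an empty or missing requirements file the old code bailed out before
# extras were added; the new code still produces a command when packages are requested:
print(generate_pip_install('compile', ['junit-xml']))
# -> e.g. ['pip', 'install', '--disable-pip-version-check', '-c', 'test/runner/requirements/constraints.txt', 'junit-xml']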
@@ -672,40 +693,91 @@ def command_compile(args):
     install_command_requirements(args)
 
-    version_commands = []
+    total = 0
+    failed = []
 
     for version in COMPILE_PYTHON_VERSIONS:
         # run all versions unless version given, in which case run only that version
         if args.python and version != args.python:
             continue
 
-        # optional list of regex patterns to exclude from tests
-        skip_file = 'test/compile/python%s-skip.txt' % version
-
-        if os.path.exists(skip_file):
-            with open(skip_file, 'r') as skip_fd:
-                skip_paths = skip_fd.read().splitlines()
-        else:
-            skip_paths = []
-
-        # augment file exclusions
-        skip_paths += [e.path for e in exclude]
-
-        skip_paths = sorted(skip_paths)
-
-        python = 'python%s' % version
-        cmd = [python, 'test/compile/compile.py']
-
-        if skip_paths:
-            cmd += ['-x', '|'.join(skip_paths)]
-
-        cmd += [target.path if target.path == '.' else './%s' % target.path for target in include]
-
-        version_commands.append((version, cmd))
-
-    for version, command in version_commands:
         display.info('Compile with Python %s' % version)
-        run_command(args, command)
+
+        result = compile_version(args, version, include, exclude)
+        result.write(args)
+
+        total += 1
+
+        if isinstance(result, TestFailure):
+            failed.append('compile --python %s' % version)
+
+    if failed:
+        raise ApplicationError('The %d compile test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
+            len(failed), total, '\n'.join(failed)))
+
+
+def compile_version(args, python_version, include, exclude):
+    """
+    :type args: CompileConfig
+    :type python_version: str
+    :type include: tuple[CompletionTarget]
+    :param exclude: tuple[CompletionTarget]
+    :rtype: TestResult
+    """
+    command = 'compile'
+    test = ''
+
+    # optional list of regex patterns to exclude from tests
+    skip_file = 'test/compile/python%s-skip.txt' % python_version
+
+    if os.path.exists(skip_file):
+        with open(skip_file, 'r') as skip_fd:
+            skip_paths = skip_fd.read().splitlines()
+    else:
+        skip_paths = []
+
+    # augment file exclusions
+    skip_paths += [e.path for e in exclude]
+
+    skip_paths = sorted(skip_paths)
+
+    python = 'python%s' % python_version
+    cmd = [python, 'test/compile/compile.py']
+
+    if skip_paths:
+        cmd += ['-x', '|'.join(skip_paths)]
+
+    cmd += [target.path if target.path == '.' else './%s' % target.path for target in include]
+
+    try:
+        stdout, stderr = run_command(args, cmd, capture=True)
+        status = 0
+    except SubprocessError as ex:
+        stdout = ex.stdout
+        stderr = ex.stderr
+        status = ex.status
+
+    if stderr:
+        raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+    if args.explain:
+        return TestSkipped(command, test, python_version=python_version)
+
+    pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+    results = [re.search(pattern, line).groupdict() for line in stdout.splitlines()]
+
+    results = [TestMessage(
+        message=r['message'],
+        path=r['path'].replace('./', ''),
+        line=int(r['line']),
+        column=int(r['column']),
+    ) for r in results]
+
+    if results:
+        return TestFailure(command, test, messages=results, python_version=python_version)
+
+    return TestSuccess(command, test, python_version=python_version)
 
 
 def intercept_command(args, cmd, capture=False, env=None, data=None, cwd=None, python_version=None):
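compile_version above turns each line that test/compile/compile.py prints into a TestMessage via the regex shown. A small illustration of that parse on a made-up output line:

import re

pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'

# hypothetical line as emitted by the compile script
line = './lib/ansible/modules/example.py:14:21: invalid syntax'

match = re.search(pattern, line).groupdict()
print(match['path'].replace('./', ''), int(match['line']), int(match['column']), match['message'])
# -> lib/ansible/modules/example.py 14 21 invalid syntax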
@@ -1013,30 +1085,6 @@ class NoTestsForChanges(ApplicationWarning):
         super(NoTestsForChanges, self).__init__('No tests found for detected changes.')
 
 
-class TestConfig(EnvironmentConfig):
-    """Configuration common to all test commands."""
-    def __init__(self, args, command):
-        """
-        :type args: any
-        :type command: str
-        """
-        super(TestConfig, self).__init__(args, command)
-
-        self.coverage = args.coverage  # type: bool
-        self.include = args.include  # type: list [str]
-        self.exclude = args.exclude  # type: list [str]
-        self.require = args.require  # type: list [str]
-
-        self.changed = args.changed  # type: bool
-        self.tracked = args.tracked  # type: bool
-        self.untracked = args.untracked  # type: bool
-        self.committed = args.committed  # type: bool
-        self.staged = args.staged  # type: bool
-        self.unstaged = args.unstaged  # type: bool
-        self.changed_from = args.changed_from  # type: str
-        self.changed_path = args.changed_path  # type: list [str]
-
-
 class ShellConfig(EnvironmentConfig):
     """Configuration for the shell command."""
     def __init__(self, args):
@@ -1057,8 +1105,6 @@ class SanityConfig(TestConfig):
         self.test = args.test  # type: list [str]
         self.skip_test = args.skip_test  # type: list [str]
         self.list_tests = args.list_tests  # type: bool
-        self.lint = args.lint  # type: bool
-        self.junit = args.junit  # type: bool
 
         if args.base_branch:
             self.base_branch = args.base_branch  # str
@@ -2,7 +2,6 @@
 
 from __future__ import absolute_import, print_function
 
-import datetime
 import glob
 import json
 import os
@@ -41,6 +40,15 @@ from lib.executor import (
     SanityConfig,
 )
 
+from lib.test import (
+    TestSuccess,
+    TestFailure,
+    TestSkipped,
+    TestMessage,
+)
+
+COMMAND = 'sanity'
+
 PEP8_SKIP_PATH = 'test/sanity/pep8/skip.txt'
 PEP8_LEGACY_PATH = 'test/sanity/pep8/legacy-files.txt'
@@ -317,7 +325,7 @@ def command_sanity_pep8(args, targets):
         status = ex.status
 
     if stderr:
-        raise SubprocessError(cmd=cmd, status=status, stderr=stderr)
+        raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
 
     if args.explain:
         return SanitySkipped(test)
@@ -444,7 +452,7 @@ def command_sanity_yamllint(args, targets):
         status = ex.status
 
     if stderr:
-        raise SubprocessError(cmd=cmd, status=status, stderr=stderr)
+        raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
 
     if args.explain:
         return SanitySkipped(test)
@@ -542,143 +550,27 @@ def sanity_get_tests():
     return SANITY_TESTS
 
 
-class SanityResult(object):
-    """Base class for sanity test results."""
-    def __init__(self, test, python_version=None):
-        """
-        :type test: str
-        :type python_version: str
-        """
-        self.test = test
-        self.python_version = python_version
-
-        try:
-            import junit_xml
-        except ImportError:
-            junit_xml = None
-
-        self.junit = junit_xml
-
-    def write(self, args):
-        """
-        :type args: SanityConfig
-        """
-        self.write_console()
-        self.write_bot(args)
-
-        if args.lint:
-            self.write_lint()
-
-        if args.junit:
-            if self.junit:
-                self.write_junit(args)
-            else:
-                display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
-
-    def write_console(self):
-        """Write results to console."""
-        pass
-
-    def write_lint(self):
-        """Write lint results to stdout."""
-        pass
-
-    def write_bot(self, args):
-        """
-        :type args: SanityConfig
-        """
-        pass
-
-    def write_junit(self, args):
-        """
-        :type args: SanityConfig
-        """
-        pass
-
-    def create_path(self, directory, extension):
-        """
-        :type directory: str
-        :type extension: str
-        :rtype: str
-        """
-        path = 'test/results/%s/ansible-test-%s' % (directory, self.test)
-
-        if self.python_version:
-            path += '-python-%s' % self.python_version
-
-        path += extension
-
-        return path
-
-    def save_junit(self, args, test_case, properties=None):
-        """
-        :type args: SanityConfig
-        :type test_case: junit_xml.TestCase
-        :type properties: dict[str, str] | None
-        :rtype: str | None
-        """
-        path = self.create_path('junit', '.xml')
-
-        test_suites = [
-            self.junit.TestSuite(
-                name='ansible-test',
-                test_cases=[test_case],
-                timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
-                properties=properties,
-            ),
-        ]
-
-        report = self.junit.TestSuite.to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
-
-        if args.explain:
-            return
-
-        with open(path, 'wb') as xml:
-            xml.write(report.encode('utf-8', 'strict'))
-
-
-class SanitySuccess(SanityResult):
+class SanitySuccess(TestSuccess):
     """Sanity test success."""
     def __init__(self, test, python_version=None):
         """
         :type test: str
         :type python_version: str
         """
-        super(SanitySuccess, self).__init__(test, python_version)
-
-    def write_junit(self, args):
-        """
-        :type args: SanityConfig
-        """
-        test_case = self.junit.TestCase(name=self.test)
-
-        self.save_junit(args, test_case)
+        super(SanitySuccess, self).__init__(COMMAND, test, python_version)
 
 
-class SanitySkipped(SanityResult):
+class SanitySkipped(TestSkipped):
     """Sanity test skipped."""
     def __init__(self, test, python_version=None):
         """
         :type test: str
         :type python_version: str
         """
-        super(SanitySkipped, self).__init__(test, python_version)
-
-    def write_console(self):
-        """Write results to console."""
-        display.info('No tests applicable.', verbosity=1)
-
-    def write_junit(self, args):
-        """
-        :type args: SanityConfig
-        """
-        test_case = self.junit.TestCase(name=self.test)
-        test_case.add_skipped_info('No tests applicable.')
-
-        self.save_junit(args, test_case)
+        super(SanitySkipped, self).__init__(COMMAND, test, python_version)
 
 
-class SanityFailure(SanityResult):
+class SanityFailure(TestFailure):
     """Sanity test failure."""
     def __init__(self, test, python_version=None, messages=None, summary=None):
         """
@@ -687,117 +579,10 @@ class SanityFailure(SanityResult):
         :type messages: list[SanityMessage]
         :type summary: str
         """
-        super(SanityFailure, self).__init__(test, python_version)
-
-        self.messages = messages
-        self.summary = summary
-
-    def write_console(self):
-        """Write results to console."""
-        if self.summary:
-            display.error(self.summary)
-        else:
-            display.error('Found %d %s issue(s) which need to be resolved:' % (len(self.messages), self.test))
-
-            for message in self.messages:
-                display.error(message)
-
-    def write_lint(self):
-        """Write lint results to stdout."""
-        if self.summary:
-            command = self.format_command()
-            message = 'The test `%s` failed. See stderr output for details.' % command
-            path = 'test/runner/ansible-test'
-            message = SanityMessage(message, path)
-            print(message)
-        else:
-            for message in self.messages:
-                print(message)
-
-    def write_junit(self, args):
-        """
-        :type args: SanityConfig
-        """
-        title = self.format_title()
-        output = self.format_block()
-
-        test_case = self.junit.TestCase(classname='sanity', name=self.test)
-
-        # Include a leading newline to improve readability on Shippable "Tests" tab.
-        # Without this, the first line becomes indented.
-        test_case.add_failure_info(message=title, output='\n%s' % output)
-
-        self.save_junit(args, test_case)
-
-    def write_bot(self, args):
-        """
-        :type args: SanityConfig
-        """
-        message = self.format_title()
-        output = self.format_block()
-
-        bot_data = dict(
-            results=[
-                dict(
-                    message=message,
-                    output=output,
-                ),
-            ],
-        )
-
-        path = self.create_path('bot', '.json')
-
-        if args.explain:
-            return
-
-        with open(path, 'wb') as bot_fd:
-            json.dump(bot_data, bot_fd, indent=4, sort_keys=True)
-            bot_fd.write('\n')
-
-    def format_command(self):
-        """
-        :rtype: str
-        """
-        command = 'ansible-test sanity --test %s' % self.test
-
-        if self.python_version:
-            command += ' --python %s' % self.python_version
-
-        return command
-
-    def format_title(self):
-        """
-        :rtype: str
-        """
-        command = self.format_command()
-
-        if self.summary:
-            reason = 'error'
-        else:
-            reason = 'error' if len(self.messages) == 1 else 'errors'
-
-        title = 'The test `%s` failed with the following %s:' % (command, reason)
-
-        return title
-
-    def format_block(self):
-        """
-        :rtype: str
-        """
-        if self.summary:
-            block = self.summary
-        else:
-            block = '\n'.join(str(m) for m in self.messages)
-
-        message = block.strip()
-
-        # Hack to remove ANSI color reset code from SubprocessError messages.
-        message = message.replace(display.clear, '')
-
-        return message
+        super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary)
 
 
-class SanityMessage(object):
+class SanityMessage(TestMessage):
     """Single sanity test message for one file."""
     def __init__(self, message, path, line=0, column=0, level='error', code=None):
         """
@@ -808,20 +593,7 @@ class SanityMessage(object):
         :type level: str
         :type code: str | None
         """
-        self.path = path
-        self.line = line
-        self.column = column
-        self.level = level
-        self.code = code
-        self.message = message
-
-    def __str__(self):
-        if self.code:
-            msg = '%s %s' % (self.code, self.message)
-        else:
-            msg = self.message
-
-        return '%s:%s:%s: %s' % (self.path, self.line, self.column, msg)
+        super(SanityMessage, self).__init__(message, path, line, column, level, code)
 
 
 class SanityTargets(object):
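With the shared result classes in place, the Sanity* classes above become thin wrappers that only pin the command name and forward everything else. A minimal sketch of that delegation pattern, using a simplified stand-in base class rather than the real lib.test.TestResult:

COMMAND = 'sanity'

class TestResultBase(object):
    # simplified, hypothetical stand-in for lib.test.TestResult
    def __init__(self, command, test, python_version=None):
        self.command = command
        self.test = test
        self.python_version = python_version
        self.name = self.test or self.command

class SanityResultExample(TestResultBase):
    # the subclass only supplies the command; callers keep the old signature
    def __init__(self, test, python_version=None):
        super(SanityResultExample, self).__init__(COMMAND, test, python_version)

result = SanityResultExample('pep8', python_version='2.7')
print(result.command, result.name)  # -> sanity pep8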
test/runner/lib/test.py  (new file, 340 lines)
@@ -0,0 +1,340 @@
"""Classes for storing and processing test results."""

from __future__ import absolute_import, print_function

import datetime
import json

from lib.util import (
    display,
    EnvironmentConfig,
)


class TestConfig(EnvironmentConfig):
    """Configuration common to all test commands."""
    def __init__(self, args, command):
        """
        :type args: any
        :type command: str
        """
        super(TestConfig, self).__init__(args, command)

        self.coverage = args.coverage  # type: bool
        self.include = args.include  # type: list [str]
        self.exclude = args.exclude  # type: list [str]
        self.require = args.require  # type: list [str]

        self.changed = args.changed  # type: bool
        self.tracked = args.tracked  # type: bool
        self.untracked = args.untracked  # type: bool
        self.committed = args.committed  # type: bool
        self.staged = args.staged  # type: bool
        self.unstaged = args.unstaged  # type: bool
        self.changed_from = args.changed_from  # type: str
        self.changed_path = args.changed_path  # type: list [str]

        self.lint = args.lint if 'lint' in args else False  # type: bool
        self.junit = args.junit if 'junit' in args else False  # type: bool
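The lint/junit attributes above use membership testing on the parsed argument namespace, so commands whose parsers never define --lint or --junit simply fall back to False. A small self-contained sketch of that pattern (the parser and flags here are illustrative, not part of the diff):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--coverage', action='store_true')
# note: this parser defines no --lint or --junit options

args = parser.parse_args(['--coverage'])

# argparse.Namespace supports 'in', so the attribute check is safe even
# when the option was never declared for this command
lint = args.lint if 'lint' in args else False
junit = args.junit if 'junit' in args else False

print(lint, junit)  # -> False False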
class TestResult(object):
    """Base class for test results."""
    def __init__(self, command, test, python_version=None):
        """
        :type command: str
        :type test: str
        :type python_version: str
        """
        self.command = command
        self.test = test
        self.python_version = python_version
        self.name = self.test or self.command

        if self.python_version:
            self.name += '-python-%s' % self.python_version

        try:
            import junit_xml
        except ImportError:
            junit_xml = None

        self.junit = junit_xml

    def write(self, args):
        """
        :type args: TestConfig
        """
        self.write_console()
        self.write_bot(args)

        if args.lint:
            self.write_lint()

        if args.junit:
            if self.junit:
                self.write_junit(args)
            else:
                display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)

    def write_console(self):
        """Write results to console."""
        pass

    def write_lint(self):
        """Write lint results to stdout."""
        pass

    def write_bot(self, args):
        """
        :type args: TestConfig
        """
        pass

    def write_junit(self, args):
        """
        :type args: TestConfig
        """
        pass

    def create_path(self, directory, extension):
        """
        :type directory: str
        :type extension: str
        :rtype: str
        """
        path = 'test/results/%s/ansible-test-%s' % (directory, self.command)

        if self.test:
            path += '-%s' % self.test

        if self.python_version:
            path += '-python-%s' % self.python_version

        path += extension

        return path

    def save_junit(self, args, test_case, properties=None):
        """
        :type args: TestConfig
        :type test_case: junit_xml.TestCase
        :type properties: dict[str, str] | None
        :rtype: str | None
        """
        path = self.create_path('junit', '.xml')

        test_suites = [
            self.junit.TestSuite(
                name='ansible-test',
                test_cases=[test_case],
                timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
                properties=properties,
            ),
        ]

        report = self.junit.TestSuite.to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')

        if args.explain:
            return

        with open(path, 'wb') as xml:
            xml.write(report.encode('utf-8', 'strict'))
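As a quick orientation, create_path above derives the artifact file name from the command, the optional test name and the Python version. A standalone mirror of the naming scheme with illustrative inputs:

# standalone mirror of the TestResult.create_path naming scheme
def result_path(directory, extension, command, test='', python_version=None):
    path = 'test/results/%s/ansible-test-%s' % (directory, command)
    if test:
        path += '-%s' % test
    if python_version:
        path += '-python-%s' % python_version
    return path + extension

print(result_path('junit', '.xml', 'compile', python_version='2.6'))
# -> test/results/junit/ansible-test-compile-python-2.6.xml
print(result_path('bot', '.json', 'sanity', test='pep8'))
# -> test/results/bot/ansible-test-sanity-pep8.json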
class TestSuccess(TestResult):
    """Test success."""
    def __init__(self, command, test, python_version=None):
        """
        :type command: str
        :type test: str
        :type python_version: str
        """
        super(TestSuccess, self).__init__(command, test, python_version)

    def write_junit(self, args):
        """
        :type args: TestConfig
        """
        test_case = self.junit.TestCase(classname=self.command, name=self.name)

        self.save_junit(args, test_case)


class TestSkipped(TestResult):
    """Test skipped."""
    def __init__(self, command, test, python_version=None):
        """
        :type command: str
        :type test: str
        :type python_version: str
        """
        super(TestSkipped, self).__init__(command, test, python_version)

    def write_console(self):
        """Write results to console."""
        display.info('No tests applicable.', verbosity=1)

    def write_junit(self, args):
        """
        :type args: TestConfig
        """
        test_case = self.junit.TestCase(classname=self.command, name=self.name)
        test_case.add_skipped_info('No tests applicable.')

        self.save_junit(args, test_case)
class TestFailure(TestResult):
    """Test failure."""
    def __init__(self, command, test, python_version=None, messages=None, summary=None):
        """
        :type command: str
        :type test: str
        :type python_version: str
        :type messages: list[TestMessage]
        :type summary: str
        """
        super(TestFailure, self).__init__(command, test, python_version)

        self.messages = messages
        self.summary = summary

    def write_console(self):
        """Write results to console."""
        if self.summary:
            display.error(self.summary)
        else:
            if self.python_version:
                specifier = ' on python %s' % self.python_version
            else:
                specifier = ''

            display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))

            for message in self.messages:
                display.error(message)

    def write_lint(self):
        """Write lint results to stdout."""
        if self.summary:
            command = self.format_command()
            message = 'The test `%s` failed. See stderr output for details.' % command
            path = 'test/runner/ansible-test'
            message = TestMessage(message, path)
            print(message)
        else:
            for message in self.messages:
                print(message)

    def write_junit(self, args):
        """
        :type args: TestConfig
        """
        title = self.format_title()
        output = self.format_block()

        test_case = self.junit.TestCase(classname=self.command, name=self.name)

        # Include a leading newline to improve readability on Shippable "Tests" tab.
        # Without this, the first line becomes indented.
        test_case.add_failure_info(message=title, output='\n%s' % output)

        self.save_junit(args, test_case)

    def write_bot(self, args):
        """
        :type args: TestConfig
        """
        message = self.format_title()
        output = self.format_block()

        bot_data = dict(
            results=[
                dict(
                    message=message,
                    output=output,
                ),
            ],
        )

        path = self.create_path('bot', '.json')

        if args.explain:
            return

        with open(path, 'wb') as bot_fd:
            json.dump(bot_data, bot_fd, indent=4, sort_keys=True)
            bot_fd.write('\n')

    def format_command(self):
        """
        :rtype: str
        """
        command = 'ansible-test %s' % self.command

        if self.test:
            command += ' --test %s' % self.test

        if self.python_version:
            command += ' --python %s' % self.python_version

        return command

    def format_title(self):
        """
        :rtype: str
        """
        command = self.format_command()

        if self.summary:
            reason = 'error'
        else:
            reason = 'error' if len(self.messages) == 1 else 'errors'

        title = 'The test `%s` failed with the following %s:' % (command, reason)

        return title

    def format_block(self):
        """
        :rtype: str
        """
        if self.summary:
            block = self.summary
        else:
            block = '\n'.join(str(m) for m in self.messages)

        message = block.strip()

        # Hack to remove ANSI color reset code from SubprocessError messages.
        message = message.replace(display.clear, '')

        return message
class TestMessage(object):
    """Single test message for one file."""
    def __init__(self, message, path, line=0, column=0, level='error', code=None):
        """
        :type message: str
        :type path: str
        :type line: int
        :type column: int
        :type level: str
        :type code: str | None
        """
        self.path = path
        self.line = line
        self.column = column
        self.level = level
        self.code = code
        self.message = message

    def __str__(self):
        if self.code:
            msg = '%s %s' % (self.code, self.message)
        else:
            msg = self.message

        return '%s:%s:%s: %s' % (self.path, self.line, self.column, msg)
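With --lint, each TestMessage prints as a single path:line:column line that editors and log scanners can parse. A standalone sketch of the format produced by __str__ above (the paths and messages are made up):

# standalone mirror of TestMessage.__str__, for illustration only
class Message(object):
    def __init__(self, message, path, line=0, column=0, code=None):
        self.message = message
        self.path = path
        self.line = line
        self.column = column
        self.code = code

    def __str__(self):
        msg = '%s %s' % (self.code, self.message) if self.code else self.message
        return '%s:%s:%s: %s' % (self.path, self.line, self.column, msg)

print(Message('invalid syntax', 'lib/ansible/modules/example.py', line=14, column=21))
# -> lib/ansible/modules/example.py:14:21: invalid syntax
print(Message('line too long', 'hacking/example.py', line=3, column=1, code='E501'))
# -> hacking/example.py:3:1: E501 line too long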
@@ -260,6 +260,7 @@ def parse_args():
                         choices=COMPILE_PYTHON_VERSIONS,
                         help='python version: %s' % ', '.join(COMPILE_PYTHON_VERSIONS))
 
+    add_lint(compiler)
     add_extra_docker_options(compiler, integration=False)
 
     sanity = subparsers.add_parser('sanity',
@@ -286,14 +287,6 @@ def parse_args():
                         action='store_true',
                         help='list available tests')
 
-    sanity.add_argument('--lint',
-                        action='store_true',
-                        help='write lint output to stdout, everything else stderr')
-
-    sanity.add_argument('--junit',
-                        action='store_true',
-                        help='write test failures to junit xml files')
-
     sanity.add_argument('--python',
                         metavar='VERSION',
                         choices=SUPPORTED_PYTHON_VERSIONS,
@@ -302,6 +295,7 @@ def parse_args():
     sanity.add_argument('--base-branch',
                         help=argparse.SUPPRESS)
 
+    add_lint(sanity)
     add_extra_docker_options(sanity, integration=False)
 
     shell = subparsers.add_parser('shell',
@@ -377,6 +371,19 @@ def parse_args():
     return args
 
 
+def add_lint(parser):
+    """
+    :type parser: argparse.ArgumentParser
+    """
+    parser.add_argument('--lint',
+                        action='store_true',
+                        help='write lint output to stdout, everything else stderr')
+
+    parser.add_argument('--junit',
+                        action='store_true',
+                        help='write test failures to junit xml files')
+
+
 def add_changes(parser, argparse):
     """
     :type parser: argparse.ArgumentParser
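add_lint above attaches the same two flags to both the compile and sanity subparsers. A self-contained sketch of that shared-flag pattern with a trimmed-down parser (names are illustrative):

import argparse

def add_lint(parser):
    # shared flags, added once per subcommand that supports lint/junit output
    parser.add_argument('--lint', action='store_true',
                        help='write lint output to stdout, everything else stderr')
    parser.add_argument('--junit', action='store_true',
                        help='write test failures to junit xml files')

root = argparse.ArgumentParser(prog='ansible-test')
subparsers = root.add_subparsers(dest='command')

for name in ('compile', 'sanity'):
    add_lint(subparsers.add_parser(name))

print(root.parse_args(['compile', '--junit']))
# -> Namespace(command='compile', junit=True, lint=False)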
@@ -19,7 +19,7 @@ retry.py pip install tox --disable-pip-version-check
 
 errors=0
 
-ansible-test compile --color -v || ((errors++))
+ansible-test compile --color -v --junit --requirements || ((errors++))
 ansible-test sanity --color -v --junit --tox --skip-test ansible-doc --python 2.7 || ((errors++))
 ansible-test sanity --color -v --junit --tox --test ansible-doc --coverage || ((errors++))
 