selftest: Add test to verify junit xml report

Change-Id: I8cad02abe776cc00b513113dbaf3c948ea7956cd
commit d4dc2ad3a2
parent e592de8056
Author: Pau Espin, 2020-06-15 13:27:07 +02:00 (committed by pespin)

9 changed files with 225 additions and 6 deletions

@@ -0,0 +1 @@
../_prep.py

@@ -0,0 +1,62 @@
<testsuites errors="2" failures="1" name="trial" tests="10" time="102">
  <testsuite disabled="0" errors="0" failures="0" hostname="localhost" id="0" name="suiteA" skipped="0" tests="2">
    <testcase classname="suiteA" name="suiteA-0" time="30">
      <system-out>test log file not available</system-out>
    </testcase>
    <testcase classname="suiteA" name="suiteA-1" time="10">
      <system-out>yay this is a test-applied stdout</system-out>
    </testcase>
    <properties>
      <property name="ref:foobar/potato" value="1234"></property>
      <property name="ref:orange" value="abcd"></property>
    </properties>
  </testsuite>
  <testsuite disabled="0" errors="2" failures="0" hostname="localhost" id="1" name="suiteB" skipped="0" tests="3">
    <testcase classname="suiteB" name="suiteB-0" time="10">
      <system-out>test log file not available</system-out>
    </testcase>
    <testcase classname="suiteB" name="suiteB-1" time="0">
      <error>could not run</error>
      <system-out>test log file not available</system-out>
    </testcase>
    <testcase classname="suiteB" name="suiteB-2" time="0">
      <error>could not run</error>
      <system-out>test log file not available</system-out>
    </testcase>
    <properties>
      <property name="ref:foobar/potato" value="1234"></property>
      <property name="ref:orange" value="abcd"></property>
    </properties>
  </testsuite>
  <testsuite disabled="2" errors="0" failures="0" hostname="localhost" id="2" name="suiteC" skipped="2" tests="3">
    <testcase classname="suiteC" name="suiteC-0" time="0">
      <skipped></skipped>
      <system-out>test log file not available</system-out>
    </testcase>
    <testcase classname="suiteC" name="suiteC-1" time="10">
      <system-out>test log file not available</system-out>
    </testcase>
    <testcase classname="suiteC" name="suiteC-2" time="0">
      <skipped></skipped>
      <system-out>test log file not available</system-out>
    </testcase>
    <properties>
      <property name="ref:foobar/potato" value="1234"></property>
      <property name="ref:orange" value="abcd"></property>
    </properties>
  </testsuite>
  <testsuite disabled="0" errors="0" failures="1" hostname="localhost" id="3" name="suiteD" skipped="0" tests="2">
    <testcase classname="suiteD" name="suiteD-0" time="12">
      <failure type="fake_fail_type">fake_fail_message</failure>
      <system-err>system stderr fake content</system-err>
      <system-out>test log file not available</system-out>
    </testcase>
    <testcase classname="suiteD" name="suiteD-1" time="10">
      <system-out>test log file not available</system-out>
    </testcase>
    <properties>
      <property name="ref:foobar/potato" value="1234"></property>
      <property name="ref:orange" value="abcd"></property>
    </properties>
  </testsuite>
</testsuites>
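
The counters on the root <testsuites> element are sums over the per-suite attributes: errors="2" from suiteB, failures="1" from suiteD, tests="10" across all four suites. time="102" is the sum of the fake suite durations (50+20+12+20 in the test script further down), not of the per-testcase times, so it is left out of the check below. A minimal sketch, not part of the commit, that re-derives those sums from a file shaped like the one above using only the standard library:

# Sketch (not part of the commit): cross-check the root <testsuites>
# counters against the per-suite attributes of an expected-output file.
import xml.etree.ElementTree as ET

root = ET.parse('expected_junit_output.xml').getroot()
for attr in ('tests', 'errors', 'failures'):
    # iter('testsuite') walks the child suites; the root itself is excluded.
    total = sum(int(ts.get(attr, '0')) for ts in root.iter('testsuite'))
    assert total == int(root.get(attr)), attr
print('per-suite sums match the <testsuites> totals')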

@@ -0,0 +1,17 @@
cnf ResourcesPool: DBG: Found main configuration file in [PATH]/selftest/report_test/main.conf which is [PATH]/selftest/report_test/main.conf
cnf ResourcesPool: DBG: MAIN CONFIG:
{'default_suites_conf_path': '[PATH]/selftest/report_test/default-suites.conf',
 'defaults_conf_path': '[PATH]/selftest/report_test/defaults.conf',
 'resource_conf_path': '[PATH]/selftest/report_test/resources.conf',
 'scenarios_dir': ['[PATH]/selftest/report_test/scenarios'],
 'state_dir': '/var/tmp/osmo-gsm-tester/state',
 'suites_dir': ['[PATH]/selftest/report_test/suites'],
 'trial_dir': '[PATH]/selftest/report_test/trial'}
tst suiteA: DBG: {combining='config'}
tst {combining_scenarios='config'}: DBG: {definition_conf={}} [suiteA↪{combining_scenarios='config'}]
tst suiteB: DBG: {combining='config'}
tst {combining_scenarios='config'}: DBG: {definition_conf={}} [suiteB↪{combining_scenarios='config'}]
tst suiteC: DBG: {combining='config'}
tst {combining_scenarios='config'}: DBG: {definition_conf={}} [suiteC↪{combining_scenarios='config'}]
tst suiteD: DBG: {combining='config'}
tst {combining_scenarios='config'}: DBG: {definition_conf={}} [suiteD↪{combining_scenarios='config'}]

@@ -0,0 +1,3 @@
/[^ ]*/selftest/ [PATH]/selftest/
\.py:[0-9]* .py:[LINENR]
\([0-9.]+ sec\) (N.N sec)
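
Each line of this file pairs a regex with its replacement, scrubbing run-dependent output (absolute paths, Python line numbers, durations) before the produced log is compared against the expected one. A minimal sketch of that normalization, assuming the selftest harness applies the rules with re.sub; the exact harness mechanics are not shown in this commit:

# Sketch (assumption: rules are 'pattern replacement' pairs applied via re.sub
# to the captured output before diffing against the expected log).
import re

rules = [
    (r'/[^ ]*/selftest/', '[PATH]/selftest/'),
    (r'\.py:[0-9]*', '.py:[LINENR]'),
    (r'\([0-9.]+ sec\)', '(N.N sec)'),
]

def normalize(text):
    for pattern, repl in rules:
        text = re.sub(pattern, repl, text)
    return text

print(normalize('suite run took (12.3 sec) in /home/user/selftest/foo.py:42'))
# -> 'suite run took (N.N sec) in [PATH]/selftest/foo.py:[LINENR]'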

@@ -0,0 +1,133 @@
#!/usr/bin/env python3
import _prep

from osmo_gsm_tester.core import report
from osmo_gsm_tester.core import log
from osmo_gsm_tester.core import util
from osmo_gsm_tester.core import test
from osmo_gsm_tester.core import suite
from osmo_gsm_tester.core import config

import os
import sys
import shutil
import difflib
import xml.etree.ElementTree as et

class FakeTrial(log.Origin):
    def __init__(self):
        super().__init__(log.C_TST, 'trial')
        self.dir = util.Dir(example_trial_dir)
        self._run_dir = None
        self.suites = []

    def get_all_inst_hash_info(self):
        return { 'foobar/potato': '1234', 'orange': 'abcd' }

    def get_run_dir(self):
        if self._run_dir is not None:
            return self._run_dir
        self._run_dir = util.Dir(self.dir.new_child('test_run'))
        self._run_dir.mkdir()
        return self._run_dir

class FakeSuiteDefinition(log.Origin):
    def __init__(self, name, num_tests):
        super().__init__(log.C_TST, name)
        self.test_basenames = [name + '-' + str(tid) for tid in range(num_tests)]
        self.conf = {}
        self.suite_dir = util.Dir(example_trial_dir).new_child('suitedef' + name)

def fake_run_test(test_obj, status, duration, sysout=None):
    test_obj.status = status
    test_obj.duration = duration
    if sysout is not None:
        test_obj.set_report_stdout(sysout)
    if status == test.Test.FAIL:
        test_obj.fail_type = 'fake_fail_type'
        test_obj.fail_message = 'fake_fail_message'
        test_obj.fail_tb = 'system stderr fake content'

def fake_run_suite(suite_obj, duration):
    suite_obj.duration = duration
    suite_obj.determine_status()

config.override_conf = os.path.join(os.path.dirname(sys.argv[0]), 'main.conf')
example_trial_dir = os.path.join('test_trial_tmp')

trial = FakeTrial()

# Suite passes with 2 tests passing
s_def = FakeSuiteDefinition('suiteA', 2)
s = suite.SuiteRun(trial, s_def.name(), s_def)
trial.suites.append(s)
fake_run_test(s.tests[0], test.Test.PASS, 30)
fake_run_test(s.tests[1], test.Test.PASS, 10, 'yay this is a test-applied stdout')
#fake_run_test(suiteA.tests[0], test.Test.UNKNOWN, 20)
fake_run_suite(s, 50)

# Suite passes the first test, but the next ones are not executed
s_def = FakeSuiteDefinition('suiteB', 3)
s = suite.SuiteRun(trial, s_def.name(), s_def)
trial.suites.append(s)
fake_run_test(s.tests[0], test.Test.PASS, 10)
fake_run_suite(s, 20)

# Suite passes the one test selected, the others are skipped
s_def = FakeSuiteDefinition('suiteC', 3)
s = suite.SuiteRun(trial, s_def.name(), s_def)
trial.suites.append(s)
s.tests[0].set_skip()
fake_run_test(s.tests[1], test.Test.PASS, 10)
s.tests[2].set_skip()
fake_run_suite(s, 12)

# Suite fails due to one of its tests failing
s_def = FakeSuiteDefinition('suiteD', 2)
s = suite.SuiteRun(trial, s_def.name(), s_def)
trial.suites.append(s)
fake_run_test(s.tests[0], test.Test.FAIL, 12)
fake_run_test(s.tests[1], test.Test.PASS, 10)
fake_run_suite(s, 20)

element = report.trial_to_junit(trial)

def indent(elem, level=0):
    # Pretty-print helper: recursively set each element's text/tail so the
    # serialized XML comes out with newline + 2-space-per-level indentation.
    i = "\n" + level*"  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            indent(elem, level+1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i

def udiff(expect, got, expect_path):
    expect = expect.splitlines(1)
    got = got.splitlines(1)
    for line in difflib.unified_diff(expect, got,
                                     fromfile=expect_path, tofile='got'):
        sys.stderr.write(line)
        if not line.endswith('\n'):
            sys.stderr.write('[no-newline]\n')

indent(element)
# canonicalize() is only available in python3.8+, and we need it to have reliable string output:
if hasattr(et, 'canonicalize'):
    got = et.canonicalize(et.tostring(element)).rstrip()
    exp_path = os.path.join(os.path.dirname(sys.argv[0]), 'expected_junit_output.xml')
    with open(exp_path, 'r') as f:
        exp = f.read().rstrip()
    udiff(exp, got, exp_path)

# delete the generated tmp trial dir:
shutil.rmtree(example_trial_dir, ignore_errors=True)

# vim: expandtab tabstop=4 shiftwidth=4
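
For reference, the mapping the expected XML implies between a test's final state and its junit child elements: a passed test carries only <system-out>, a skipped one adds <skipped>, a failed one adds <failure type=...> plus <system-err> with the traceback, and a test that never ran gets an <error>. A hedged sketch of that mapping, with illustrative names rather than the report.trial_to_junit() internals:

# Sketch (assumption: mirrors the state-to-element mapping visible in
# expected_junit_output.xml above; not the actual report module API).
import xml.etree.ElementTree as ET

def testcase_element(name, status, duration):
    tc = ET.Element('testcase', {'classname': 'demo', 'name': name,
                                 'time': str(duration)})
    if status == 'skip':
        ET.SubElement(tc, 'skipped')
    elif status == 'fail':
        ET.SubElement(tc, 'failure', {'type': 'fake_fail_type'}).text = 'fake_fail_message'
    elif status == 'unknown':  # test never ran
        ET.SubElement(tc, 'error').text = 'could not run'
    ET.SubElement(tc, 'system-out').text = 'test log file not available'
    return tc

print(ET.tostring(testcase_element('demo-0', 'fail', 12), encoding='unicode'))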

@@ -189,18 +189,21 @@ class SuiteRun(log.Origin):
             util.import_path_remove(suite_libdir)
             self.duration = time.time() - self.start_timestamp
 
-            passed, skipped, failed, errors = self.count_test_results()
-            # if no tests ran, count it as failure
-            if passed and not failed and not errors:
-                self.status = SuiteRun.PASS
-            else:
-                self.status = SuiteRun.FAIL
+            self.determine_status()
 
             log.large_separator(self._trial.name(), self.name(), self.status, sublevel=2, space_above=False)
 
     def passed(self):
         return self.status == SuiteRun.PASS
 
+    def determine_status(self):
+        passed, skipped, failed, errors = self.count_test_results()
+        # if no tests ran, count it as failure
+        if passed and not failed and not errors:
+            self.status = SuiteRun.PASS
+        else:
+            self.status = SuiteRun.FAIL
+
     def count_test_results(self):
         passed = 0
         skipped = 0