Add per-test KPI support

Tests can now call 'tenv.test().set_kpis(some_dict)' to record any kind of
data as KPIs, which will be presented in the JUnit report.

The representation of KPIs in the XML file doesn't follow the JUnit
format, mainly because that format has no support for per-test properties.

Change-Id: I00e976f65a202e82d440bf33708f06c8ce2643e2
Pau Espin, 2020-06-15 14:27:50 +02:00 (committed by pespin)
parent d4dc2ad3a2, commit e3d1b61175
5 changed files with 98 additions and 3 deletions
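
As a usage sketch (not part of this commit's diff), a test script could record KPIs as the selftest below does; 'tenv' is the test environment handle the commit message refers to, and the key names are purely illustrative:

# Inside an osmo-gsm-tester test script. Values may be nested dicts,
# lists/tuples or scalars; only the toplevel object must be a dict,
# otherwise set_kpis() raises log.Error (see the test.py hunk below).
kpis = {
    'ueA': {               # nested dicts become <kpi_node> elements
        'kpiA': 30,
        'kpiB': 'foobar',
    },
    'somekpi': 'someval',  # scalars become <property> elements
}
tenv.test().set_kpis(kpis)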

View File

@@ -1,4 +1,4 @@
<testsuites errors="2" failures="1" name="trial" tests="10" time="102">
<testsuites errors="2" failures="2" name="trial" tests="12" time="122">
  <testsuite disabled="0" errors="0" failures="0" hostname="localhost" id="0" name="suiteA" skipped="0" tests="2">
    <testcase classname="suiteA" name="suiteA-0" time="30">
      <system-out>test log file not available</system-out>
@@ -59,4 +59,34 @@
      <property name="ref:orange" value="abcd"></property>
    </properties>
  </testsuite>
</testsuites>
  <testsuite disabled="0" errors="0" failures="1" hostname="localhost" id="4" name="suiteE" skipped="0" tests="2">
    <testcase classname="suiteE" name="suiteE-0" time="12">
      <failure type="fake_fail_type">fake_fail_message</failure>
      <system-err>system stderr fake content</system-err>
      <kpis>
        <kpi_node name="ueA">
          <property name="kpiA" value="30"></property>
          <property name="kpiB" value="foobar"></property>
          <kpi_node name="yet-another-level">
            <property name="foo" value="bar"></property>
          </kpi_node>
        </kpi_node>
        <kpi_node name="enbD">
          <property name="foobar-boolean" value="True"></property>
        </kpi_node>
        <property name="somekpi" value="someval"></property>
      </kpis>
      <system-out>test log file not available</system-out>
    </testcase>
    <testcase classname="suiteE" name="suiteE-1" time="10">
      <kpis>
        <property name="abcd" value="abcdval"></property>
      </kpis>
      <system-out>test log file not available</system-out>
    </testcase>
    <properties>
      <property name="ref:foobar/potato" value="1234"></property>
      <property name="ref:orange" value="abcd"></property>
    </properties>
  </testsuite>
</testsuites>

View File

@@ -15,3 +15,5 @@ tst suiteC: DBG: {combining='config'}
tst {combining_scenarios='config'}: DBG: {definition_conf={}} [suiteC↪{combining_scenarios='config'}]
tst suiteD: DBG: {combining='config'}
tst {combining_scenarios='config'}: DBG: {definition_conf={}} [suiteD↪{combining_scenarios='config'}]
tst suiteE: DBG: {combining='config'}
tst {combining_scenarios='config'}: DBG: {definition_conf={}} [suiteE↪{combining_scenarios='config'}]

View File

@@ -39,11 +39,13 @@ class FakeSuiteDefinition(log.Origin):
        self.suite_dir = util.Dir(example_trial_dir).new_child('suitedef' + name)

def fake_run_test(test_obj, status, duration, sysout=None):
def fake_run_test(test_obj, status, duration, sysout=None, kpis=None):
    test_obj.status = status
    test_obj.duration = duration
    if sysout is not None:
        test_obj.set_report_stdout(sysout)
    if kpis is not None:
        test_obj.set_kpis(kpis)
    if status == test.Test.FAIL:
        test_obj.fail_type = 'fake_fail_type'
        test_obj.fail_message = 'fake_fail_message'
@@ -92,6 +94,14 @@ fake_run_test(s.tests[0], test.Test.FAIL, 12)
fake_run_test(s.tests[1], test.Test.PASS, 10)
fake_run_suite(s, 20)
# Test adding KPIs
s_def = FakeSuiteDefinition('suiteE', 2)
s = suite.SuiteRun(trial, s_def.name(), s_def)
trial.suites.append(s)
fake_run_test(s.tests[0], test.Test.FAIL, 12, kpis={'ueA': {'kpiA': 30, 'kpiB': 'foobar', 'yet-another-level': {'foo': 'bar'}}, 'enbD': {'foobar-boolean': True }, 'somekpi': 'someval'})
fake_run_test(s.tests[1], test.Test.PASS, 10, kpis={'abcd': 'abcdval'})
fake_run_suite(s, 20)
element = report.trial_to_junit(trial)
def indent(elem, level=0):
@@ -126,6 +136,9 @@ if hasattr(et, 'canonicalize'):
with open(exp_path, 'r') as f:
    exp = f.read().rstrip()
udiff(exp, got, exp_path)
# Uncomment to update exp_path:
#with open(exp_path, 'w') as f:
#    f.write(got)
#deleting generated tmp trial dir:
shutil.rmtree(example_trial_dir, ignore_errors=True)

View File

@@ -53,6 +53,46 @@ def hash_info_to_junit(testsuite, hash_info):
        prop.set('name', 'ref:' + key)
        prop.set('value', val)

def dict_to_junit(parent, d):
    for key, val in d.items():
        if isinstance(val, dict):
            node = et.SubElement(parent, 'kpi_node')
            node.set('name', key)
            dict_to_junit(node, val)
            continue
        if isinstance(val, (tuple, list)):
            node = et.SubElement(parent, 'kpi_node')
            node.set('name', key)
            list_to_junit(node, val)
            continue
        # scalar:
        node = et.SubElement(parent, 'property')
        node.set('name', key)
        node.set('value', str(val))

def list_to_junit(parent, li):
    for i in range(len(li)):
        if isinstance(li[i], dict):
            node = et.SubElement(parent, 'kpi_node')
            node.set('name', str(i))
            dict_to_junit(node, li[i])
            continue
        if isinstance(li[i], (tuple, list)):
            node = et.SubElement(parent, 'kpi_node')
            node.set('name', str(i))
            list_to_junit(node, li[i])
            continue
        # scalar:
        node = et.SubElement(parent, 'property')
        node.set('name', str(i))
        node.set('value', str(li[i]))

def kpis_to_junit(parent, kpis):
    if not kpis:
        return
    assert isinstance(kpis, dict)
    knode = et.SubElement(parent, 'kpis')
    dict_to_junit(knode, kpis)

def trial_to_junit_write(trial, junit_path):
    elements = et.ElementTree(element=trial_to_junit(trial))

@@ -118,6 +158,7 @@ def test_to_junit(t):
    elif t.status != test.Test.PASS:
        error = et.SubElement(testcase, 'error')
        error.text = 'could not run'
    kpis_to_junit(testcase, t.kpis())
    sout = et.SubElement(testcase, 'system-out')
    sout.text = escape_xml_invalid_characters(t.report_stdout())
    return testcase
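
For illustration, here is a self-contained sketch (not part of the commit) of the mapping the helpers above implement: nested dicts turn into <kpi_node> elements and scalars into <property> elements. It mirrors kpis_to_junit()/dict_to_junit() but omits the list branch for brevity:

import xml.etree.ElementTree as et

def kpis_to_xml(kpis):
    # Same element names as kpis_to_junit()/dict_to_junit() above.
    knode = et.Element('kpis')
    def walk(parent, d):
        for key, val in d.items():
            if isinstance(val, dict):
                node = et.SubElement(parent, 'kpi_node', name=str(key))
                walk(node, val)
            else:
                et.SubElement(parent, 'property', name=str(key), value=str(val))
    walk(knode, kpis)
    return knode

print(et.tostring(kpis_to_xml({'ueA': {'kpiA': 30}}), encoding='unicode'))
# prints: <kpis><kpi_node name="ueA"><property name="kpiA" value="30" /></kpi_node></kpis>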

View File

@@ -49,6 +49,7 @@ class Test(log.Origin):
        self.fail_message = None
        self.log_targets = []
        self._report_stdout = None
        self._kpis = None
        self.timeout = int(config_test_specific['timeout']) if 'timeout' in config_test_specific else None

    def module_name(self):

@@ -139,6 +140,14 @@ class Test(log.Origin):
    def config_test_specific(self):
        return self._config_test_specific

    def set_kpis(self, kpis):
        if not isinstance(kpis, dict):
            raise log.Error('Expected dictionary in toplevel kpis')
        self._kpis = kpis

    def kpis(self):
        return self._kpis

    def set_report_stdout(self, text):
        'Overwrite stdout text stored in report from inside a test'
        self._report_stdout = text