Tests: add minimal pytest support

pytest is a powerful test framework. The initial motivation is to have
much better detailed reporting when a test fails, see for example
https://docs.pytest.org/en/latest/

Additionally, it has full parallelization support at the test level (via
the pytest-xdist plugin) instead of being limited to the suite level
(which currently has to be hard-coded via CMakeLists.txt).

Usage with the build dir in /tmp/wsbuild and src dir in /tmp/wireshark:

    export WS_BIN_PATH=/tmp/wsbuild/run
    pytest /tmp/wireshark/tests

For parallelization support and verbose printing:

    pip install pytest-xdist
    pytest -nauto -v /tmp/wireshark/tests

To limit the run to tests whose names match a pattern:

    pytest -nauto -v /tmp/wireshark/tests -k test_unit_ctest_coverage

Tested on Arch Linux with Python 3.6.5, pytest-3.6.2, xdist-1.22.2.
pytest -n8 finished in 82 seconds while ctest -j8 required 87 seconds.

Change-Id: I832f4dd9f988d6656df795327e81610accf54b9f
Reviewed-on: https://code.wireshark.org/review/28651
Reviewed-by: Gerald Combs <gerald@wireshark.org>
Reviewed-by: Anders Broman <a.broman58@gmail.com>
This commit is contained in:
Peter Wu 2018-07-06 17:34:01 +02:00 committed by Anders Broman
parent 3d1e3023d2
commit 0ce2cdda8b
3 changed files with 49 additions and 4 deletions

34
test/conftest.py Normal file
View File

@ -0,0 +1,34 @@
#
# -*- coding: utf-8 -*-
# Wireshark tests
#
# Copyright (c) 2018 Peter Wu <peter@lekensteyn.nl>
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''py.test configuration'''
import os
import sys
import config
# XXX remove globals in config and create py.test-specific fixtures
# Locate the Wireshark binaries; WS_BIN_PATH must point at the build's
# "run" directory or nothing else in the suite can work.
_program_path = os.environ.get('WS_BIN_PATH')
if _program_path is None:
    print('Please set env var WS_BIN_PATH to the run directory with binaries')
    sys.exit(1)
if not config.setProgramPath(_program_path):
    print('One or more required executables not found at {}\n'.format(_program_path))
    sys.exit(1)
# this is set only to please case_unittests.test_unit_ctest_coverage
def pytest_collection_modifyitems(items):
    '''Find all test groups.

    Derives a dotted "suite" name from each collected item's file path
    (e.g. "test/suite_io.py" -> "test.suite_io") and publishes the sorted,
    de-duplicated list as config.all_groups.
    '''
    groups = set()
    for item in items:
        module_path = item.nodeid.split("::")[0]
        groups.add(module_path.replace(".py", "").replace("/", "."))
    config.all_groups = sorted(groups)

2
test/pytest.ini Normal file
View File

@ -0,0 +1,2 @@
[pytest]
python_files=suite_*.py group_*.py

View File

@ -146,6 +146,17 @@ class SubprocessTestCase(unittest.TestCase):
except:
pass
def _error_count(self, result):
if not result:
return 0
if hasattr(result, 'failures'):
# Python standard unittest runner
return len(result.failures) + len(result.errors)
if hasattr(result, '_excinfo'):
# pytest test runner
return len(result._excinfo or [])
self.fail("Unexpected test result %r" % result)
def run(self, result=None):
# Subclass run() so that we can do the following:
# - Open our log file and add it to the cleanup list.
@ -162,9 +173,7 @@ class SubprocessTestCase(unittest.TestCase):
# to handle line endings in the future.
self.log_fd = io.open(self.log_fname, 'w', encoding='UTF-8', newline='\n')
self.cleanup_files.append(self.log_fname)
pre_run_problem_count = 0
if result:
pre_run_problem_count = len(result.failures) + len(result.errors)
pre_run_problem_count = self._error_count(result)
try:
super(SubprocessTestCase, self).run(result=result)
except KeyboardInterrupt:
@ -176,7 +185,7 @@ class SubprocessTestCase(unittest.TestCase):
self.kill_processes()
self.log_fd.close()
if result:
post_run_problem_count = len(result.failures) + len(result.errors)
post_run_problem_count = self._error_count(result)
if pre_run_problem_count != post_run_problem_count:
self.dump_files.append(self.log_fname)
# Leave some evidence behind.