#!/bin/sh

# Helper script to start a TITAN-generated test suite, supporting
# dynamically linked suites to ensure JUNIT generation is possible.
# Copyright 2017 Harald Welte
# Copyright 2018 sysmocom - s.f.m.c. GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# At least the path to the test suite executable/shared object is required.
if [ $# -lt 1 ]; then
	echo "You have to specify the test suite name"
	echo "Syntax example: $0 osmo-ttcn3-hacks/ggsn_tests/GGSN_Test ./GGSN_Test.cfg"
	exit 1
fi

SUITE=$1
# Derive the suite's directory, its bare name and the default config
# file ("<suite name>.cfg" next to the suite) from the suite path.
SUITE_DIR="$(dirname "$SUITE")"
SUITE_NAME="$(basename "$SUITE")"
CFG="$SUITE_NAME.cfg"

# Optional second argument: an alternative configuration file.
if [ $# -gt 1 ]; then
	CFG=$2
fi

# Optional third argument: a single test case to run instead of the
# whole suite.  TEST stays unset when not given.
if [ $# -gt 2 ]; then
	TEST=$3
fi
# This is an example for using a non-installed custom (e.g. git master) TITAN:
#TTCN3_DIR="/home/laforge/projects/git/titan/titan.core/Install"
#export TTCN3_DIR
#TITAN_LIBRARY_PATH="$TTCN3_DIR/lib"
#TTCN3_BIN_DIR="$TTCN3_DIR/bin"

# Below is for the Debian packages.
TTCN3_BIN_DIR="/usr/bin"
TITAN_LIBRARY_PATH="/usr/lib/titan:/usr/ttcn3/lib"

# Extend LD_LIBRARY_PATH so a dynamically linked suite finds both its own
# libraries (in $SUITE_DIR) and the TITAN runtime, then start the suite.
# $TEST may be unset; only pass it on when it is set and non-empty.
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$SUITE_DIR:$TITAN_LIBRARY_PATH" \
	"$TTCN3_BIN_DIR/ttcn3_start" "$SUITE" "$CFG" ${TEST:+"$TEST"}
# If the suite directory ships an expected-results file, compare the run's
# outcome against it below; without one there is nothing to compare, which
# is not an error.
expected="$SUITE_DIR/expected-results.xml"

if [ ! -f "$expected" ]; then
	echo "No expected results found, not comparing outcome. ($expected)"
	exit 0
fi
# Find the most recent junit output log in the current directory.
# Parsing ls output is acceptable here: the junit*.log names are generated
# by TITAN and contain no whitespace.  stderr is silenced so that a missing
# log yields only our own diagnostic below, not an ls error as well.
last_log="$(ls -1tr junit*.log 2>/dev/null | tail -n 1)"

if [ ! -f "$last_log" ]; then
	echo "No junit log found."
	exit 1
fi
# The comparison helper lives one level above the suite directory.
compare="$SUITE_DIR/../compare-results.py"

if [ ! -x "$compare" ]; then
	echo "ERROR: cannot find $compare"
	exit 1
fi

# $OSMO_TTCN3_COMPARE_ARGS is intentionally left unquoted so that it can
# carry several whitespace-separated extra arguments for the compare script.
"$compare" "$expected" "$last_log" $OSMO_TTCN3_COMPARE_ARGS