zephyr/scripts/tests/twister_blackbox/test_testplan.py
Dmitrii Golovanov a72244f2d0 twister: ztest: harness: Fix missed TestCase statuses
Fix a problem where Ztest suite names were not taken into account by
Twister when identifying a TestCase, so in some situations a Ztest test's
status was not assigned to the proper TestCase and remained 'None' while
the actual status value was lost, leaving the resulting total execution
counters incorrect.

The issue was observed in these situations:
 * A Ztest application with multiple test suites having the same test names.
 * A Ztest suite 'skipped' entirely on execution together with all its tests.

The proposed solution extends the Twister test case name for Ztest to
include the Ztest suite name, so the resulting identifier looks like:
   `<test_scenario_name>.<ztest_suite_name>.<ztest_name>`

The above naming scheme now requires the ztest_suite_name part to be
provided for the `--sub-test` command line option.

Testcase identifiers in twister.json and testplan.json will also
include the ztest_suite_name component.
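
For example, taking the identifier exercised by the blackbox test below:
test scenario `dummy.agnostic.group2`, Ztest suite `a2_tests` and Ztest
test `assert1` combine into
   `dummy.agnostic.group2.a2_tests.assert1`
which is also the form `--sub-test` now expects.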

The Twister Ztest(Test) Harness is improved to track all state changes
reported in the test application's log for Ztest suites and test cases,
so it now parses log output from a Ztest application more scrupulously.
Regular expressions used to match log records are extended, compiled only
once for efficiency and, in some cases, fixed (suite summary).
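
As a minimal sketch of the compile-once approach (illustrative only; the
pattern and names below are assumptions, not the actual harness code):

    import re

    # Compiled once at module scope instead of on every processed log line.
    RECORD_RE = re.compile(r'^(?P<state>PASS|FAIL|SKIP) - (?P<name>\S+)$')

    def parse_record(line):
        m = RECORD_RE.match(line)
        return m.groupdict() if m else None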

Signed-off-by: Dmitrii Golovanov <dmitrii.golovanov@intel.com>
2024-11-22 08:26:59 -05:00


#!/usr/bin/env python3
# Copyright (c) 2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Blackbox tests for twister's command line functions - those requiring testplan.json
"""
import importlib
import mock
import os
import pytest
import sys
import json
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
from twisterlib.error import TwisterRuntimeError


class TestTestPlan:
    TESTDATA_1 = [
        ('dummy.agnostic.group2.a2_tests.assert1', SystemExit, 4),
        (
            os.path.join('scripts', 'tests', 'twister_blackbox', 'test_data', 'tests',
                         'dummy', 'agnostic', 'group1', 'subgroup1',
                         'dummy.agnostic.group2.assert1'),
            TwisterRuntimeError,
            None
        ),
    ]
    TESTDATA_2 = [
        ('buildable', 7),
        ('runnable', 5),
    ]
    TESTDATA_3 = [
        (True, 1),
        (False, 7),
    ]

    @classmethod
    def setup_class(cls):
        # Load the twister entry-point script so each test below can execute
        # it as '__main__' with a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'test, expected_exception, expected_subtest_count',
        TESTDATA_1,
        ids=['valid', 'not found']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_subtest(self, out_path, test, expected_exception, expected_subtest_count):
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        # Interleave '-p' with each platform: -p qemu_x86 -p intel_adl_crb
        args = ['-i', '--outdir', out_path, '-T', path, '--sub-test', test, '-y'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(expected_exception) as exc:
            self.loader.exec_module(self.twister_module)

        # A sub-test that cannot be found is expected to fail before any
        # testplan.json is written, so there is nothing more to check.
        if expected_exception != SystemExit:
            return

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        # Keep only test cases that were not filtered out (no 'reason' field).
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in j['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(exc.value) == '0'
        assert len(filtered_j) == expected_subtest_count

    @pytest.mark.parametrize(
        'filter, expected_count',
        TESTDATA_2,
        ids=['buildable', 'runnable']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_filter(self, out_path, filter, expected_count):
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '--filter', filter, '-y'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as exc:
            self.loader.exec_module(self.twister_module)

        assert str(exc.value) == '0'

        import pprint
        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        pprint.pprint(j)
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in j['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]
        pprint.pprint(filtered_j)

        assert expected_count == len(filtered_j)

    @pytest.mark.parametrize(
        'integration, expected_count',
        TESTDATA_3,
        ids=['integration', 'no integration']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    @mock.patch.object(TestPlan, 'SAMPLE_FILENAME', '')
    def test_integration(self, out_path, integration, expected_count):
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
               (['--integration'] if integration else []) + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as exc:
            self.loader.exec_module(self.twister_module)

        assert str(exc.value) == '0'

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in j['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert expected_count == len(filtered_j)