twister: ztest: harness: Fix missed TestCase statuses

Fix a problem where Ztest suite names were not taken into account by
Twister when identifying a TestCase, so in some situations a Ztest
test's status was not assigned to the proper TestCase and remained
'None' while the actual status value was lost; as a result, the total
execution counters were incorrect.

The issue was observed in these situations:
 * A Ztest application with multiple test suites having the same test names.
 * A Ztest suite 'skipped' entirely at execution together with all its tests.

The proposed solution extends the Twister test case name for Ztest to
include the Ztest suite name, so the resulting identifier looks like:
   `<test_scenario_name>.<ztest_suite_name>.<ztest_name>`

The above naming scheme now requires the ztest_suite_name component to
be provided for the `--sub-test` command line option.
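
For example, reusing the sample from the updated `--sub-test` help text
in the diff below (paths are illustrative), an invocation now looks like:

   ./scripts/twister --testsuite-root tests/kernel --sub-test kernel.fifo.fifo_api_1cpu.fifo_loop

where `fifo_api_1cpu` is the Ztest suite name that previously was not
part of the sub-test identifier.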

Test case identifiers in twister.json and testplan.json will also
include the ztest_suite_name component.
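
A minimal sketch of reading the new identifiers back, assuming the
default `twister-out` output directory and the report layout exercised
by the blackbox tests updated in this change:

    import json

    # Illustrative: list test case identifiers from a Twister test plan report.
    with open("twister-out/testplan.json") as f:
        plan = json.load(f)

    for ts in plan["testsuites"]:
        for tc in ts["testcases"]:
            # e.g. 'dummy.agnostic.group2.a2_tests.assert1'
            print(ts["platform"], tc["identifier"])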

The Twister Ztest(Test) Harness is improved to track all state changes
of Ztest suites and test cases that are known from the test
application's log, so it now parses the log output of a Ztest
application more scrupulously. Regular expressions to match log records
are extended, optimized to be compiled only once and, in some cases,
fixed (suite summary).
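
A minimal sketch of the compile-once pattern now used by the harness
(class and method names here are illustrative; the real patterns are in
the harness diff below):

    import re

    class ZtestLogParser:
        # Compiled once at class-definition time, not on every handle() call.
        SUITE_START = re.compile(r"Running TESTSUITE (?P<suite_name>\S*)")
        CASE_END = re.compile(
            r".*(PASS|FAIL|SKIP) - (test_)?(\S*) in (\d*[.,]?\d*) seconds")

        def handle(self, line: str) -> None:
            if m := self.SUITE_START.search(line):
                print(f"suite started: {m.group('suite_name')}")
            elif m := self.CASE_END.match(line):
                print(f"case {m.group(3)}: {m.group(1)}")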

Signed-off-by: Dmitrii Golovanov <dmitrii.golovanov@intel.com>
Dmitrii Golovanov 2024-10-17 09:02:35 +02:00 committed by Anas Nashif
parent 74d1f60faf
commit a72244f2d0
18 changed files with 254 additions and 110 deletions


@ -1,7 +1,7 @@
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# Copyright (c) 2018-2024 Intel Corporation
# Copyright 2022 NXP
# Copyright (c) 2024 Arm Limited (or its affiliates). All rights reserved.
#
@ -149,7 +149,8 @@ Artificially long but functional example:
test_plan_report_xor.add_argument("--list-tests", action="store_true",
help="""List of all sub-test functions recursively found in
all --testsuite-root arguments. Note different sub-tests can share
the same section name and come from different directories.
the same test scenario identifier (section.subsection)
and come from different directories.
The output is flattened and reports --sub-test names only,
not their directories. For instance net.socket.getaddrinfo_ok
and net.socket.fd_set belong to different directories.
@ -239,17 +240,22 @@ Artificially long but functional example:
test_xor_subtest.add_argument(
"-s", "--test", "--scenario", action="append", type = norm_path,
help="Run only the specified testsuite scenario. These are named by "
"<path/relative/to/Zephyr/base/section.name.in.testcase.yaml>")
help="""Run only the specified test suite scenario. These are named by
'path/relative/to/Zephyr/base/section.subsection_in_testcase_yaml',
or just 'section.subsection' identifier. With '--testsuite-root' option
the scenario will be found faster.
""")
test_xor_subtest.add_argument(
"--sub-test", action="append",
help="""Recursively find sub-test functions and run the entire
test section where they were found, including all sibling test
help="""Recursively find sub-test functions (test cases) and run the entire
test scenario (section.subsection) where they were found, including all sibling test
functions. Sub-tests are named by:
section.name.in.testcase.yaml.function_name_without_test_prefix
Example: In kernel.fifo.fifo_loop: 'kernel.fifo' is a section name
and 'fifo_loop' is a name of a function found in main.c without test prefix.
'section.subsection_in_testcase_yaml.ztest_suite.ztest_without_test_prefix'.
Example_1: 'kernel.fifo.fifo_api_1cpu.fifo_loop' where 'kernel.fifo' is a test scenario
name (section.subsection) and 'fifo_api_1cpu.fifo_loop' is
a Ztest suite_name.test_name identifier.
Example_2: 'debug.coredump.logging_backend' is a standalone test scenario name.
""")
parser.add_argument(


@ -31,7 +31,6 @@ logger.setLevel(logging.DEBUG)
_WINDOWS = platform.system() == 'Windows'
result_re = re.compile(r".*(PASS|FAIL|SKIP) - (test_)?(\S*) in (\d*[.,]?\d*) seconds")
class Harness:
GCOV_START = "GCOV_COVERAGE_DUMP_START"
GCOV_END = "GCOV_COVERAGE_DUMP_END"
@ -59,12 +58,19 @@ class Harness:
self.ztest = False
self.detected_suite_names = []
self.run_id = None
self.started_suites = {}
self.started_cases = {}
self.matched_run_id = False
self.run_id_exists = False
self.instance: TestInstance | None = None
self.testcase_output = ""
self._match = False
@property
def trace(self) -> bool:
return self.instance.handler.options.verbose > 2
@property
def status(self) -> TwisterStatus:
return self._status
@ -710,42 +716,124 @@ class Gtest(Harness):
class Test(Harness):
__test__ = False # for pytest to skip this class when collects tests
RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL"
RUN_FAILED = "PROJECT EXECUTION FAILED"
test_suite_start_pattern = r"Running TESTSUITE (?P<suite_name>.*)"
ZTEST_START_PATTERN = r"START - (test_)?([a-zA-Z0-9_-]+)"
test_suite_start_pattern = re.compile(r"Running TESTSUITE (?P<suite_name>\S*)")
test_suite_end_pattern = re.compile(r"TESTSUITE (?P<suite_name>\S*)\s+(?P<suite_status>succeeded|failed)")
test_case_start_pattern = re.compile(r"START - (test_)?([a-zA-Z0-9_-]+)")
test_case_end_pattern = re.compile(r".*(PASS|FAIL|SKIP) - (test_)?(\S*) in (\d*[.,]?\d*) seconds")
test_suite_summary_pattern = re.compile(r"SUITE (?P<suite_status>\S*) - .* \[(?P<suite_name>\S*)\]: .* duration = (\d*[.,]?\d*) seconds")
test_case_summary_pattern = re.compile(r" - (PASS|FAIL|SKIP) - \[([^\.]*).(test_)?(\S*)\] duration = (\d*[.,]?\d*) seconds")
def get_testcase(self, tc_name, phase, ts_name=None):
""" Search a Ztest case among detected in the test image binary
expecting the same test names as already known from the ELF.
Track suites and cases unexpectedly found in the log.
"""
ts_names = self.started_suites.keys()
if ts_name:
if ts_name not in self.instance.testsuite.ztest_suite_names:
logger.warning(f"On {phase}: unexpected Ztest suite '{ts_name}' "
f"not present among: {self.instance.testsuite.ztest_suite_names}")
if ts_name not in self.detected_suite_names:
if self.trace:
logger.debug(f"On {phase}: detected new Ztest suite '{ts_name}'")
self.detected_suite_names.append(ts_name)
ts_names = [ ts_name ] if ts_name in ts_names else []
# Firstly try to match the test case ID to the first running Ztest suite with this test name.
for ts_name_ in ts_names:
if self.started_suites[ts_name_]['count'] < (0 if phase == 'TS_SUM' else 1):
continue
tc_fq_id = "{}.{}.{}".format(self.id, ts_name_, tc_name)
if tc := self.instance.get_case_by_name(tc_fq_id):
if self.trace:
logger.debug(f"On {phase}: Ztest case '{tc_name}' matched to '{tc_fq_id}")
return tc
logger.debug(f"On {phase}: Ztest case '{tc_name}' is not known in {self.started_suites} running suite(s).")
tc_id = "{}.{}".format(self.id, tc_name)
return self.instance.get_case_or_create(tc_id)
def start_suite(self, suite_name):
if suite_name not in self.detected_suite_names:
self.detected_suite_names.append(suite_name)
if suite_name not in self.instance.testsuite.ztest_suite_names:
logger.warning(f"Unexpected Ztest suite '{suite_name}'")
if suite_name in self.started_suites:
if self.started_suites[suite_name]['count'] > 0:
logger.warning(f"Already STARTED '{suite_name}':{self.started_suites[suite_name]}")
elif self.trace:
logger.debug(f"START suite '{suite_name}'")
self.started_suites[suite_name]['count'] += 1
self.started_suites[suite_name]['repeat'] += 1
else:
self.started_suites[suite_name] = { 'count': 1, 'repeat': 0 }
def end_suite(self, suite_name, phase='', suite_status=None):
if suite_name in self.started_suites:
if phase == 'TS_SUM' and self.started_suites[suite_name]['count'] == 0:
return
if self.started_suites[suite_name]['count'] < 1:
logger.error(f"Already ENDED {phase} suite '{suite_name}':{self.started_suites[suite_name]}")
elif self.trace:
logger.debug(f"END {phase} suite '{suite_name}':{self.started_suites[suite_name]}")
self.started_suites[suite_name]['count'] -= 1
elif suite_status == 'SKIP':
self.start_suite(suite_name) # register skipped suites at their summary end
self.started_suites[suite_name]['count'] -= 1
else:
logger.warning(f"END {phase} suite '{suite_name}' without START detected")
def start_case(self, tc_name):
if tc_name in self.started_cases:
if self.started_cases[tc_name]['count'] > 0:
logger.warning(f"Already STARTED '{tc_name}':{self.started_cases[tc_name]}")
self.started_cases[tc_name]['count'] += 1
else:
self.started_cases[tc_name] = { 'count': 1 }
def end_case(self, tc_name, phase=''):
if tc_name in self.started_cases:
if phase == 'TS_SUM' and self.started_cases[tc_name]['count'] == 0:
return
if self.started_cases[tc_name]['count'] < 1:
logger.error(f"Already ENDED {phase} case '{tc_name}':{self.started_cases[tc_name]}")
elif self.trace:
logger.debug(f"END {phase} case '{tc_name}':{self.started_cases[tc_name]}")
self.started_cases[tc_name]['count'] -= 1
elif phase != 'TS_SUM':
logger.warning(f"END {phase} case '{tc_name}' without START detected")
def handle(self, line):
test_suite_match = re.search(self.test_suite_start_pattern, line)
if test_suite_match:
suite_name = test_suite_match.group("suite_name")
self.detected_suite_names.append(suite_name)
testcase_match = None
if self._match:
self.testcase_output += line + "\n"
testcase_match = re.search(self.ZTEST_START_PATTERN, line)
if testcase_match:
name = "{}.{}".format(self.id, testcase_match.group(2))
tc = self.instance.get_case_or_create(name)
if test_suite_start_match := re.search(self.test_suite_start_pattern, line):
self.start_suite(test_suite_start_match.group("suite_name"))
elif test_suite_end_match := re.search(self.test_suite_end_pattern, line):
suite_name=test_suite_end_match.group("suite_name")
self.end_suite(suite_name, 'TS_END')
elif testcase_match := re.search(self.test_case_start_pattern, line):
tc_name = testcase_match.group(2)
tc = self.get_testcase(tc_name, 'TC_START')
self.start_case(tc.name)
# Mark the test as started, if something happens here, it is mostly
# due to this tests, for example timeout. This should in this case
# be marked as failed and not blocked (not run).
tc.status = TwisterStatus.STARTED
if testcase_match or self._match:
self.testcase_output += line + "\n"
self._match = True
result_match = result_re.match(line)
if not self._match:
self.testcase_output += line + "\n"
self._match = True
# some testcases are skipped based on predicates and do not show up
# during test execution, however they are listed in the summary. Parse
# the summary for status and use that status instead.
summary_re = re.compile(r"- (PASS|FAIL|SKIP) - \[([^\.]*).(test_)?(\S*)\] duration = (\d*[.,]?\d*) seconds")
summary_match = summary_re.match(line)
if result_match:
elif result_match := self.test_case_end_pattern.match(line):
matched_status = result_match.group(1)
name = "{}.{}".format(self.id, result_match.group(3))
tc = self.instance.get_case_or_create(name)
tc_name = result_match.group(3)
tc = self.get_testcase(tc_name, 'TC_END')
self.end_case(tc.name)
tc.status = TwisterStatus[matched_status]
if tc.status == TwisterStatus.SKIP:
tc.reason = "ztest skip"
@ -755,15 +843,22 @@ class Test(Harness):
self.testcase_output = ""
self._match = False
self.ztest = True
elif summary_match:
matched_status = summary_match.group(1)
self.detected_suite_names.append(summary_match.group(2))
name = "{}.{}".format(self.id, summary_match.group(4))
tc = self.instance.get_case_or_create(name)
elif test_suite_summary_match := self.test_suite_summary_pattern.match(line):
suite_name=test_suite_summary_match.group("suite_name")
suite_status=test_suite_summary_match.group("suite_status")
self._match = False
self.ztest = True
self.end_suite(suite_name, 'TS_SUM', suite_status=suite_status)
elif test_case_summary_match := self.test_case_summary_pattern.match(line):
matched_status = test_case_summary_match.group(1)
suite_name = test_case_summary_match.group(2)
tc_name = test_case_summary_match.group(4)
tc = self.get_testcase(tc_name, 'TS_SUM', suite_name)
self.end_case(tc.name, 'TS_SUM')
tc.status = TwisterStatus[matched_status]
if tc.status == TwisterStatus.SKIP:
tc.reason = "ztest skip"
tc.duration = float(summary_match.group(5))
tc.duration = float(test_case_summary_match.group(5))
if tc.status == TwisterStatus.FAIL:
tc.output = self.testcase_output
self.testcase_output = ""


@ -1,6 +1,6 @@
# vim: set syntax=python ts=4 :
#
# Copyright (c) 20180-2022 Intel Corporation
# Copyright (c) 2018-2024 Intel Corporation
# Copyright 2022 NXP
# SPDX-License-Identifier: Apache-2.0
@ -1108,13 +1108,16 @@ class ProjectBuilder(FilterBuilder):
matches = new_ztest_unit_test_regex.findall(sym.name)
if matches:
for m in matches:
# new_ztest_suite = m[0] # not used for now
new_ztest_suite = m[0]
if new_ztest_suite not in self.instance.testsuite.ztest_suite_names:
logger.warning(f"Unexpected Ztest suite '{new_ztest_suite}' "
f"not present in: {self.instance.testsuite.ztest_suite_names}")
test_func_name = m[1].replace("test_", "", 1)
testcase_id = f"{yaml_testsuite_name}.{test_func_name}"
testcase_id = f"{yaml_testsuite_name}.{new_ztest_suite}.{test_func_name}"
detected_cases.append(testcase_id)
if detected_cases:
logger.debug(f"{', '.join(detected_cases)} in {elf_file}")
logger.debug(f"Detected Ztest cases: [{', '.join(detected_cases)}] in {elf_file}")
tc_keeper = {tc.name: {'status': tc.status, 'reason': tc.reason} for tc in self.instance.testcases}
self.instance.testcases.clear()
self.instance.testsuite.testcases.clear()


@ -1,6 +1,6 @@
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018-2022 Intel Corporation
# Copyright (c) 2018-2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from enum import Enum
@ -248,14 +248,16 @@ def _find_ztest_testcases(search_area, testcase_regex):
testcase_regex_matches = \
[m for m in testcase_regex.finditer(search_area)]
testcase_names = \
[m.group("testcase_name") for m in testcase_regex_matches]
testcase_names = [name.decode("UTF-8") for name in testcase_names]
[(m.group("suite_name") if m.groupdict().get("suite_name") else b'', m.group("testcase_name")) \
for m in testcase_regex_matches]
testcase_names = [(ts_name.decode("UTF-8"), tc_name.decode("UTF-8")) for ts_name, tc_name in testcase_names]
warnings = None
for testcase_name in testcase_names:
if not testcase_name.startswith("test_"):
if not testcase_name[1].startswith("test_"):
warnings = "Found a test that does not start with test_"
testcase_names = \
[tc_name.replace("test_", "", 1) for tc_name in testcase_names]
[(ts_name + '.' if ts_name else '') + f"{tc_name.replace('test_', '', 1)}" \
for (ts_name, tc_name) in testcase_names]
return testcase_names, warnings


@ -597,31 +597,48 @@ TEST_DATA_7 = [
"",
"Running TESTSUITE suite_name",
["suite_name"],
{ 'suite_name': { 'count': 1, 'repeat': 0 } },
{},
TwisterStatus.NONE,
True,
TwisterStatus.NONE,
),
("", "START - test_testcase", [], TwisterStatus.STARTED, True, TwisterStatus.NONE),
(
"",
"On TC_START: Ztest case 'testcase' is not known in {} running suite(s)",
"START - test_testcase",
[],
{},
{ 'test_id.testcase': { 'count': 1 } },
TwisterStatus.STARTED,
True,
TwisterStatus.NONE
),
(
"On TC_END: Ztest case 'example' is not known in {} running suite(s)",
"PASS - test_example in 0 seconds",
[],
{},
{},
TwisterStatus.PASS,
True,
TwisterStatus.NONE,
),
(
"",
"On TC_END: Ztest case 'example' is not known in {} running suite(s)",
"SKIP - test_example in 0 seconds",
[],
{},
{},
TwisterStatus.SKIP,
True,
TwisterStatus.NONE,
),
(
"",
"On TC_END: Ztest case 'example' is not known in {} running suite(s)",
"FAIL - test_example in 0 seconds",
[],
{},
{},
TwisterStatus.FAIL,
True,
TwisterStatus.NONE,
@ -630,6 +647,8 @@ TEST_DATA_7 = [
"not a ztest and no state for test_id",
"START - test_testcase",
[],
{},
{ 'test_id.testcase': { 'count': 1 } },
TwisterStatus.PASS,
False,
TwisterStatus.PASS,
@ -638,6 +657,8 @@ TEST_DATA_7 = [
"not a ztest and no state for test_id",
"START - test_testcase",
[],
{},
{ 'test_id.testcase': { 'count': 1 } },
TwisterStatus.FAIL,
False,
TwisterStatus.FAIL,
@ -646,12 +667,14 @@ TEST_DATA_7 = [
@pytest.mark.parametrize(
"exp_out, line, exp_suite_name, exp_status, ztest, state",
"exp_out, line, exp_suite_name, exp_started_suites, exp_started_cases, exp_status, ztest, state",
TEST_DATA_7,
ids=["testsuite", "testcase", "pass", "skip", "failed", "ztest pass", "ztest fail"],
)
def test_test_handle(
tmp_path, caplog, exp_out, line, exp_suite_name, exp_status, ztest, state
tmp_path, caplog, exp_out, line,
exp_suite_name, exp_started_suites, exp_started_cases,
exp_status, ztest, state
):
# Arrange
line = line
@ -662,6 +685,7 @@ def test_test_handle(
mock_testsuite = mock.Mock(id="id", testcases=[])
mock_testsuite.name = "mock_testsuite"
mock_testsuite.harness_config = {}
mock_testsuite.ztest_suite_names = []
outdir = tmp_path / "gtest_out"
outdir.mkdir()
@ -681,6 +705,9 @@ def test_test_handle(
# Assert
assert test_obj.detected_suite_names == exp_suite_name
assert test_obj.started_suites == exp_started_suites
assert test_obj.started_cases == exp_started_cases
assert exp_out in caplog.text
if not "Running" in line and exp_out == "":
assert test_obj.instance.testcases[0].status == exp_status


@ -1562,11 +1562,14 @@ def test_projectbuilder_process(
TESTDATA_7 = [
(
[
'z_ztest_unit_test__dummy_suite_name__dummy_test_name',
'z_ztest_unit_test__dummy_suite_name__test_dummy_name',
'z_ztest_unit_test__dummy_suite1_name__dummy_test_name1',
'z_ztest_unit_test__dummy_suite2_name__test_dummy_name2',
'no match'
],
['dummy_id.dummy_name', 'dummy_id.dummy_name']
[
('dummy_id.dummy_suite1_name.dummy_name1'),
('dummy_id.dummy_suite2_name.dummy_name2')
]
),
(
['no match'],
@ -1599,6 +1602,7 @@ def test_projectbuilder_determine_testcases(
instance_mock = mock.Mock()
instance_mock.testcases = []
instance_mock.testsuite.id = 'dummy_id'
instance_mock.testsuite.ztest_suite_names = []
env_mock = mock.Mock()
pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)
@ -2137,13 +2141,11 @@ def test_projectbuilder_cmake():
instance_mock = mock.Mock()
instance_mock.handler = 'dummy handler'
instance_mock.build_dir = os.path.join('build', 'dir')
instance_mock.platform.name = 'frdm_k64f'
env_mock = mock.Mock()
pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)
pb.build_dir = 'build_dir'
pb.testsuite.platform = instance_mock.platform
pb.testsuite.extra_args = ['some', 'platform:frdm_k64f:args']
pb.testsuite.extra_args = ['some', 'args']
pb.testsuite.extra_conf_files = ['some', 'files1']
pb.testsuite.extra_overlay_confs = ['some', 'files2']
pb.testsuite.extra_dtc_overlay_files = ['some', 'files3']
@ -2156,7 +2158,7 @@ def test_projectbuilder_cmake():
assert res == cmake_res_mock
pb.cmake_assemble_args.assert_called_once_with(
['some', 'args'],
pb.testsuite.extra_args,
pb.instance.handler,
pb.testsuite.extra_conf_files,
pb.testsuite.extra_overlay_confs,


@ -1,5 +1,5 @@
#!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
# Copyright (c) 2020-2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
@ -85,7 +85,8 @@ def test_get_all_testsuites_short(class_testplan, all_testsuites_dict):
'test_b.check_1', 'test_b.check_2', 'test_c.check_1',
'test_c.check_2', 'test_d.check_1.unit_1a',
'test_d.check_1.unit_1b',
'test_e.check_1.1a', 'test_e.check_1.1b',
'test_e.check_1.feature5.1a',
'test_e.check_1.feature5.1b',
'test_config.main']
assert sorted(plan.get_all_tests()) == sorted(expected_tests)


@ -165,7 +165,7 @@ TESTDATA_2 = [
),
ScanPathResult(
warnings=None,
matches=['1a', '1b'],
matches=['feature5.1a', 'feature5.1b'],
has_registered_test_suites=False,
has_run_registered_test_suites=True,
has_test_main=False,


@ -13,6 +13,7 @@ import pytest
import sys
import json
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
@ -55,13 +56,13 @@ class TestConfig:
assert str(sys_exit.value) == '0'
assert len(filtered_j) == 3
assert len(filtered_j) == 4
@pytest.mark.parametrize(
'level, expected_tests',
[
('smoke', 5),
('acceptance', 6),
('smoke', 6),
('acceptance', 7),
],
ids=['smoke', 'acceptance']
)


@ -9,6 +9,8 @@
ZTEST_SUITE(a2_tests, NULL, NULL, NULL, NULL, NULL);
ZTEST_SUITE(a3_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
@ -34,3 +36,8 @@ ZTEST(a2_tests, test_assert2)
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
ZTEST(a3_tests, test_assert1)
{
zassert_true(1, "1 was false");
}


@ -14,6 +14,7 @@ import sys
import json
import re
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
@ -83,11 +84,12 @@ class TestFilter:
@pytest.mark.parametrize(
'tag, expected_test_count',
[
('device', 5), # dummy.agnostic.group1.subgroup1.assert
# dummy.agnostic.group1.subgroup2.assert
# dummy.agnostic.group2.assert1
# dummy.agnostic.group2.assert2
# dummy.agnostic.group2.assert3
('device', 6), # dummy.agnostic.group1.subgroup1.a1_1_tests.assert
# dummy.agnostic.group1.subgroup2.a2_2_tests.assert
# dummy.agnostic.group2.a2_tests.assert1
# dummy.agnostic.group2.a2_tests.assert2
# dummy.agnostic.group2.a2_tests.assert3
# dummy.agnostic.group2.a3_tests.assert1
('agnostic', 1) # dummy.device.group.assert
],
ids=['no device', 'no agnostic']
@ -144,7 +146,7 @@ class TestFilter:
assert str(sys_exit.value) == '0'
assert len(filtered_j) == 5
assert len(filtered_j) == 6
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
def test_enable_slow_only(self, out_path):
@ -172,7 +174,7 @@ class TestFilter:
assert str(sys_exit.value) == '0'
assert len(filtered_j) == 3
assert len(filtered_j) == 4
@pytest.mark.parametrize(
'arch, expected',


@ -36,7 +36,7 @@ class TestPlatform:
'built_configurations': 2,
'failed_configurations': 0,
'errored_configurations': 0,
'executed_test_cases': 8,
'executed_test_cases': 10,
'skipped_test_cases': 2,
'platform_count': 2,
'executed_on_platform': 4,
@ -129,7 +129,7 @@ class TestPlatform:
assert str(sys_exit.value) == '0'
assert len(filtered_j) == 12
assert len(filtered_j) == 14
def test_platform(self, out_path):
path = os.path.join(TEST_DATA, 'tests', 'dummy')


@ -1,5 +1,5 @@
#!/usr/bin/env python3
# Copyright (c) 2023 Intel Corporation
# Copyright (c) 2023-2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
@ -41,17 +41,18 @@ class TestPrintOuts:
(
os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
[
'dummy.agnostic.group1.subgroup1.assert',
'dummy.agnostic.group1.subgroup2.assert',
'dummy.agnostic.group2.assert1',
'dummy.agnostic.group2.assert2',
'dummy.agnostic.group2.assert3'
'dummy.agnostic.group1.subgroup1.a1_1_tests.assert',
'dummy.agnostic.group1.subgroup2.a1_2_tests.assert',
'dummy.agnostic.group2.a2_tests.assert1',
'dummy.agnostic.group2.a2_tests.assert2',
'dummy.agnostic.group2.a3_tests.assert1',
'dummy.agnostic.group2.a2_tests.assert3'
]
),
(
os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
[
'dummy.device.group.assert'
'dummy.device.group.d_tests.assert'
]
),
]
@ -64,11 +65,12 @@ class TestPrintOuts:
'└── Tests\n' \
' └── dummy\n' \
' └── agnostic\n' \
' ├── dummy.agnostic.group1.subgroup1.assert\n' \
' ├── dummy.agnostic.group1.subgroup2.assert\n' \
' ├── dummy.agnostic.group2.assert1\n' \
' ├── dummy.agnostic.group2.assert2\n' \
' └── dummy.agnostic.group2.assert3\n'
' ├── dummy.agnostic.group1.subgroup1.a1_1_tests.assert\n' \
' ├── dummy.agnostic.group1.subgroup2.a1_2_tests.assert\n' \
' ├── dummy.agnostic.group2.a2_tests.assert1\n' \
' ├── dummy.agnostic.group2.a2_tests.assert2\n' \
' ├── dummy.agnostic.group2.a2_tests.assert3\n' \
' └── dummy.agnostic.group2.a3_tests.assert1\n'
),
(
os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
@ -77,7 +79,7 @@ class TestPrintOuts:
'└── Tests\n'
' └── dummy\n'
' └── device\n'
' └── dummy.device.group.assert\n'
' └── dummy.device.group.d_tests.assert\n'
),
]


@ -1,5 +1,5 @@
#!/usr/bin/env python3
# Copyright (c) 2023 Intel Corporation
# Copyright (c) 2023-2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
@ -350,12 +350,12 @@ class TestReport:
(
os.path.join(TEST_DATA, 'tests', 'dummy'),
['--detailed-skipped-report'],
{'qemu_x86/atom': 5, 'intel_adl_crb/alder_lake': 1}
{'qemu_x86/atom': 6, 'intel_adl_crb/alder_lake': 1}
),
(
os.path.join(TEST_DATA, 'tests', 'dummy'),
['--detailed-skipped-report', '--report-filtered'],
{'qemu_x86/atom': 6, 'intel_adl_crb/alder_lake': 6}
{'qemu_x86/atom': 7, 'intel_adl_crb/alder_lake': 7}
),
],
ids=['dummy tests', 'dummy tests with filtered']


@ -1,5 +1,5 @@
#!/usr/bin/env python3
# Copyright (c) 2023 Intel Corporation
# Copyright (c) 2023-2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
@ -54,7 +54,7 @@ class TestRunner:
'built_configurations': 0,
'failed_configurations': 0,
'errored_configurations': 0,
'executed_test_cases': 8,
'executed_test_cases': 10,
'skipped_test_cases': 0,
'platform_count': 2,
'executed_on_platform': 4,


@ -10,10 +10,10 @@ import importlib
import mock
import os
import pytest
import re
import sys
import json
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
@ -65,14 +65,8 @@ class TestShuffle:
with open(os.path.join(out_path, 'testplan.json')) as f:
j = json.load(f)
filtered_j = [
(ts['platform'], ts['name'], tc['identifier']) \
for ts in j['testsuites'] \
for tc in ts['testcases'] if 'reason' not in tc
]
testcases = [re.sub(r'\.assert[^\.]*?$', '', j[2]) for j in filtered_j]
testsuites = list(dict.fromkeys(testcases))
testsuites = [os.path.basename(ts['name']) for ts in j['testsuites']]
assert testsuites == expected_order


@ -13,6 +13,7 @@ import pytest
import sys
import json
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan
@ -71,4 +72,4 @@ class TestTestlist:
for tc in ts['testcases'] if 'reason' not in tc
]
assert len(filtered_j) == 5
assert len(filtered_j) == 6


@ -13,6 +13,7 @@ import pytest
import sys
import json
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
from twisterlib.error import TwisterRuntimeError
@ -20,7 +21,7 @@ from twisterlib.error import TwisterRuntimeError
class TestTestPlan:
TESTDATA_1 = [
('dummy.agnostic.group2.assert1', SystemExit, 3),
('dummy.agnostic.group2.a2_tests.assert1', SystemExit, 4),
(
os.path.join('scripts', 'tests', 'twister_blackbox', 'test_data', 'tests',
'dummy', 'agnostic', 'group1', 'subgroup1',
@ -30,12 +31,12 @@ class TestTestPlan:
),
]
TESTDATA_2 = [
('buildable', 6),
('runnable', 4),
('buildable', 7),
('runnable', 5),
]
TESTDATA_3 = [
(True, 1),
(False, 6),
(False, 7),
]
@classmethod
@ -52,7 +53,7 @@ class TestTestPlan:
@pytest.mark.parametrize(
'test, expected_exception, expected_subtest_count',
TESTDATA_1,
ids=['valid', 'invalid']
ids=['valid', 'not found']
)
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
def test_subtest(self, out_path, test, expected_exception, expected_subtest_count):