Output verbosity for test cases

parent 81c06b3955
commit 6e3638a3f0
@@ -1653,6 +1653,8 @@ class Config:
     #: Verbosity type for failed assertions (see :confval:`verbosity_assertions`).
     VERBOSITY_ASSERTIONS: Final = "assertions"
+    #: Verbosity type for test case execution (see :confval:`verbosity_test_cases`).
+    VERBOSITY_TEST_CASES: Final = "test_cases"
     _VERBOSITY_INI_DEFAULT: Final = "auto"
 
     def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:
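
For orientation: `get_verbosity` resolves a fine-grained verbosity type, falling back to the main level when no type is given or the setting is left at the `auto` ini default. A minimal sketch of that resolution, assuming the ini key has the shape `verbosity_<type>` (a sketch under those assumptions, not the committed implementation):

    def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:
        # Sketch only: the global -v/-q counter is the baseline.
        global_level: int = self.option.verbose
        if verbosity_type is None:
            return global_level
        level = self.getini(f"verbosity_{verbosity_type}")  # assumed ini key shape
        if level == Config._VERBOSITY_INI_DEFAULT:  # "auto" defers to the main level
            return global_level
        return int(level)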
@@ -253,6 +253,14 @@ def pytest_addoption(parser: Parser) -> None:
         "progress even when capture=no)",
         default="progress",
     )
+    Config._add_verbosity_ini(
+        parser,
+        Config.VERBOSITY_TEST_CASES,
+        help=(
+            "Specify a verbosity level for test case execution, overriding the main level. "
+            "Higher levels will provide more detailed information about each test case executed."
+        ),
+    )
 
 
 def pytest_configure(config: Config) -> None:
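
Once registered, the option is set like any other ini value. This is the configuration exercised by the tests at the end of this commit, raising per-test output detail without touching the main level:

    # pytest.ini
    [pytest]
    verbosity_test_cases = 2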
@@ -415,7 +423,7 @@ class TerminalReporter:
 
     @property
     def showlongtestinfo(self) -> bool:
-        return self.verbosity > 0
+        return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0
 
     def hasopt(self, char: str) -> bool:
         char = {"xfailed": "x", "skipped": "s"}.get(char, char)
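
With this property keyed to the fine-grained level, one-line-per-test output can be enabled even when the session is otherwise quiet. A sketch of how that could be verified with the `pytester` fixture (a hypothetical test in the same style as the ones added below, not part of this commit):

    def test_quiet_run_still_shows_test_lines(pytester: Pytester) -> None:
        pytester.makepyfile("def test_x(): pass")
        pytester.makeini("[pytest]\nverbosity_test_cases = 1\n")
        # Global -q would normally suppress per-test lines; the ini setting overrides it.
        result = pytester.runpytest("-q")
        result.stdout.fnmatch_lines(["*::test_x PASSED*"])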
@@ -593,7 +601,7 @@ class TerminalReporter:
             markup = {"yellow": True}
         else:
             markup = {}
-        if self.verbosity <= 0:
+        if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
             self._tw.write(letter, **markup)
         else:
             self._progress_nodeids_reported.add(rep.nodeid)
@@ -602,7 +610,7 @@ class TerminalReporter:
             self.write_ensure_prefix(line, word, **markup)
             if rep.skipped or hasattr(report, "wasxfail"):
                 reason = _get_raw_skip_reason(rep)
-                if self.config.option.verbose < 2:
+                if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
                     available_width = (
                         (self._tw.fullwidth - self._tw.width_of_current_line)
                         - len(" [100%]")
@@ -639,7 +647,10 @@ class TerminalReporter:
 
     def pytest_runtest_logfinish(self, nodeid: str) -> None:
         assert self._session
-        if self.verbosity <= 0 and self._show_progress_info:
+        if (
+            self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0
+            and self._show_progress_info
+        ):
             if self._show_progress_info == "count":
                 num_tests = self._session.testscollected
                 progress_length = len(f" [{num_tests}/{num_tests}]")
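
Side note on the condition being wrapped here: `progress_length` reserves the terminal columns needed by the final counter so earlier progress updates line up with it. The arithmetic is easy to check in isolation:

    num_tests = 7
    progress_length = len(f" [{num_tests}/{num_tests}]")  # reserves room for " [7/7]"
    assert progress_length == 6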
@@ -819,8 +830,9 @@ class TerminalReporter:
             rep.toterminal(self._tw)
 
     def _printcollecteditems(self, items: Sequence[Item]) -> None:
-        if self.config.option.verbose < 0:
-            if self.config.option.verbose < -1:
+        test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES)
+        if test_cases_verbosity < 0:
+            if test_cases_verbosity < -1:
                 counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
                 for name, count in sorted(counts.items()):
                     self._tw.line("%s: %d" % (name, count))
@@ -840,7 +852,7 @@ class TerminalReporter:
             stack.append(col)
             indent = (len(stack) - 1) * "  "
             self._tw.line(f"{indent}{col}")
-            if self.config.option.verbose >= 1:
+            if test_cases_verbosity >= 1:
                 obj = getattr(col, "obj", None)
                 doc = inspect.getdoc(obj) if obj else None
                 if doc:
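
Taken together, the two `_printcollecteditems` hunks make `--collect-only` honor the fine-grained level: below -1 only per-file test counts are printed, and at 1 or higher docstrings are added to the node tree. A condensed pytester sketch of the counting mode (hypothetical, a smaller sibling of the -2 test added below):

    def test_collect_only_counts_per_file(pytester: Pytester) -> None:
        pytester.makepyfile(test_sample="def test_a(): pass\ndef test_b(): pass")
        pytester.makeini("[pytest]\nverbosity_test_cases = -2\n")
        result = pytester.runpytest("--collect-only")
        # Counting mode: one "<file>: <count>" line per file instead of the node tree.
        result.stdout.fnmatch_lines(["test_sample.py: 2"])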
@@ -2614,3 +2614,123 @@ def test_format_trimmed() -> None:
     assert _format_trimmed(" ({}) ", msg, len(msg) + 4) == " (unconditional skip) "
     assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) "
+
+
+def test_fine_grained_test_case_verbosity(pytester: Pytester):
+    p = pytester.makepyfile(_fine_grained_verbosity_file_contents())
+    pytester.makeini(
+        """
+        [pytest]
+        verbosity_test_cases = 2
+        """
+    )
+    result = pytester.runpytest(p)
+
+    result.stdout.fnmatch_lines(
+        [
+            f"{p.name}::test_ok PASSED [ 14%]",
+            f"{p.name}::test_words_fail FAILED [ 28%]",
+            f"{p.name}::test_numbers_fail FAILED [ 42%]",
+            f"{p.name}::test_long_text_fail FAILED [ 57%]",
+            f"{p.name}::test_parametrize_fail[hello-1] FAILED [ 71%]",
+            f"{p.name}::test_parametrize_fail[world-987654321] FAILED [ 85%]",
+            f"{p.name}::test_sample_skip SKIPPED (some",
+            "long skip reason that will not fit on a single line with other content",
+            "that goes on and on and on and on and on) [100%]",
+        ],
+        consecutive=True,
+    )
+
+
+def test_fine_grained_test_case_verbosity_collect_only_negative_2(pytester: Pytester):
+    p = pytester.makepyfile(_fine_grained_verbosity_file_contents())
+    pytester.makeini(
+        """
+        [pytest]
+        verbosity_test_cases = -2
+        """
+    )
+    result = pytester.runpytest("--collect-only", p)
+
+    result.stdout.fnmatch_lines(
+        [
+            "collected 7 items",
+            "",
+            f"{p.name}: 7",
+        ],
+        consecutive=True,
+    )
+
+
+def test_fine_grained_test_case_verbosity_collect_only_positive_2(pytester: Pytester):
+    p = pytester.makepyfile(_fine_grained_verbosity_file_contents())
+    pytester.makeini(
+        """
+        [pytest]
+        verbosity_test_cases = 2
+        """
+    )
+    result = pytester.runpytest("--collect-only", p)
+
+    result.stdout.fnmatch_lines(
+        [
+            "collected 7 items",
+            "",
+            f"<Module {p.name}>",
+            "  <Function test_ok>",
+            "    some docstring",
+            "  <Function test_words_fail>",
+            "  <Function test_numbers_fail>",
+            "  <Function test_long_text_fail>",
+            "  <Function test_parametrize_fail[hello-1]>",
+            "  <Function test_parametrize_fail[world-987654321]>",
+            "  <Function test_sample_skip>",
+        ],
+        consecutive=True,
+    )
+
+
+def _fine_grained_verbosity_file_contents() -> str:
+    long_text = "Lorem ipsum dolor sit amet " * 10
+    return f"""
+    import pytest
+
+    def test_ok():
+        '''
+        some docstring
+        '''
+        pass
+
+
+    def test_words_fail():
+        fruits1 = ["banana", "apple", "grapes", "melon", "kiwi"]
+        fruits2 = ["banana", "apple", "orange", "melon", "kiwi"]
+        assert fruits1 == fruits2
+
+
+    def test_numbers_fail():
+        number_to_text1 = {{str(x): x for x in range(5)}}
+        number_to_text2 = {{str(x * 10): x * 10 for x in range(5)}}
+        assert number_to_text1 == number_to_text2
+
+
+    def test_long_text_fail():
+        long_text = "{long_text}"
+        assert "hello world" in long_text
+
+
+    @pytest.mark.parametrize(["foo", "bar"], [
+        ("hello", 1),
+        ("world", 987654321),
+    ])
+    def test_parametrize_fail(foo, bar):
+        long_text = f"{{foo}} {{bar}}"
+        assert "hello world" in long_text
+
+
+    @pytest.mark.skip(
+        "some long skip reason that will not fit on a single line with other content that goes"
+        " on and on and on and on and on"
+    )
+    def test_sample_skip():
+        pass
+    """
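
One property these tests rely on implicitly: leaving `verbosity_test_cases` at its `auto` default keeps today's behavior, since the fine-grained lookup defers to the main level. A hypothetical regression test for that fallback (not part of this commit):

    def test_verbosity_test_cases_defaults_to_global(pytester: Pytester) -> None:
        pytester.makepyfile("def test_x(): pass")
        # No ini setting at all: plain -v should still produce per-test lines.
        result = pytester.runpytest("-v")
        result.stdout.fnmatch_lines(["*::test_x PASSED*"])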