diff --git a/AUTHORS b/AUTHORS
index 19242fc98..a4f4d8410 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -138,6 +138,7 @@ Erik Hasse
 Erik M. Bray
 Evan Kepner
 Evgeny Seliverstov
+Fabian Sturm
 Fabien Zarifian
 Fabio Zadrozny
 Felix Hofstätter
diff --git a/changelog/11233.feature.rst b/changelog/11233.feature.rst
new file mode 100644
index 000000000..c465def84
--- /dev/null
+++ b/changelog/11233.feature.rst
@@ -0,0 +1,5 @@
+Improvements to ``-r`` for xfailures and xpasses:
+
+* Report tracebacks for xfailures when ``-rx`` is set.
+* Report captured output for xpasses when ``-rX`` is set.
+* For xpasses, add ``-`` in summary between test name and reason, to match how xfail is displayed.
diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py
index ea26d9368..b91a97221 100644
--- a/src/_pytest/terminal.py
+++ b/src/_pytest/terminal.py
@@ -878,8 +878,10 @@ class TerminalReporter:
     def pytest_terminal_summary(self) -> Generator[None, None, None]:
         self.summary_errors()
         self.summary_failures()
+        self.summary_xfailures()
         self.summary_warnings()
         self.summary_passes()
+        self.summary_xpasses()
         try:
             return (yield)
         finally:
@@ -1009,12 +1011,20 @@ class TerminalReporter:
            )

     def summary_passes(self) -> None:
+        self.summary_passes_combined("passed", "PASSES", "P")
+
+    def summary_xpasses(self) -> None:
+        self.summary_passes_combined("xpassed", "XPASSES", "X")
+
+    def summary_passes_combined(
+        self, which_reports: str, sep_title: str, needed_opt: str
+    ) -> None:
         if self.config.option.tbstyle != "no":
-            if self.hasopt("P"):
-                reports: List[TestReport] = self.getreports("passed")
+            if self.hasopt(needed_opt):
+                reports: List[TestReport] = self.getreports(which_reports)
                 if not reports:
                     return
-                self.write_sep("=", "PASSES")
+                self.write_sep("=", sep_title)
                 for rep in reports:
                     if rep.sections:
                         msg = self._getfailureheadline(rep)
@@ -1048,21 +1058,30 @@ class TerminalReporter:
             self._tw.line(content)

     def summary_failures(self) -> None:
+        self.summary_failures_combined("failed", "FAILURES")
+
+    def summary_xfailures(self) -> None:
+        self.summary_failures_combined("xfailed", "XFAILURES", "x")
+
+    def summary_failures_combined(
+        self, which_reports: str, sep_title: str, needed_opt: Optional[str] = None
+    ) -> None:
         if self.config.option.tbstyle != "no":
-            reports: List[BaseReport] = self.getreports("failed")
-            if not reports:
-                return
-            self.write_sep("=", "FAILURES")
-            if self.config.option.tbstyle == "line":
-                for rep in reports:
-                    line = self._getcrashline(rep)
-                    self.write_line(line)
-            else:
-                for rep in reports:
-                    msg = self._getfailureheadline(rep)
-                    self.write_sep("_", msg, red=True, bold=True)
-                    self._outrep_summary(rep)
-                    self._handle_teardown_sections(rep.nodeid)
+            if not needed_opt or self.hasopt(needed_opt):
+                reports: List[BaseReport] = self.getreports(which_reports)
+                if not reports:
+                    return
+                self.write_sep("=", sep_title)
+                if self.config.option.tbstyle == "line":
+                    for rep in reports:
+                        line = self._getcrashline(rep)
+                        self.write_line(line)
+                else:
+                    for rep in reports:
+                        msg = self._getfailureheadline(rep)
+                        self.write_sep("_", msg, red=True, bold=True)
+                        self._outrep_summary(rep)
+                        self._handle_teardown_sections(rep.nodeid)

     def summary_errors(self) -> None:
         if self.config.option.tbstyle != "no":
@@ -1168,8 +1187,11 @@ class TerminalReporter:
                     verbose_word, **{_color_for_type["warnings"]: True}
                 )
                 nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
+                line = f"{markup_word} {nodeid}"
                 reason = rep.wasxfail
-                lines.append(f"{markup_word} {nodeid} {reason}")
+                if reason:
+                    line += " - " + str(reason)
+                lines.append(line)

         def show_skipped(lines: List[str]) -> None:
             skipped: List[CollectReport] = self.stats.get("skipped", [])
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index b7e448df3..b2ad4b0cf 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -649,7 +649,7 @@ class TestXFail:
         result.stdout.fnmatch_lines(
             [
                 "*test_strict_xfail*",
-                "XPASS test_strict_xfail.py::test_foo unsupported feature",
+                "XPASS test_strict_xfail.py::test_foo - unsupported feature",
             ]
         )
         assert result.ret == (1 if strict else 0)
diff --git a/testing/test_terminal.py b/testing/test_terminal.py
index 20392096e..b521deea7 100644
--- a/testing/test_terminal.py
+++ b/testing/test_terminal.py
@@ -2619,3 +2619,122 @@ def test_format_trimmed() -> None:

     assert _format_trimmed(" ({}) ", msg, len(msg) + 4) == " (unconditional skip) "
     assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) "
+
+
+def test_summary_xfail_reason(pytester: Pytester) -> None:
+    pytester.makepyfile(
+        """
+        import pytest
+
+        @pytest.mark.xfail
+        def test_xfail():
+            assert False
+
+        @pytest.mark.xfail(reason="foo")
+        def test_xfail_reason():
+            assert False
+        """
+    )
+    result = pytester.runpytest("-rx")
+    expect1 = "XFAIL test_summary_xfail_reason.py::test_xfail"
+    expect2 = "XFAIL test_summary_xfail_reason.py::test_xfail_reason - foo"
+    result.stdout.fnmatch_lines([expect1, expect2])
+    assert result.stdout.lines.count(expect1) == 1
+    assert result.stdout.lines.count(expect2) == 1
+
+
+def test_summary_xfail_tb(pytester: Pytester) -> None:
+    pytester.makepyfile(
+        """
+        import pytest
+
+        @pytest.mark.xfail
+        def test_xfail():
+            a, b = 1, 2
+            assert a == b
+        """
+    )
+    result = pytester.runpytest("-rx")
+    result.stdout.fnmatch_lines(
+        [
+            "*= XFAILURES =*",
+            "*_ test_xfail _*",
+            "* @pytest.mark.xfail*",
+            "* def test_xfail():*",
+            "* a, b = 1, 2*",
+            "> *assert a == b*",
+            "E *assert 1 == 2*",
+            "test_summary_xfail_tb.py:6: AssertionError*",
+            "*= short test summary info =*",
+            "XFAIL test_summary_xfail_tb.py::test_xfail",
+            "*= 1 xfailed in * =*",
+        ]
+    )
+
+
+def test_xfail_tb_line(pytester: Pytester) -> None:
+    pytester.makepyfile(
+        """
+        import pytest
+
+        @pytest.mark.xfail
+        def test_xfail():
+            a, b = 1, 2
+            assert a == b
+        """
+    )
+    result = pytester.runpytest("-rx", "--tb=line")
+    result.stdout.fnmatch_lines(
+        [
+            "*= XFAILURES =*",
+            "*test_xfail_tb_line.py:6: assert 1 == 2",
+            "*= short test summary info =*",
+            "XFAIL test_xfail_tb_line.py::test_xfail",
+            "*= 1 xfailed in * =*",
+        ]
+    )
+
+
+def test_summary_xpass_reason(pytester: Pytester) -> None:
+    pytester.makepyfile(
+        """
+        import pytest
+
+        @pytest.mark.xfail
+        def test_pass():
+            ...
+
+        @pytest.mark.xfail(reason="foo")
+        def test_reason():
+            ...
+        """
+    )
+    result = pytester.runpytest("-rX")
+    expect1 = "XPASS test_summary_xpass_reason.py::test_pass"
+    expect2 = "XPASS test_summary_xpass_reason.py::test_reason - foo"
+    result.stdout.fnmatch_lines([expect1, expect2])
+    assert result.stdout.lines.count(expect1) == 1
+    assert result.stdout.lines.count(expect2) == 1
+
+
+def test_xpass_output(pytester: Pytester) -> None:
+    pytester.makepyfile(
+        """
+        import pytest
+
+        @pytest.mark.xfail
+        def test_pass():
+            print('hi there')
+        """
+    )
+    result = pytester.runpytest("-rX")
+    result.stdout.fnmatch_lines(
+        [
+            "*= XPASSES =*",
+            "*_ test_pass _*",
+            "*- Captured stdout call -*",
+            "*= short test summary info =*",
+            "XPASS test_xpass_output.py::test_pass*",
+            "*= 1 xpassed in * =*",
+        ]
+    )
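
A minimal usage sketch of the behavior this patch enables (not part of the patch; the file and test names below are illustrative only). The XFAILURES/XPASSES section titles, the -rx/-rX gating, and the "XFAIL <nodeid> - <reason>" summary format all come from the diff above:

# demo_xfail_report.py -- hypothetical example file, not from the patch.
import pytest


@pytest.mark.xfail(reason="known bug")
def test_known_bug():
    # Fails as expected (xfail). With this patch, `pytest -rx` prints this
    # traceback under a new "XFAILURES" section, and the short summary line
    # reads "XFAIL demo_xfail_report.py::test_known_bug - known bug".
    assert 1 == 2


@pytest.mark.xfail(reason="fixed upstream")
def test_unexpected_pass():
    # Passes unexpectedly (xpass). With this patch, `pytest -rX` prints the
    # captured stdout under a new "XPASSES" section, and the summary line
    # reads "XPASS demo_xfail_report.py::test_unexpected_pass - fixed upstream".
    print("captured output shown with -rX")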