From a5d36c1440ac500af308d076c5bf917fde2b3b22 Mon Sep 17 00:00:00 2001 From: Brian Okken <1568356+okken@users.noreply.github.com> Date: Fri, 22 Dec 2023 20:30:58 -0800 Subject: [PATCH 1/5] summary changes + testing for xfail and xpass --- src/_pytest/terminal.py | 45 ++++++++++++++-- testing/test_terminal.py | 114 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+), 3 deletions(-) diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index ea26d9368..3278e88be 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -878,8 +878,10 @@ class TerminalReporter: def pytest_terminal_summary(self) -> Generator[None, None, None]: self.summary_errors() self.summary_failures() + self.summary_xfailures() self.summary_warnings() self.summary_passes() + self.summary_xpasses() try: return (yield) finally: @@ -1022,6 +1024,20 @@ class TerminalReporter: self._outrep_summary(rep) self._handle_teardown_sections(rep.nodeid) + def summary_xpasses(self) -> None: + if self.config.option.tbstyle != "no": + if self.hasopt("X"): + reports: List[TestReport] = self.getreports("xpassed") + if not reports: + return + self.write_sep("=", "XPASSES") + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, green=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + def _get_teardown_reports(self, nodeid: str) -> List[TestReport]: reports = self.getreports("") return [ @@ -1064,6 +1080,24 @@ class TerminalReporter: self._outrep_summary(rep) self._handle_teardown_sections(rep.nodeid) + def summary_xfailures(self) -> None: + if self.config.option.tbstyle != "no": + if self.hasopt("x"): + reports: List[BaseReport] = self.getreports("xfailed") + if not reports: + return + self.write_sep("=", "XFAILURES") + if self.config.option.tbstyle == "line": + for rep in reports: + line = self._getcrashline(rep) + self.write_line(line) + else: + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + def summary_errors(self) -> None: if self.config.option.tbstyle != "no": reports: List[BaseReport] = self.getreports("error") @@ -1152,8 +1186,10 @@ class TerminalReporter: markup_word = self._tw.markup( verbose_word, **{_color_for_type["warnings"]: True} ) - nodeid = _get_node_id_with_markup(self._tw, self.config, rep) - line = f"{markup_word} {nodeid}" + color = _color_for_type.get("xfailed", _color_for_type_default) + line = _get_line_with_reprcrash_message( + self.config, rep, self._tw, {color: True} + ) reason = rep.wasxfail if reason: line += " - " + str(reason) @@ -1168,8 +1204,11 @@ class TerminalReporter: verbose_word, **{_color_for_type["warnings"]: True} ) nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" reason = rep.wasxfail - lines.append(f"{markup_word} {nodeid} {reason}") + if reason: + line += " - " + str(reason) + lines.append(line) def show_skipped(lines: List[str]) -> None: skipped: List[CollectReport] = self.stats.get("skipped", []) diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 80958f210..4f8d449e7 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -2614,3 +2614,117 @@ def test_format_trimmed() -> None: assert _format_trimmed(" ({}) ", msg, len(msg) + 4) == " (unconditional skip) " assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) 
" + + +def test_summary_xfail_reason(pytester: Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.xfail + def test_xfail(): + assert False + + @pytest.mark.xfail(reason="foo") + def test_xfail_reason(): + assert False + """ + ) + result = pytester.runpytest("-rx") + expect1 = "XFAIL test_summary_xfail_reason.py::test_xfail - assert False" + expect2 = "XFAIL test_summary_xfail_reason.py::test_xfail_reason - assert False - foo" + result.stdout.fnmatch_lines([expect1, expect2]) + assert result.stdout.lines.count(expect1) == 1 + assert result.stdout.lines.count(expect2) == 1 + + +def test_summary_xfail_tb(pytester: Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.xfail + def test_xfail(): + a, b = 1, 2 + assert a == b + """ + ) + result = pytester.runpytest("-rx") + result.stdout.fnmatch_lines([ + "*= XFAILURES =*", + "*_ test_xfail _*", + "* @pytest.mark.xfail*", + "* def test_xfail():*", + "* a, b = 1, 2*", + "> *assert a == b*", + "E *assert 1 == 2*", + "test_summary_xfail_tb.py:6: AssertionError*", + "*= short test summary info =*", + "XFAIL test_summary_xfail_tb.py::test_xfail - assert 1 == 2", + "*= 1 xfailed in * =*" + ]) + + +def test_xfail_tb_line(pytester: Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.xfail + def test_xfail(): + a, b = 1, 2 + assert a == b + """ + ) + result = pytester.runpytest("-rx", "--tb=line") + result.stdout.fnmatch_lines([ + "*= XFAILURES =*", + "*test_xfail_tb_line.py:6: assert 1 == 2", + "*= short test summary info =*", + "XFAIL test_xfail_tb_line.py::test_xfail - assert 1 == 2", + "*= 1 xfailed in * =*" + ]) + + +def test_summary_xpass_reason(pytester: Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.xfail + def test_pass(): + ... + + @pytest.mark.xfail(reason="foo") + def test_reason(): + ... 
+ """ + ) + result = pytester.runpytest("-rX") + expect1 = "XPASS test_summary_xpass_reason.py::test_pass" + expect2 = "XPASS test_summary_xpass_reason.py::test_reason - foo" + result.stdout.fnmatch_lines([expect1, expect2]) + assert result.stdout.lines.count(expect1) == 1 + assert result.stdout.lines.count(expect2) == 1 + + +def test_xpass_output(pytester: Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.xfail + def test_pass(): + print('hi there') + """ + ) + result = pytester.runpytest("-rX") + result.stdout.fnmatch_lines([ + "*= XPASSES =*", + "*_ test_pass _*", + "*- Captured stdout call -*", + "*= short test summary info =*", + "XPASS test_xpass_output.py::test_pass", + "*= 1 xpassed in * =*" + ]) + From b0cb867cc0d5e4d1292b4af6a52467201966b12d Mon Sep 17 00:00:00 2001 From: Brian Okken <1568356+okken@users.noreply.github.com> Date: Fri, 22 Dec 2023 20:44:23 -0800 Subject: [PATCH 2/5] remove some unused variables --- src/_pytest/terminal.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index 3278e88be..df90111da 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -1182,10 +1182,6 @@ class TerminalReporter: def show_xfailed(lines: List[str]) -> None: xfailed = self.stats.get("xfailed", []) for rep in xfailed: - verbose_word = rep._get_verbose_word(self.config) - markup_word = self._tw.markup( - verbose_word, **{_color_for_type["warnings"]: True} - ) color = _color_for_type.get("xfailed", _color_for_type_default) line = _get_line_with_reprcrash_message( self.config, rep, self._tw, {color: True} From 52db918a27b2eb5043de6e80215076a98b0b9fff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Sat, 23 Dec 2023 09:12:13 +0100 Subject: [PATCH 3/5] Fix handling empty values of NO_COLOR and FORCE_COLOR (#11712) * Fix handling empty values of NO_COLOR and FORCE_COLOR Fix handling NO_COLOR and FORCE_COLOR environment variables to correctly be ignored when they are set to an empty value, as defined in the specification: > Command-line software which adds ANSI color to its output by default > should check for a NO_COLOR environment variable that, when present > *and not an empty string* (regardless of its value), prevents > the addition of ANSI color. (emphasis mine, https://no-color.org/) The same is true of FORCE_COLOR, https://force-color.org/. * Streamline testing for FORCE_COLOR and NO_COLOR Streamline the tests for FORCE_COLOR and NO_COLOR variables, and cover all possible cases (unset, set to empty, set to "1"). Combine the two assert functions into one taking boolean parameters. Mock file.isatty in all circumstances to ensure that the environment variables take precedence over the fallback value resulting from isatty check (or that the fallback is actually used, in the case of both FORCE_COLOR and NO_COLOR being unset). 
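For reference, a minimal standalone sketch of the truthiness check this
change relies on (the color_enabled helper and its isatty parameter are
illustrative only, not pytest API):

    import os

    def color_enabled(isatty: bool) -> bool:
        # os.environ.get() returns "" for a set-but-empty variable, and
        # "" is falsy, so an empty NO_COLOR/FORCE_COLOR falls through to
        # the isatty fallback, as no-color.org and force-color.org require.
        if os.environ.get("NO_COLOR"):
            return False
        if os.environ.get("FORCE_COLOR"):
            return True
        return isatty

    os.environ["NO_COLOR"] = ""   # set but empty -> must be ignored
    assert color_enabled(isatty=True)
    os.environ["NO_COLOR"] = "1"  # non-empty -> disables color
    assert not color_enabled(isatty=True)
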
--- AUTHORS | 1 + changelog/11712.bugfix.rst | 1 + doc/en/reference/reference.rst | 4 +- src/_pytest/_io/terminalwriter.py | 4 +- testing/io/test_terminalwriter.py | 63 +++++++++++++++++++------------ 5 files changed, 45 insertions(+), 28 deletions(-) create mode 100644 changelog/11712.bugfix.rst diff --git a/AUTHORS b/AUTHORS index bb273edcc..42cfd0be2 100644 --- a/AUTHORS +++ b/AUTHORS @@ -266,6 +266,7 @@ Michael Goerz Michael Krebs Michael Seifert Michal Wajszczuk +Michał Górny Michał Zięba Mickey Pashov Mihai Capotă diff --git a/changelog/11712.bugfix.rst b/changelog/11712.bugfix.rst new file mode 100644 index 000000000..416d76149 --- /dev/null +++ b/changelog/11712.bugfix.rst @@ -0,0 +1 @@ +Fixed handling ``NO_COLOR`` and ``FORCE_COLOR`` to ignore an empty value. diff --git a/doc/en/reference/reference.rst b/doc/en/reference/reference.rst index 3054109ba..b2b63a89e 100644 --- a/doc/en/reference/reference.rst +++ b/doc/en/reference/reference.rst @@ -1146,13 +1146,13 @@ When set to ``0``, pytest will not use color. .. envvar:: NO_COLOR -When set (regardless of value), pytest will not use color in terminal output. +When set to a non-empty string (regardless of value), pytest will not use color in terminal output. ``PY_COLORS`` takes precedence over ``NO_COLOR``, which takes precedence over ``FORCE_COLOR``. See `no-color.org `__ for other libraries supporting this community standard. .. envvar:: FORCE_COLOR -When set (regardless of value), pytest will use color in terminal output. +When set to a non-empty string (regardless of value), pytest will use color in terminal output. ``PY_COLORS`` and ``NO_COLOR`` take precedence over ``FORCE_COLOR``. Exceptions diff --git a/src/_pytest/_io/terminalwriter.py b/src/_pytest/_io/terminalwriter.py index 2b2f49e9a..bf9b76651 100644 --- a/src/_pytest/_io/terminalwriter.py +++ b/src/_pytest/_io/terminalwriter.py @@ -29,9 +29,9 @@ def should_do_markup(file: TextIO) -> bool: return True if os.environ.get("PY_COLORS") == "0": return False - if "NO_COLOR" in os.environ: + if os.environ.get("NO_COLOR"): return False - if "FORCE_COLOR" in os.environ: + if os.environ.get("FORCE_COLOR"): return True return ( hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" diff --git a/testing/io/test_terminalwriter.py b/testing/io/test_terminalwriter.py index a2d730b07..96e7366e5 100644 --- a/testing/io/test_terminalwriter.py +++ b/testing/io/test_terminalwriter.py @@ -5,6 +5,7 @@ import shutil import sys from pathlib import Path from typing import Generator +from typing import Optional from unittest import mock import pytest @@ -164,53 +165,67 @@ def test_attr_hasmarkup() -> None: assert "\x1b[0m" in s -def assert_color_set(): +def assert_color(expected: bool, default: Optional[bool] = None) -> None: file = io.StringIO() - tw = terminalwriter.TerminalWriter(file) - assert tw.hasmarkup + if default is None: + default = not expected + file.isatty = lambda: default # type: ignore + tw = terminalwriter.TerminalWriter(file=file) + assert tw.hasmarkup is expected tw.line("hello", bold=True) s = file.getvalue() - assert len(s) > len("hello\n") - assert "\x1b[1m" in s - assert "\x1b[0m" in s - - -def assert_color_not_set(): - f = io.StringIO() - f.isatty = lambda: True # type: ignore - tw = terminalwriter.TerminalWriter(file=f) - assert not tw.hasmarkup - tw.line("hello", bold=True) - s = f.getvalue() - assert s == "hello\n" + if expected: + assert len(s) > len("hello\n") + assert "\x1b[1m" in s + assert "\x1b[0m" in s + else: + assert s == "hello\n" def 
test_should_do_markup_PY_COLORS_eq_1(monkeypatch: MonkeyPatch) -> None: monkeypatch.setitem(os.environ, "PY_COLORS", "1") - assert_color_set() + assert_color(True) def test_should_not_do_markup_PY_COLORS_eq_0(monkeypatch: MonkeyPatch) -> None: monkeypatch.setitem(os.environ, "PY_COLORS", "0") - assert_color_not_set() + assert_color(False) def test_should_not_do_markup_NO_COLOR(monkeypatch: MonkeyPatch) -> None: monkeypatch.setitem(os.environ, "NO_COLOR", "1") - assert_color_not_set() + assert_color(False) def test_should_do_markup_FORCE_COLOR(monkeypatch: MonkeyPatch) -> None: monkeypatch.setitem(os.environ, "FORCE_COLOR", "1") - assert_color_set() + assert_color(True) -def test_should_not_do_markup_NO_COLOR_and_FORCE_COLOR( +@pytest.mark.parametrize( + ["NO_COLOR", "FORCE_COLOR", "expected"], + [ + ("1", "1", False), + ("", "1", True), + ("1", "", False), + ], +) +def test_NO_COLOR_and_FORCE_COLOR( monkeypatch: MonkeyPatch, + NO_COLOR: str, + FORCE_COLOR: str, + expected: bool, ) -> None: - monkeypatch.setitem(os.environ, "NO_COLOR", "1") - monkeypatch.setitem(os.environ, "FORCE_COLOR", "1") - assert_color_not_set() + monkeypatch.setitem(os.environ, "NO_COLOR", NO_COLOR) + monkeypatch.setitem(os.environ, "FORCE_COLOR", FORCE_COLOR) + assert_color(expected) + + +def test_empty_NO_COLOR_and_FORCE_COLOR_ignored(monkeypatch: MonkeyPatch) -> None: + monkeypatch.setitem(os.environ, "NO_COLOR", "") + monkeypatch.setitem(os.environ, "FORCE_COLOR", "") + assert_color(True, True) + assert_color(False, False) class TestTerminalWriterLineWidth: From 5d726faad07b7878852e36fd196584b03cb2cfda Mon Sep 17 00:00:00 2001 From: Brian Okken <1568356+okken@users.noreply.github.com> Date: Mon, 25 Dec 2023 16:15:22 -0800 Subject: [PATCH 4/5] remove assert outcome from xfail summary, as it breaks [NOTRUN] functionality --- src/_pytest/terminal.py | 8 +++++--- testing/test_skipping.py | 2 +- testing/test_terminal.py | 10 +++++----- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index df90111da..3ffcdfd91 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -1182,10 +1182,12 @@ class TerminalReporter: def show_xfailed(lines: List[str]) -> None: xfailed = self.stats.get("xfailed", []) for rep in xfailed: - color = _color_for_type.get("xfailed", _color_for_type_default) - line = _get_line_with_reprcrash_message( - self.config, rep, self._tw, {color: True} + verbose_word = rep._get_verbose_word(self.config) + markup_word = self._tw.markup( + verbose_word, **{_color_for_type["warnings"]: True} ) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" reason = rep.wasxfail if reason: line += " - " + str(reason) diff --git a/testing/test_skipping.py b/testing/test_skipping.py index b7e448df3..b2ad4b0cf 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -649,7 +649,7 @@ class TestXFail: result.stdout.fnmatch_lines( [ "*test_strict_xfail*", - "XPASS test_strict_xfail.py::test_foo unsupported feature", + "XPASS test_strict_xfail.py::test_foo - unsupported feature", ] ) assert result.ret == (1 if strict else 0) diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 4f8d449e7..43811bc0f 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -2631,8 +2631,8 @@ def test_summary_xfail_reason(pytester: Pytester) -> None: """ ) result = pytester.runpytest("-rx") - expect1 = "XFAIL test_summary_xfail_reason.py::test_xfail - assert False" - expect2 = 
"XFAIL test_summary_xfail_reason.py::test_xfail_reason - assert False - foo" + expect1 = "XFAIL test_summary_xfail_reason.py::test_xfail" + expect2 = "XFAIL test_summary_xfail_reason.py::test_xfail_reason - foo" result.stdout.fnmatch_lines([expect1, expect2]) assert result.stdout.lines.count(expect1) == 1 assert result.stdout.lines.count(expect2) == 1 @@ -2660,7 +2660,7 @@ def test_summary_xfail_tb(pytester: Pytester) -> None: "E *assert 1 == 2*", "test_summary_xfail_tb.py:6: AssertionError*", "*= short test summary info =*", - "XFAIL test_summary_xfail_tb.py::test_xfail - assert 1 == 2", + "XFAIL test_summary_xfail_tb.py::test_xfail", "*= 1 xfailed in * =*" ]) @@ -2681,7 +2681,7 @@ def test_xfail_tb_line(pytester: Pytester) -> None: "*= XFAILURES =*", "*test_xfail_tb_line.py:6: assert 1 == 2", "*= short test summary info =*", - "XFAIL test_xfail_tb_line.py::test_xfail - assert 1 == 2", + "XFAIL test_xfail_tb_line.py::test_xfail", "*= 1 xfailed in * =*" ]) @@ -2724,7 +2724,7 @@ def test_xpass_output(pytester: Pytester) -> None: "*_ test_pass _*", "*- Captured stdout call -*", "*= short test summary info =*", - "XPASS test_xpass_output.py::test_pass", + "XPASS test_xpass_output.py::test_pass*", "*= 1 xpassed in * =*" ]) From afb204218ecf50fd34940b2c384e5d6141876c5a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 26 Dec 2023 00:16:18 +0000 Subject: [PATCH 5/5] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/test_terminal.py | 63 ++++++++++++++++++++++------------------ 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 43811bc0f..23f3d19d9 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -2650,19 +2650,21 @@ def test_summary_xfail_tb(pytester: Pytester) -> None: """ ) result = pytester.runpytest("-rx") - result.stdout.fnmatch_lines([ - "*= XFAILURES =*", - "*_ test_xfail _*", - "* @pytest.mark.xfail*", - "* def test_xfail():*", - "* a, b = 1, 2*", - "> *assert a == b*", - "E *assert 1 == 2*", - "test_summary_xfail_tb.py:6: AssertionError*", - "*= short test summary info =*", - "XFAIL test_summary_xfail_tb.py::test_xfail", - "*= 1 xfailed in * =*" - ]) + result.stdout.fnmatch_lines( + [ + "*= XFAILURES =*", + "*_ test_xfail _*", + "* @pytest.mark.xfail*", + "* def test_xfail():*", + "* a, b = 1, 2*", + "> *assert a == b*", + "E *assert 1 == 2*", + "test_summary_xfail_tb.py:6: AssertionError*", + "*= short test summary info =*", + "XFAIL test_summary_xfail_tb.py::test_xfail", + "*= 1 xfailed in * =*", + ] + ) def test_xfail_tb_line(pytester: Pytester) -> None: @@ -2677,13 +2679,15 @@ def test_xfail_tb_line(pytester: Pytester) -> None: """ ) result = pytester.runpytest("-rx", "--tb=line") - result.stdout.fnmatch_lines([ - "*= XFAILURES =*", - "*test_xfail_tb_line.py:6: assert 1 == 2", - "*= short test summary info =*", - "XFAIL test_xfail_tb_line.py::test_xfail", - "*= 1 xfailed in * =*" - ]) + result.stdout.fnmatch_lines( + [ + "*= XFAILURES =*", + "*test_xfail_tb_line.py:6: assert 1 == 2", + "*= short test summary info =*", + "XFAIL test_xfail_tb_line.py::test_xfail", + "*= 1 xfailed in * =*", + ] + ) def test_summary_xpass_reason(pytester: Pytester) -> None: @@ -2719,12 +2723,13 @@ def test_xpass_output(pytester: Pytester) -> None: """ ) result = pytester.runpytest("-rX") - result.stdout.fnmatch_lines([ - "*= XPASSES =*", - "*_ test_pass _*", - "*- Captured stdout 
call -*", - "*= short test summary info =*", - "XPASS test_xpass_output.py::test_pass*", - "*= 1 xpassed in * =*" - ]) - + result.stdout.fnmatch_lines( + [ + "*= XPASSES =*", + "*_ test_pass _*", + "*- Captured stdout call -*", + "*= short test summary info =*", + "XPASS test_xpass_output.py::test_pass*", + "*= 1 xpassed in * =*", + ] + )
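

A quick way to exercise the series end to end (the file and test names
below are illustrative, not part of the patches; exact spacing of the
output depends on terminal width):

    # demo_xfail.py
    import pytest

    @pytest.mark.xfail(reason="known bug")
    def test_known_bug():
        assert 1 == 2

    @pytest.mark.xfail(reason="fixed upstream")
    def test_unexpected_pass():
        print("surprise")

Running "pytest -rxX demo_xfail.py" with these patches applied should
print an XFAILURES section containing the traceback for test_known_bug,
an XPASSES section showing the captured stdout of test_unexpected_pass,
and short summary lines of the form
"XFAIL demo_xfail.py::test_known_bug - known bug" and
"XPASS demo_xfail.py::test_unexpected_pass - fixed upstream".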