Merge pull request #5061 from blueyed/summary_stats-multi-color
Multiple colors with terminal summary_stats
Commit 4af89bba9d
New changelog entry:

@@ -0,0 +1 @@
+Use multiple colors with terminal summary statistics.
@@ -864,15 +864,41 @@ class TerminalReporter:
             self._tw.line(content)
 
     def summary_stats(self):
-        session_duration = time.time() - self._sessionstarttime
-        (line, color) = build_summary_stats_line(self.stats)
-        msg = "{} in {}".format(line, format_session_duration(session_duration))
-        markup = {color: True, "bold": True}
-
-        if self.verbosity >= 0:
-            self.write_sep("=", msg, **markup)
-        if self.verbosity == -1:
-            self.write_line(msg, **markup)
+        if self.verbosity < -1:
+            return
+
+        session_duration = time.time() - self._sessionstarttime
+        (parts, main_color) = build_summary_stats_line(self.stats)
+        line_parts = []
+
+        display_sep = self.verbosity >= 0
+        if display_sep:
+            fullwidth = self._tw.fullwidth
+        for text, markup in parts:
+            with_markup = self._tw.markup(text, **markup)
+            if display_sep:
+                fullwidth += len(with_markup) - len(text)
+            line_parts.append(with_markup)
+        msg = ", ".join(line_parts)
+
+        main_markup = {main_color: True}
+        duration = " in {}".format(format_session_duration(session_duration))
+        duration_with_markup = self._tw.markup(duration, **main_markup)
+        if display_sep:
+            fullwidth += len(duration_with_markup) - len(duration)
+        msg += duration_with_markup
+
+        if display_sep:
+            markup_for_end_sep = self._tw.markup("", **main_markup)
+            if markup_for_end_sep.endswith("\x1b[0m"):
+                markup_for_end_sep = markup_for_end_sep[:-4]
+            fullwidth += len(markup_for_end_sep)
+            msg += markup_for_end_sep
+
+        if display_sep:
+            self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
+        else:
+            self.write_line(msg, **main_markup)
 
     def short_test_summary(self):
         if not self.reportchars:
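Note on the mechanics: build_summary_stats_line() now returns a list of (text, markup) parts plus a main color instead of one pre-colored string, and summary_stats() renders each part with its own markup before joining, so a bold red "1 failed" and a plain green "1 passed" can share a line. Since ANSI escape codes add invisible characters, the separator width is widened by the markup overhead (the fullwidth bookkeeping above). A minimal sketch of the rendering step, assuming py's TerminalWriter (the object behind self._tw); the parts shown are illustrative:

    import py

    tw = py.io.TerminalWriter()
    tw.hasmarkup = True  # emit ANSI escapes even when not attached to a tty

    parts = [
        ("1 failed", {"red": True, "bold": True}),
        ("1 passed", {"green": True, "bold": False}),
    ]
    # Each part is wrapped in its own escape codes, then the parts are joined.
    msg = ", ".join(tw.markup(text, **markup) for text, markup in parts)
    print(repr(msg))
    # -> '\x1b[31m\x1b[1m1 failed\x1b[0m, \x1b[32m1 passed\x1b[0m'

This is exactly the shape of the strings the updated tests further down now expect.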
@@ -1011,6 +1037,15 @@ def _folded_skips(skipped):
     return values
 
 
+_color_for_type = {
+    "failed": "red",
+    "error": "red",
+    "warnings": "yellow",
+    "passed": "green",
+}
+_color_for_type_default = "yellow"
+
+
 def build_summary_stats_line(stats):
     known_types = (
         "failed passed skipped deselected xfailed xpassed warnings error".split()
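Any status key missing from this table (for example, an outcome defined by a plugin) falls back to _color_for_type_default, which keeps unknown outcomes visually flagged in yellow. Illustrative:

    _color_for_type = {"failed": "red", "error": "red", "warnings": "yellow", "passed": "green"}
    _color_for_type_default = "yellow"

    assert _color_for_type.get("failed", _color_for_type_default) == "red"
    assert _color_for_type.get("weird", _color_for_type_default) == "yellow"  # plugin-defined status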
@@ -1021,6 +1056,17 @@ def build_summary_stats_line(stats):
             if found_type:  # setup/teardown reports have an empty key, ignore them
                 known_types.append(found_type)
                 unknown_type_seen = True
+
+    # main color
+    if "failed" in stats or "error" in stats:
+        main_color = "red"
+    elif "warnings" in stats or unknown_type_seen:
+        main_color = "yellow"
+    elif "passed" in stats:
+        main_color = "green"
+    else:
+        main_color = "yellow"
+
     parts = []
     for key in known_types:
         reports = stats.get(key, None)
@@ -1028,23 +1074,14 @@ def build_summary_stats_line(stats):
             count = sum(
                 1 for rep in reports if getattr(rep, "count_towards_summary", True)
             )
-            parts.append("%d %s" % (count, key))
+            color = _color_for_type.get(key, _color_for_type_default)
+            markup = {color: True, "bold": color == main_color}
+            parts.append(("%d %s" % (count, key), markup))
 
-    if parts:
-        line = ", ".join(parts)
-    else:
-        line = "no tests ran"
-
-    if "failed" in stats or "error" in stats:
-        color = "red"
-    elif "warnings" in stats or unknown_type_seen:
-        color = "yellow"
-    elif "passed" in stats:
-        color = "green"
-    else:
-        color = "yellow"
+    if not parts:
+        parts = [("no tests ran", {_color_for_type_default: True})]
 
-    return line, color
+    return parts, main_color
 
 
 def _plugin_nameversions(plugininfo):
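Taken together, the new return shape of build_summary_stats_line() can be checked directly. A small self-contained example mirroring the updated tests below; DummyReport is just a stand-in for real test reports:

    from _pytest.terminal import build_summary_stats_line

    class DummyReport:
        count_towards_summary = True

    parts, main_color = build_summary_stats_line(
        {"failed": (DummyReport(),), "passed": (DummyReport(),)}
    )
    assert main_color == "red"
    assert parts == [
        ("1 failed", {"bold": True, "red": True}),
        ("1 passed", {"bold": False, "green": True}),
    ]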
@@ -193,7 +193,7 @@ class TestPDB:
         )
         child = testdir.spawn_pytest("-rs --pdb %s" % p1)
         child.expect("Skipping also with pdb active")
-        child.expect("1 skipped in")
+        child.expect_exact("= \x1b[33m\x1b[1m1 skipped\x1b[0m\x1b[33m in")
         child.sendeof()
         self.flush(child)
 
@@ -221,7 +221,7 @@ class TestPDB:
         child.sendeof()
         rest = child.read().decode("utf8")
         assert "Exit: Quitting debugger" in rest
-        assert "= 1 failed in" in rest
+        assert "= \x1b[31m\x1b[1m1 failed\x1b[0m\x1b[31m in" in rest
         assert "def test_1" not in rest
         assert "get rekt" not in rest
         self.flush(child)
@@ -725,7 +725,7 @@ class TestPDB:
             assert "> PDB continue (IO-capturing resumed) >" in rest
         else:
             assert "> PDB continue >" in rest
-        assert "1 passed in" in rest
+        assert "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in" in rest
 
     def test_pdb_used_outside_test(self, testdir):
         p1 = testdir.makepyfile(
@@ -1041,7 +1041,7 @@ class TestTraceOption:
         child.sendline("q")
         child.expect_exact("Exit: Quitting debugger")
         rest = child.read().decode("utf8")
-        assert "2 passed in" in rest
+        assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
         assert "reading from stdin while output" not in rest
         # Only printed once - not on stderr.
         assert "Exit: Quitting debugger" not in child.before.decode("utf8")
@@ -1152,7 +1152,7 @@ def test_pdb_suspends_fixture_capturing(testdir, fixture):
 
     TestPDB.flush(child)
     assert child.exitstatus == 0
-    assert "= 1 passed in " in rest
+    assert "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in" in rest
     assert "> PDB continue (IO-capturing resumed for fixture %s) >" % (fixture) in rest
 
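For readers decoding the expected strings in these test updates: \x1b[31m, \x1b[32m, and \x1b[33m switch to red, green, and yellow, \x1b[1m turns on bold, and \x1b[0m resets. Each expectation is therefore the separator, the bold main-colored count, a reset, and the main color re-applied for the " in {duration}" tail:

    # "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in"
    #     ^green   ^bold ^count   ^reset ^green again, for " in <duration>"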
@@ -164,7 +164,7 @@ class TestTerminal:
         child.expect(r"collecting 2 items")
         child.expect(r"collected 2 items")
         rest = child.read().decode("utf8")
-        assert "2 passed in" in rest
+        assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
 
     def test_itemreport_subclasses_show_subclassed_file(self, testdir):
         testdir.makepyfile(
@@ -1252,42 +1252,123 @@ def test_terminal_summary_warnings_header_once(testdir):
         # dict value, not the actual contents, so tuples of anything
         # suffice
         # Important statuses -- the highest priority of these always wins
-        ("red", "1 failed", {"failed": (1,)}),
-        ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
-        ("red", "1 error", {"error": (1,)}),
-        ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
+        ("red", [("1 failed", {"bold": True, "red": True})], {"failed": (1,)}),
+        (
+            "red",
+            [
+                ("1 failed", {"bold": True, "red": True}),
+                ("1 passed", {"bold": False, "green": True}),
+            ],
+            {"failed": (1,), "passed": (1,)},
+        ),
+        ("red", [("1 error", {"bold": True, "red": True})], {"error": (1,)}),
+        (
+            "red",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 error", {"bold": True, "red": True}),
+            ],
+            {"error": (1,), "passed": (1,)},
+        ),
         # (a status that's not known to the code)
-        ("yellow", "1 weird", {"weird": (1,)}),
-        ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
-        ("yellow", "1 warnings", {"warnings": (1,)}),
-        ("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}),
-        ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}),
+        ("yellow", [("1 weird", {"bold": True, "yellow": True})], {"weird": (1,)}),
+        (
+            "yellow",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 weird", {"bold": True, "yellow": True}),
+            ],
+            {"weird": (1,), "passed": (1,)},
+        ),
+        (
+            "yellow",
+            [("1 warnings", {"bold": True, "yellow": True})],
+            {"warnings": (1,)},
+        ),
+        (
+            "yellow",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 warnings", {"bold": True, "yellow": True}),
+            ],
+            {"warnings": (1,), "passed": (1,)},
+        ),
+        (
+            "green",
+            [("5 passed", {"bold": True, "green": True})],
+            {"passed": (1, 2, 3, 4, 5)},
+        ),
         # "Boring" statuses.  These have no effect on the color of the summary
         # line.  Thus, if *every* test has a boring status, the summary line stays
         # at its default color, i.e. yellow, to warn the user that the test run
         # produced no useful information
-        ("yellow", "1 skipped", {"skipped": (1,)}),
-        ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),
-        ("yellow", "1 deselected", {"deselected": (1,)}),
-        ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),
-        ("yellow", "1 xfailed", {"xfailed": (1,)}),
-        ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),
-        ("yellow", "1 xpassed", {"xpassed": (1,)}),
-        ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),
+        ("yellow", [("1 skipped", {"bold": True, "yellow": True})], {"skipped": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 skipped", {"bold": False, "yellow": True}),
+            ],
+            {"skipped": (1,), "passed": (1,)},
+        ),
+        (
+            "yellow",
+            [("1 deselected", {"bold": True, "yellow": True})],
+            {"deselected": (1,)},
+        ),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 deselected", {"bold": False, "yellow": True}),
+            ],
+            {"deselected": (1,), "passed": (1,)},
+        ),
+        ("yellow", [("1 xfailed", {"bold": True, "yellow": True})], {"xfailed": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 xfailed", {"bold": False, "yellow": True}),
+            ],
+            {"xfailed": (1,), "passed": (1,)},
+        ),
+        ("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 xpassed", {"bold": False, "yellow": True}),
+            ],
+            {"xpassed": (1,), "passed": (1,)},
+        ),
         # Likewise if no tests were found at all
-        ("yellow", "no tests ran", {}),
+        ("yellow", [("no tests ran", {"yellow": True})], {}),
         # Test the empty-key special case
-        ("yellow", "no tests ran", {"": (1,)}),
-        ("green", "1 passed", {"": (1,), "passed": (1,)}),
+        ("yellow", [("no tests ran", {"yellow": True})], {"": (1,)}),
+        (
+            "green",
+            [("1 passed", {"bold": True, "green": True})],
+            {"": (1,), "passed": (1,)},
+        ),
         # A couple more complex combinations
         (
             "red",
-            "1 failed, 2 passed, 3 xfailed",
+            [
+                ("1 failed", {"bold": True, "red": True}),
+                ("2 passed", {"bold": False, "green": True}),
+                ("3 xfailed", {"bold": False, "yellow": True}),
+            ],
             {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)},
         ),
         (
             "green",
-            "1 passed, 2 skipped, 3 deselected, 2 xfailed",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("2 skipped", {"bold": False, "yellow": True}),
+                ("3 deselected", {"bold": False, "yellow": True}),
+                ("2 xfailed", {"bold": False, "yellow": True}),
+            ],
             {
                 "passed": (1,),
                 "skipped": (1, 2),
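A pattern worth noting in these cases: a part is bold exactly when its color matches the run's main color (markup = {color: True, "bold": color == main_color} in the implementation), so "1 passed" is emphasized in an all-green run but not next to a failure. Illustrative:

    main_color = "red"  # e.g. a failing run
    for color in ("red", "green", "yellow"):
        print({color: True, "bold": color == main_color})
    # {'red': True, 'bold': True}      <- emphasized: matches the main color
    # {'green': True, 'bold': False}
    # {'yellow': True, 'bold': False}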
@@ -1313,11 +1394,11 @@ def test_skip_counting_towards_summary():
     r1 = DummyReport()
     r2 = DummyReport()
     res = build_summary_stats_line({"failed": (r1, r2)})
-    assert res == ("2 failed", "red")
+    assert res == ([("2 failed", {"bold": True, "red": True})], "red")
 
     r1.count_towards_summary = False
     res = build_summary_stats_line({"failed": (r1, r2)})
-    assert res == ("1 failed", "red")
+    assert res == ([("1 failed", {"bold": True, "red": True})], "red")
 
 
 class TestClassicOutputStyle:
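Side note: the summary counts read the flag with getattr(rep, "count_towards_summary", True), so it defaults to True and any report object can opt out of the totals, which is what the test above exercises. A minimal sketch of that filter:

    class DummyReport:
        count_towards_summary = True

    reports = [DummyReport(), DummyReport()]
    reports[0].count_towards_summary = False
    count = sum(1 for rep in reports if getattr(rep, "count_towards_summary", True))
    assert count == 1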