Merge pull request #4920 from nicoddemus/subtests-pytest-1367
Internal refactorings required for *external* pytest-subtests plugin
This commit is contained in:
		
						commit
						23ab43233e
					
				|  | @ -0,0 +1,6 @@ | |||
| Internal refactorings have been made in order to make the implementation of the | ||||
| `pytest-subtests <https://github.com/pytest-dev/pytest-subtests>`__ plugin | ||||
| possible, which adds unittest sub-test support and a new ``subtests`` fixture as discussed in | ||||
| `#1367 <https://github.com/pytest-dev/pytest/issues/1367>`__. | ||||
| 
 | ||||
| For details on the internal refactorings, please see the related PR. | ||||
|  | @ -1,6 +1,8 @@ | |||
| import py | ||||
| 
 | ||||
| from _pytest._code.code import ExceptionInfo | ||||
| from _pytest._code.code import TerminalRepr | ||||
| from _pytest.outcomes import skip | ||||
| 
 | ||||
| 
 | ||||
| def getslaveinfoline(node): | ||||
|  | @ -20,6 +22,7 @@ def getslaveinfoline(node): | |||
| 
 | ||||
| class BaseReport(object): | ||||
|     when = None | ||||
|     location = None | ||||
| 
 | ||||
    def __init__(self, **kw):
        """Initialize the report as a plain attribute bag.

        Every keyword argument becomes an instance attribute; subclasses
        (TestReport, CollectReport) pass their full field set through here.
        """
        self.__dict__.update(kw)
|  | @ -97,6 +100,43 @@ class BaseReport(object): | |||
|     def fspath(self): | ||||
|         return self.nodeid.split("::")[0] | ||||
| 
 | ||||
|     @property | ||||
|     def count_towards_summary(self): | ||||
|         """ | ||||
|         **Experimental** | ||||
| 
 | ||||
|         Returns True if this report should be counted towards the totals shown at the end of the | ||||
|         test session: "1 passed, 1 failure, etc". | ||||
| 
 | ||||
|         .. note:: | ||||
| 
 | ||||
|             This function is considered **experimental**, so beware that it is subject to changes | ||||
|             even in patch releases. | ||||
|         """ | ||||
|         return True | ||||
| 
 | ||||
|     @property | ||||
|     def head_line(self): | ||||
|         """ | ||||
|         **Experimental** | ||||
| 
 | ||||
|         Returns the head line shown with longrepr output for this report, more commonly during | ||||
|         traceback representation during failures:: | ||||
| 
 | ||||
|             ________ Test.foo ________ | ||||
| 
 | ||||
| 
 | ||||
|         In the example above, the head_line is "Test.foo". | ||||
| 
 | ||||
|         .. note:: | ||||
| 
 | ||||
|             This function is considered **experimental**, so beware that it is subject to changes | ||||
|             even in patch releases. | ||||
|         """ | ||||
|         if self.location is not None: | ||||
|             fspath, lineno, domain = self.location | ||||
|             return domain | ||||
| 
 | ||||
| 
 | ||||
| class TestReport(BaseReport): | ||||
|     """ Basic test report object (also used for setup and teardown calls if | ||||
|  | @ -159,6 +199,49 @@ class TestReport(BaseReport): | |||
|             self.outcome, | ||||
|         ) | ||||
| 
 | ||||
    @classmethod
    def from_item_and_call(cls, item, call):
        """
        Factory method to create and fill a TestReport with standard item and call info.

        :param item: the test item the report describes.
        :param call: the ``CallInfo`` for one phase (``setup``/``call``/``teardown``).
        :return: a fully populated report instance of ``cls``.
        """
        when = call.when
        duration = call.stop - call.start
        keywords = {x: 1 for x in item.keywords}
        excinfo = call.excinfo
        sections = []
        if not call.excinfo:
            # No exception raised during this phase: a plain pass.
            outcome = "passed"
            longrepr = None
        else:
            if not isinstance(excinfo, ExceptionInfo):
                # NOTE(review): non-ExceptionInfo values appear to be
                # pre-rendered failure representations — stored verbatim.
                outcome = "failed"
                longrepr = excinfo
            elif excinfo.errisinstance(skip.Exception):
                # skip.Exception is reported as a (path, lineno, message) triple.
                outcome = "skipped"
                r = excinfo._getreprcrash()
                longrepr = (str(r.path), r.lineno, r.message)
            else:
                outcome = "failed"
                if call.when == "call":
                    longrepr = item.repr_failure(excinfo)
                else:  # exception in setup or teardown
                    longrepr = item._repr_failure_py(
                        excinfo, style=item.config.option.tbstyle
                    )
        # Attach any captured output sections recorded on the item.
        for rwhen, key, content in item._report_sections:
            sections.append(("Captured %s %s" % (key, rwhen), content))
        return cls(
            item.nodeid,
            item.location,
            keywords,
            outcome,
            longrepr,
            when,
            sections,
            duration,
            user_properties=item.user_properties,
        )
| 
 | ||||
| 
 | ||||
| class CollectReport(BaseReport): | ||||
|     when = "collect" | ||||
|  |  | |||
|  | @ -246,43 +246,7 @@ class CallInfo(object): | |||
| 
 | ||||
| 
 | ||||
def pytest_runtest_makereport(item, call):
    """Hook implementation: build the :class:`TestReport` for *item*/*call*.

    The report-building logic lives in ``TestReport.from_item_and_call`` so
    external plugins (e.g. pytest-subtests) can reuse it; the merged diff
    left the legacy inline body above an unconditional delegating return,
    making it unreachable dead code — only the delegation is kept.
    """
    return TestReport.from_item_and_call(item, call)
| 
 | ||||
| 
 | ||||
| def pytest_make_collect_report(collector): | ||||
|  |  | |||
|  | @ -197,6 +197,7 @@ class WarningReport(object): | |||
|     message = attr.ib() | ||||
|     nodeid = attr.ib(default=None) | ||||
|     fslocation = attr.ib(default=None) | ||||
|     count_towards_summary = True | ||||
| 
 | ||||
|     def get_location(self, config): | ||||
|         """ | ||||
|  | @ -383,6 +384,7 @@ class TerminalReporter(object): | |||
|             self.write_fspath_result(fsid, "") | ||||
| 
 | ||||
|     def pytest_runtest_logreport(self, report): | ||||
|         self._tests_ran = True | ||||
|         rep = report | ||||
|         res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config) | ||||
|         category, letter, word = res | ||||
|  | @ -391,7 +393,6 @@ class TerminalReporter(object): | |||
|         else: | ||||
|             markup = None | ||||
|         self.stats.setdefault(category, []).append(rep) | ||||
|         self._tests_ran = True | ||||
|         if not letter and not word: | ||||
|             # probably passed setup/teardown | ||||
|             return | ||||
|  | @ -724,9 +725,8 @@ class TerminalReporter(object): | |||
|         return res + " " | ||||
| 
 | ||||
|     def _getfailureheadline(self, rep): | ||||
|         if hasattr(rep, "location"): | ||||
|             fspath, lineno, domain = rep.location | ||||
|             return domain | ||||
|         if rep.head_line: | ||||
|             return rep.head_line | ||||
|         else: | ||||
|             return "test session"  # XXX? | ||||
| 
 | ||||
|  | @ -874,18 +874,23 @@ class TerminalReporter(object): | |||
| 
 | ||||
| 
 | ||||
| def build_summary_stats_line(stats): | ||||
|     keys = ("failed passed skipped deselected xfailed xpassed warnings error").split() | ||||
|     unknown_key_seen = False | ||||
|     for key in stats.keys(): | ||||
|         if key not in keys: | ||||
|             if key:  # setup/teardown reports have an empty key, ignore them | ||||
|                 keys.append(key) | ||||
|                 unknown_key_seen = True | ||||
|     known_types = ( | ||||
|         "failed passed skipped deselected xfailed xpassed warnings error".split() | ||||
|     ) | ||||
|     unknown_type_seen = False | ||||
|     for found_type in stats: | ||||
|         if found_type not in known_types: | ||||
|             if found_type:  # setup/teardown reports have an empty key, ignore them | ||||
|                 known_types.append(found_type) | ||||
|                 unknown_type_seen = True | ||||
|     parts = [] | ||||
|     for key in keys: | ||||
|         val = stats.get(key, None) | ||||
|         if val: | ||||
|             parts.append("%d %s" % (len(val), key)) | ||||
|     for key in known_types: | ||||
|         reports = stats.get(key, None) | ||||
|         if reports: | ||||
|             count = sum( | ||||
|                 1 for rep in reports if getattr(rep, "count_towards_summary", True) | ||||
|             ) | ||||
|             parts.append("%d %s" % (count, key)) | ||||
| 
 | ||||
|     if parts: | ||||
|         line = ", ".join(parts) | ||||
|  | @ -894,14 +899,14 @@ def build_summary_stats_line(stats): | |||
| 
 | ||||
|     if "failed" in stats or "error" in stats: | ||||
|         color = "red" | ||||
|     elif "warnings" in stats or unknown_key_seen: | ||||
|     elif "warnings" in stats or unknown_type_seen: | ||||
|         color = "yellow" | ||||
|     elif "passed" in stats: | ||||
|         color = "green" | ||||
|     else: | ||||
|         color = "yellow" | ||||
| 
 | ||||
|     return (line, color) | ||||
|     return line, color | ||||
| 
 | ||||
| 
 | ||||
| def _plugin_nameversions(plugininfo): | ||||
|  |  | |||
|  | @ -15,6 +15,7 @@ import py | |||
| 
 | ||||
| import pytest | ||||
| from _pytest.main import EXIT_NOTESTSCOLLECTED | ||||
| from _pytest.reports import BaseReport | ||||
| from _pytest.terminal import _plugin_nameversions | ||||
| from _pytest.terminal import build_summary_stats_line | ||||
| from _pytest.terminal import getreportopt | ||||
|  | @ -1228,6 +1229,20 @@ def test_summary_stats(exp_line, exp_color, stats_arg): | |||
|     assert color == exp_color | ||||
| 
 | ||||
| 
 | ||||
def test_skip_counting_towards_summary():
    """A report with count_towards_summary=False is excluded from the totals."""

    class DummyReport(BaseReport):
        count_towards_summary = True

    report_a, report_b = DummyReport(), DummyReport()
    result = build_summary_stats_line({"failed": (report_a, report_b)})
    assert result == ("2 failed", "red")

    # Opting a single report out drops it from the count but not the color.
    report_a.count_towards_summary = False
    result = build_summary_stats_line({"failed": (report_a, report_b)})
    assert result == ("1 failed", "red")
| 
 | ||||
| 
 | ||||
| class TestClassicOutputStyle(object): | ||||
|     """Ensure classic output style works as expected (#3883)""" | ||||
| 
 | ||||
|  |  | |||
		Loading…
	
		Reference in New Issue