Fix strict xfail: it should behave exactly like xfail when a test fails
parent ee88679c54
commit 0eeb466f11
@@ -186,27 +186,33 @@ def pytest_runtest_setup(item):
 @pytest.mark.hookwrapper
 def pytest_pyfunc_call(pyfuncitem):
     check_xfail_no_run(pyfuncitem)
-    yield
-    evalxfail = pyfuncitem._evalxfail
-    if evalxfail.istrue() and _is_strict_xfail(evalxfail, pyfuncitem.config):
-        del pyfuncitem._evalxfail
-        explanation = evalxfail.getexplanation()
-        pytest.fail('[XPASS(strict)] ' + explanation,
-                    pytrace=False)
-
-
-def _is_strict_xfail(evalxfail, config):
-    default = config.getini('xfail_strict')
-    return evalxfail.get('strict', default)
+    outcome = yield
+    passed = outcome.excinfo is None
+    if passed:
+        check_strict_xfail(pyfuncitem)
 
 
 def check_xfail_no_run(item):
     """check xfail(run=False)"""
     if not item.config.option.runxfail:
         evalxfail = item._evalxfail
         if evalxfail.istrue():
             if not evalxfail.get('run', True):
                 pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
 
 
+def check_strict_xfail(pyfuncitem):
+    """check xfail(strict=True) for the given PASSING test"""
+    evalxfail = pyfuncitem._evalxfail
+    if evalxfail.istrue():
+        strict_default = pyfuncitem.config.getini('xfail_strict')
+        is_strict_xfail = evalxfail.get('strict', strict_default)
+        if is_strict_xfail:
+            del pyfuncitem._evalxfail
+            explanation = evalxfail.getexplanation()
+            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+
+
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
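Note that check_strict_xfail above falls back to the xfail_strict ini option only when the marker itself does not pass strict=; an explicit strict argument on the marker wins (evalxfail.get('strict', strict_default)). A minimal sketch of that precedence, with a hypothetical test name:

    import pytest

    # Even if xfail_strict = true is set in the ini file, strict=False on the
    # marker keeps the lenient behaviour: an unexpected pass is reported as
    # XPASS instead of failing the run.
    @pytest.mark.xfail(reason='unsupported feature', strict=False)
    def test_opt_out_of_strict():
        assert 1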
@@ -127,13 +127,15 @@ class TestEvaluator:
 
 
 class TestXFail:
-    def test_xfail_simple(self, testdir):
+
+    @pytest.mark.parametrize('strict', [True, False])
+    def test_xfail_simple(self, testdir, strict):
         item = testdir.getitem("""
             import pytest
-            @pytest.mark.xfail
+            @pytest.mark.xfail(strict=%s)
             def test_func():
                 assert 0
-        """)
+        """ % strict)
         reports = runtestprotocol(item, log=False)
         assert len(reports) == 3
         callreport = reports[1]
@@ -350,6 +352,23 @@ class TestXFail:
             matchline,
         ])
 
+    def test_strict_sanity(self, testdir):
+        """sanity check for xfail(strict=True): a failing test should behave
+        exactly like a normal xfail.
+        """
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(reason='unsupported feature', strict=True)
+            def test_foo():
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            '*XFAIL*',
+            '*unsupported feature*',
+        ])
+        assert result.ret == 0
+
     @pytest.mark.parametrize('strict', [True, False])
     def test_strict_xfail(self, testdir, strict):
         p = testdir.makepyfile("""