Fix strict xfail: it should behave exactly like xfail when a test fails
parent ee88679c54
commit 0eeb466f11
@@ -186,27 +186,33 @@ def pytest_runtest_setup(item):
 @pytest.mark.hookwrapper
 def pytest_pyfunc_call(pyfuncitem):
     check_xfail_no_run(pyfuncitem)
-    yield
-    evalxfail = pyfuncitem._evalxfail
-    if evalxfail.istrue() and _is_strict_xfail(evalxfail, pyfuncitem.config):
-        del pyfuncitem._evalxfail
-        explanation = evalxfail.getexplanation()
-        pytest.fail('[XPASS(strict)] ' + explanation,
-                    pytrace=False)
-
-
-def _is_strict_xfail(evalxfail, config):
-    default = config.getini('xfail_strict')
-    return evalxfail.get('strict', default)
+    outcome = yield
+    passed = outcome.excinfo is None
+    if passed:
+        check_strict_xfail(pyfuncitem)


 def check_xfail_no_run(item):
+    """check xfail(run=False)"""
     if not item.config.option.runxfail:
         evalxfail = item._evalxfail
         if evalxfail.istrue():
             if not evalxfail.get('run', True):
                 pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())


+def check_strict_xfail(pyfuncitem):
+    """check xfail(strict=True) for the given PASSING test"""
+    evalxfail = pyfuncitem._evalxfail
+    if evalxfail.istrue():
+        strict_default = pyfuncitem.config.getini('xfail_strict')
+        is_strict_xfail = evalxfail.get('strict', strict_default)
+        if is_strict_xfail:
+            del pyfuncitem._evalxfail
+            explanation = evalxfail.getexplanation()
+            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+
+
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
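In user-facing terms, the fix means the strict XPASS check now runs only when the call phase actually passed (`outcome.excinfo is None`); previously the hookwrapper ran it unconditionally after `yield`, so a strict xfail test that failed was wrongly reported as `[XPASS(strict)]` instead of a plain XFAIL. A minimal sketch of the resulting behavior (test names and the reason string are made up for illustration):

import pytest

@pytest.mark.xfail(reason='flaky backend', strict=True)
def test_expected_failure():
    # Fails as predicted: reported as XFAIL, exactly like a non-strict xfail.
    assert 0

@pytest.mark.xfail(reason='flaky backend', strict=True)
def test_unexpected_pass():
    # Passes although marked xfail: check_strict_xfail() calls
    # pytest.fail('[XPASS(strict)] flaky backend'), failing the run.
    assert 1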
@@ -127,13 +127,15 @@ class TestEvaluator:


 class TestXFail:
-    def test_xfail_simple(self, testdir):
+    @pytest.mark.parametrize('strict', [True, False])
+    def test_xfail_simple(self, testdir, strict):
         item = testdir.getitem("""
             import pytest
-            @pytest.mark.xfail
+            @pytest.mark.xfail(strict=%s)
             def test_func():
                 assert 0
-        """)
+        """ % strict)
         reports = runtestprotocol(item, log=False)
         assert len(reports) == 3
         callreport = reports[1]
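The `assert len(reports) == 3` line relies on `runtestprotocol` returning one `TestReport` per phase: setup, call, teardown. A rough self-contained sketch of what those assertions inspect (the test name is hypothetical; assumes the `testdir` fixture from the `pytester` plugin and the `wasxfail` attribute pytest sets on xfailed call reports):

from _pytest.runner import runtestprotocol

def test_xfail_report_phases(testdir):
    # requires the pytester plugin (pytest_plugins = "pytester" in conftest.py)
    item = testdir.getitem("""
        import pytest
        @pytest.mark.xfail(strict=True)
        def test_func():
            assert 0
    """)
    # One TestReport per phase: setup, call, teardown.
    setup_rep, call_rep, teardown_rep = runtestprotocol(item, log=False)
    assert call_rep.when == 'call'
    # A failing xfail test (strict or not, after this fix) yields a skipped
    # call report carrying the explanation on its wasxfail attribute.
    assert call_rep.skipped
    assert hasattr(call_rep, 'wasxfail')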
@@ -350,6 +352,23 @@ class TestXFail:
             matchline,
         ])

+    def test_strict_sanity(self, testdir):
+        """sanity check for xfail(strict=True): a failing test should behave
+        exactly like a normal xfail.
+        """
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(reason='unsupported feature', strict=True)
+            def test_foo():
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            '*XFAIL*',
+            '*unsupported feature*',
+        ])
+        assert result.ret == 0
+
     @pytest.mark.parametrize('strict', [True, False])
     def test_strict_xfail(self, testdir, strict):
         p = testdir.makepyfile("""
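Because `check_strict_xfail` falls back to `config.getini('xfail_strict')` when the mark carries no explicit `strict=` argument, strictness can also be flipped project-wide. A hypothetical companion test sketching that path (not part of this commit; assumes the `xfail_strict` ini option registered by the plugin and the `testdir` fixture):

def test_strict_xfail_default_from_ini(testdir):
    # Assumption for illustration: with xfail_strict = true in the ini, a bare
    # @pytest.mark.xfail is treated as strict, so an unexpected pass fails the run.
    testdir.makeini("""
        [pytest]
        xfail_strict = true
    """)
    p = testdir.makepyfile("""
        import pytest
        @pytest.mark.xfail(reason='unsupported feature')
        def test_foo():
            assert 1
    """)
    result = testdir.runpytest(p)
    result.stdout.fnmatch_lines(['*XPASS(strict)*unsupported feature*'])
    assert result.ret != 0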