fix issue9 wrong XPass with failing setup/teardown function of xfail marked test

Now when setup or teardown of a test item/function fails and the test is marked "xfail", it will show up as an xfailed test.
parent 2e80512bb8
commit 513482f4f7
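For context, a minimal reproducer for the bug (the file name is illustrative, not part of the commit): before this change, the failing setup_function below combined with the xfail mark produced a wrong XPASS; with it, the test is reported as xfail.

    # repro_issue9.py -- hypothetical reproducer, not from this commit
    import pytest

    def setup_function(func):
        assert 0  # setup fails deliberately

    @pytest.mark.xfail
    def test_func():
        pass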
CHANGELOG
@@ -1,6 +1,9 @@
 Changes between 2.0.0 and 2.0.1.dev1
 ----------------------------------------------
 
+- fix issue9: direct setup/teardown functions for an xfail-marked
+  test will report as xfail if they fail (a passing setup/teardown
+  is reported as normal).
 - fix issue8: no logging errors at process exit
 - refinements to "collecting" output on non-ttys
 - refine internal plugin registration and --traceconfig output
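The code hunk below deals with two flavors of xfail: the imperative pytest.xfail(reason) call, handled by the first context lines via the exception's .msg, and the declarative @pytest.mark.xfail marker, handled via evalxfail. For orientation, both are plain pytest API (test names here are illustrative):

    import pytest

    # declarative: mark the whole test as expected to fail
    @pytest.mark.xfail(reason="known bug")
    def test_declarative():
        assert 0

    # imperative: abort and mark as xfail from inside the test body
    def test_imperative():
        pytest.xfail("backend not available")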
_pytest/skipping.py
@@ -97,19 +97,19 @@ def pytest_runtest_makereport(__multicall__, item, call):
             rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
             rep.outcome = "skipped"
             return rep
-    if call.when == "call":
-        rep = __multicall__.execute()
-        evalxfail = getattr(item, '_evalxfail')
-        if not item.config.getvalue("runxfail") and evalxfail.istrue():
-            if call.excinfo:
-                rep.outcome = "skipped"
-            else:
-                rep.outcome = "failed"
-            rep.keywords['xfail'] = evalxfail.getexplanation()
-        else:
-            if 'xfail' in rep.keywords:
-                del rep.keywords['xfail']
-        return rep
+    rep = __multicall__.execute()
+    evalxfail = item._evalxfail
+    if not item.config.option.runxfail and evalxfail.istrue():
+        if call.excinfo:
+            rep.outcome = "skipped"
+            rep.keywords['xfail'] = evalxfail.getexplanation()
+        elif call.when == "call":
+            rep.outcome = "failed"
+            rep.keywords['xfail'] = evalxfail.getexplanation()
+    else:
+        if 'xfail' in rep.keywords:
+            del rep.keywords['xfail']
+    return rep
 
 # called by terminalreporter progress reporting
 def pytest_report_teststatus(report):
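The hunk stops where the status hook begins. The makereport hook above encodes the result as rep.outcome plus an 'xfail' keyword, and pytest_report_teststatus translates that pair into what the terminal reporter prints. The xfail branch of that 2.0-era hook behaves roughly like this (a paraphrase for orientation, not part of this diff):

    def pytest_report_teststatus(report):
        if 'xfail' in report.keywords:
            # outcome "skipped" + 'xfail' keyword -> expected failure
            if report.skipped:
                return "xfailed", "x", "xfail"
            # outcome "failed" + 'xfail' keyword -> unexpectedly passing
            elif report.failed:
                return "xpassed", "X", "XPASS"

This mapping is why the fix sets outcome "skipped" for any failing phase of an xfail-marked test: a failing setup or teardown now lands in the xfail column instead of feeding the XPASS path, while only a passing "call" phase keeps the outcome "failed" that reports as XPASS.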
testing/test_skipping.py
@@ -308,6 +308,37 @@ class TestXFail:
             "*1 xfailed*",
         ])
 
+class TestXFailwithSetupTeardown:
+    def test_failing_setup_issue9(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            def setup_function(func):
+                assert 0
+
+            @pytest.mark.xfail
+            def test_func():
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*1 xfail*",
+        ])
+
+    def test_failing_teardown_issue9(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            def teardown_function(func):
+                assert 0
+
+            @pytest.mark.xfail
+            def test_func():
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*1 xfail*",
+        ])
+
 
 class TestSkipif:
     def test_skipif_conditional(self, testdir):
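Both new tests exercise the failing case. The other half of the changelog sentence, that a passing setup/teardown keeps normal reporting, is not covered by this commit; a hypothetical companion test could look like the sketch below (the method name and the expected "*1 xpassed*" summary line are assumptions, not from the source).

    def test_passing_setup_reports_normally(self, testdir):
        # hypothetical companion test, not part of this commit: with a
        # passing setup_function, the xfail-marked but passing test
        # should still be reported as an unexpected pass (XPASS).
        testdir.makepyfile("""
            import pytest
            def setup_function(func):
                pass

            @pytest.mark.xfail
            def test_func():
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*1 xpassed*",
        ])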