add new parameters:

xfail(run=False) will not run expected-to-fail tests
xfail(reason="...") will report the specified reason

--HG--
branch : trunk
parent 82d4aae571
commit 1a8b2838fa
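As a quick illustration of the commit message above, here is a minimal usage sketch of the two new keyword arguments; the test names and reason strings below are made up for illustration and are not part of this commit::

    import py

    @py.test.mark.xfail(run=False, reason="hangs the interpreter on this platform")
    def test_not_executed():
        # run=False: the test body is never executed, yet the test is
        # still reported in the expected-failure ("x") category
        assert 0

    @py.test.mark.xfail(reason="known bug")
    def test_known_bug():
        # the test runs normally; per the changelog entry below, the
        # reason string is printed when the expected failure is reported
        assert 0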
@@ -14,6 +14,9 @@ Changes between 1.2.1 and 1.3.0 (release pending)
 - new pytest_pycollect_makemodule(path, parent) hook for
   allowing customization of the Module collection object for a
   matching test module.
+- extend py.test.mark.xfail to accept two more keyword arg parameters:
+  ``xfail(run=False)`` will not run the decorated test
+  ``xfail(reason="...")`` will print the reason string when reporting
 - expose (previously internal) commonly useful methods:
   py.io.get_terminal_with() -> return terminal width
   py.io.ansi_print(...) -> print colored/bold text on linux/win32
@@ -83,10 +83,17 @@ Same as with skipif_ you can also selectively expect a failure
 depending on platform::

     @py.test.mark.xfail("sys.version_info >= (3,0)")
     def test_function():
         ...

+To not run a test and still regard it as "xfailed"::
+
+    @py.test.mark.xfail(..., run=False)
+
+To specify an explicit reason to be shown with xfailure detail::
+
+    @py.test.mark.xfail(..., reason="my reason")
+

 skipping on a missing import dependency
 --------------------------------------------------
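The condition string and the new keyword arguments can also be combined on a single marker. A sketch of such a combined usage, mirroring the test added further below (the condition and reason strings are illustrative)::

    @py.test.mark.xfail("sys.version_info >= (3,0)", run=False, reason="not ported yet")
    def test_function():
        ...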
@@ -123,13 +130,27 @@ import py


 def pytest_runtest_setup(item):
+    if not isinstance(item, py.test.collect.Function):
+        return
     expr, result = evalexpression(item, 'skipif')
     if result:
         py.test.skip(expr)
+    holder = getattr(item.obj, 'xfail', None)
+    if holder and not holder.kwargs.get('run', True):
+        py.test.skip("<did not run>")

 def pytest_runtest_makereport(__multicall__, item, call):
-    if call.when != "call":
+    if not isinstance(item, py.test.collect.Function):
         return
-    expr, result = evalexpression(item, 'xfail')
-    rep = __multicall__.execute()
-    if result:
+    if call.when == "setup":
+        holder = getattr(item.obj, 'xfail', None)
+        if holder:
+            rep = __multicall__.execute()
+            reason = holder.kwargs.get("reason", "<no reason given>")
+            rep.keywords['xfail'] = "[not run] " + reason
+            return rep
+        return
+    elif call.when == "call":
+        expr, result = evalexpression(item, 'xfail')
+        rep = __multicall__.execute()
+        if result:
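The hooks above locate the marker via ``getattr(item.obj, 'xfail', None)`` and read its keyword arguments from ``holder.kwargs``. A minimal, self-contained model of that mechanism; this is NOT the real py.test.mark implementation, only a sketch of how a decorator can attach its keyword arguments to the test function::

    # simplified stand-in for py.test.mark.xfail (hypothetical, for illustration only)
    class MarkHolder:
        def __init__(self, args, kwargs):
            self.args = args
            self.kwargs = kwargs

    def xfail(*args, **kwargs):
        def decorate(func):
            # attach the recorded arguments under the marker name,
            # which is what getattr(item.obj, 'xfail', None) later finds
            func.xfail = MarkHolder(args, kwargs)
            return func
        return decorate

    @xfail(run=False, reason="noway")
    def test_this():
        assert 0

    holder = getattr(test_this, 'xfail', None)
    assert holder.kwargs.get('run', True) is False
    assert holder.kwargs.get('reason', '<no reason given>') == "noway"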
@@ -151,7 +172,7 @@ def pytest_report_teststatus(report):
         if report.skipped:
             return "xfailed", "x", "xfail"
         elif report.failed:
-            return "xpassed", "P", "xpass"
+            return "xpassed", "P", "XPASS"

 # called by the terminalreporter instance/plugin
 def pytest_terminal_summary(terminalreporter):
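The triple returned by pytest_report_teststatus is (stats category, progress letter, verbose word); this hunk only changes the verbose word for an unexpectedly passing xfail test from "xpass" to "XPASS". A hypothetical conftest.py sketch following the same return convention (the 'slow' keyword and the strings are made up)::

    def pytest_report_teststatus(report):
        if 'slow' in report.keywords and report.passed:
            # (category, short progress letter, verbose word)
            return "slow passed", ",", "SLOW PASS"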
@@ -172,6 +193,9 @@ def show_xfailed(terminalreporter):
         entry = rep.longrepr.reprcrash
         modpath = rep.item.getmodpath(includemodule=True)
         pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
-        reason = rep.longrepr.reprcrash.message
+        if rep.keywords['xfail']:
+            reason = rep.keywords['xfail'].strip()
+        else:
+            reason = rep.longrepr.reprcrash.message
         i = reason.find("\n")
         if i != -1:
@@ -159,6 +159,9 @@ def test_generic(testdir, LineMatcher):
         @py.test.mark.xfail
         def test_xfail():
             assert 0
+        @py.test.mark.xfail(run=False)
+        def test_xfail_norun():
+            assert 0
     """)
     testdir.runpytest("--resultlog=result.log")
     lines = testdir.tmpdir.join("result.log").readlines(cr=0)
@@ -167,5 +170,6 @@ def test_generic(testdir, LineMatcher):
         "F *:test_fail",
         "s *:test_skip",
         "x *:test_xfail",
+        "x *:test_xfail_norun",
     ])

@@ -12,6 +12,30 @@ def test_xfail_not_report_default(testdir):
         "*1 expected failures*--report=xfailed*",
     ])

+def test_xfail_not_run(testdir):
+    p = testdir.makepyfile(test_one="""
+        import py
+        @py.test.mark.xfail(run=False, reason="noway")
+        def test_this():
+            assert 0
+        @py.test.mark.xfail("True", run=False, reason="noway")
+        def test_this_true():
+            assert 0
+        @py.test.mark.xfail("False", run=True, reason="huh")
+        def test_this_false():
+            assert 1
+    """)
+    result = testdir.runpytest(p, '-v')
+    result.stdout.fnmatch_lines([
+        "*2 expected failures*--report=xfailed*",
+        "*1 passed*",
+    ])
+    result = testdir.runpytest(p, '--report=xfailed', )
+    result.stdout.fnmatch_lines([
+        "*test_one*test_this*not run*noway",
+        "*test_one*test_this_true*not run*noway",
+    ])
+
 def test_skip_not_report_default(testdir):
     p = testdir.makepyfile(test_one="""
         import py