add unit-tests for xfail and refine xfail handling and reporting
--HG-- branch : trunk
parent dd7fd97810
commit 28150c7486
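The change refines how the xfail mark is evaluated and reported: string conditions are evaluated with os, sys and config available, a reason keyword overrides the generated "condition: ..." explanation, and run=False skips execution and reports the test under "expected failures" with a "[NOTRUN]" prefix. A minimal sketch of the marker usage the new tests below exercise (the concrete conditions and reasons here are illustrative, not part of the patch). Example::

    import py

    @py.test.mark.xfail                      # plain expected failure
    def test_simple():
        assert 0

    @py.test.mark.xfail("hasattr(os, 'sep')", reason="known issue")  # string condition; reason is illustrative
    def test_conditional():
        assert 0

    @py.test.mark.xfail(run=False, reason="would hang")  # not executed, reported as [NOTRUN]
    def test_not_run():
        assert 0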
@@ -123,44 +123,83 @@ within test or setup code.  Example::
             py.test.skip("unsuppored configuration")
 
 """
-# XXX py.test.skip, .importorskip and the Skipped class
-# should also be defined in this plugin, requires thought/changes
 
 import py
 
+class MarkEvaluator:
+    def __init__(self, item, name):
+        self.item = item
+        self.name = name
+        self.holder = getattr(item.obj, name, None)
+
+    def __bool__(self):
+        return bool(self.holder)
+    __nonzero__ = __bool__
+
+    def istrue(self):
+        if self.holder:
+            d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
+            self.result = True
+            for expr in self.holder.args:
+                self.expr = expr
+                if isinstance(expr, str):
+                    result = cached_eval(self.item.config, expr, d)
+                else:
+                    result = expr
+                if not result:
+                    self.result = False
+                    self.expr = expr
+                    break
+        return getattr(self, 'result', False)
+
+    def get(self, attr, default=None):
+        return self.holder.kwargs.get(attr, default)
+
+    def getexplanation(self):
+        expl = self.get('reason', None)
+        if not expl:
+            if not hasattr(self, 'expr'):
+                return "condition: True"
+            else:
+                return "condition: " + self.expr
+        return expl
+
+
 def pytest_runtest_setup(item):
     if not isinstance(item, py.test.collect.Function):
         return
-    expr, result = evalexpression(item, 'skipif')
-    if result:
-        py.test.skip(expr)
-    holder = getattr(item.obj, 'xfail', None)
-    if holder and not holder.kwargs.get('run', True):
-        py.test.skip("<did not run>")
+    evalskip = MarkEvaluator(item, 'skipif')
+    if evalskip.istrue():
+        py.test.skip(evalskip.getexplanation())
+    item._evalxfail = MarkEvaluator(item, 'xfail')
+    if item._evalxfail.istrue():
+        if not item._evalxfail.get('run', True):
+            py.test.skip("xfail")
 
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, py.test.collect.Function):
         return
-    if call.when == "setup":
-        holder = getattr(item.obj, 'xfail', None)
-        if holder:
-            rep = __multicall__.execute()
-            reason = holder.kwargs.get("reason", "<no reason given>")
-            rep.keywords['xfail'] = "[not run] " + reason
-            return rep
+    evalxfail = getattr(item, '_evalxfail', None)
+    if not evalxfail:
         return
-    elif call.when == "call":
-        expr, result = evalexpression(item, 'xfail')
+    if call.when == "setup":
         rep = __multicall__.execute()
-        if result:
+        if rep.skipped and evalxfail.istrue():
+            expl = evalxfail.getexplanation()
+            if not evalxfail.get("run", True):
+                expl = "[NOTRUN] " + expl
+            rep.keywords['xfail'] = expl
+        return rep
+    elif call.when == "call":
+        rep = __multicall__.execute()
+        if evalxfail.istrue():
             if call.excinfo:
                 rep.skipped = True
                 rep.failed = rep.passed = False
             else:
                 rep.skipped = rep.passed = False
                 rep.failed = True
-            rep.keywords['xfail'] = expr
+            rep.keywords['xfail'] = evalxfail.getexplanation()
         else:
             if 'xfail' in rep.keywords:
                 del rep.keywords['xfail']
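MarkEvaluator.istrue() treats the positional mark arguments as a conjunction: every argument must evaluate truthy, string arguments are evaluated with os, sys and config in the namespace, and the first falsy argument is remembered as self.expr for the explanation. A standalone sketch of that evaluation logic, without the cached_eval caching or explanation bookkeeping of the real plugin (the helper name is made up for illustration). Example::

    import os, sys

    def all_conditions_true(args, config=None):
        # mirrors the loop in MarkEvaluator.istrue(): string args are
        # eval()ed against os/sys/config, non-strings are used as-is,
        # and evaluation stops at the first falsy condition
        namespace = {'os': os, 'sys': sys, 'config': config}
        for expr in args:
            value = eval(expr, namespace) if isinstance(expr, str) else expr
            if not value:
                return False
        return True

    all_conditions_true(("hasattr(os, 'sep')",))   # True
    all_conditions_true(("False",))                # False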
@@ -190,43 +229,17 @@ def show_xfailed(terminalreporter):
             return
         tr.write_sep("_", "expected failures")
         for rep in xfailed:
-            entry = rep.longrepr.reprcrash
-            modpath = rep.item.getmodpath(includemodule=True)
-            pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
-            if rep.keywords['xfail']:
-                reason = rep.keywords['xfail'].strip()
-            else:
-                reason = rep.longrepr.reprcrash.message
-                i = reason.find("\n")
-                if i != -1:
-                    reason = reason[:i]
+            pos = terminalreporter.gettestid(rep.item)
+            reason = rep.keywords['xfail']
             tr._tw.line("%s %s" %(pos, reason))
 
     xpassed = terminalreporter.stats.get("xpassed")
     if xpassed:
         tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
         for rep in xpassed:
-            fspath, lineno, modpath = rep.item.reportinfo()
-            pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
-            tr._tw.line(pos)
-
-
-def evalexpression(item, keyword):
-    if isinstance(item, py.test.collect.Function):
-        markholder = getattr(item.obj, keyword, None)
-        result = False
-        if markholder:
-            d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config}
-            expr, result = None, True
-            for expr in markholder.args:
-                if isinstance(expr, str):
-                    result = cached_eval(item.config, expr, d)
-                else:
-                    result = expr
-                if not result:
-                    break
-            return expr, result
-    return None, False
+            pos = terminalreporter.gettestid(rep.item)
+            reason = rep.keywords['xfail']
+            tr._tw.line("%s %s" %(pos, reason))
 
 def cached_eval(config, expr, d):
     if not hasattr(config, '_evalcache'):
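With this reporting change, both the "expected failures" and the "UNEXPECTEDLY PASSING TESTS" sections print the test id from terminalreporter.gettestid(rep.item) followed by the explanation stored in rep.keywords['xfail'], instead of reconstructing positions from longrepr or reportinfo(). A usage sketch based on the testdir funcarg, mirroring the fnmatch patterns asserted in the new tests (the test and module names here are illustrative). Example::

    def test_xfail_report_lines(testdir):
        p = testdir.makepyfile(test_one="""
            import py
            @py.test.mark.xfail(run=False, reason="noway")
            def test_this():
                assert 0
        """)
        result = testdir.runpytest(p, '--report=xfailed')
        result.stdout.fnmatch_lines([
            "*expected failures*",
            "*test_one*test_this*NOTRUN*noway*",
        ])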
@@ -1,6 +1,114 @@
 import py
 
-def test_xfail_not_report_default(testdir):
+from py._plugin.pytest_skipping import MarkEvaluator
+from py._plugin.pytest_skipping import pytest_runtest_setup
+from py._plugin.pytest_runner import runtestprotocol
+
+class TestEvaluator:
+    def test_no_marker(self, testdir):
+        item = testdir.getitem("def test_func(): pass")
+        evalskipif = MarkEvaluator(item, 'skipif')
+        assert not evalskipif
+        assert not evalskipif.istrue()
+
+    def test_marked_no_args(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xyz
+            def test_func():
+                pass
+        """)
+        ev = MarkEvaluator(item, 'xyz')
+        assert ev
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == "condition: True"
+        assert not ev.get("run", False)
+
+    def test_marked_one_arg(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xyz("hasattr(os, 'sep')")
+            def test_func():
+                pass
+        """)
+        ev = MarkEvaluator(item, 'xyz')
+        assert ev
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == "condition: hasattr(os, 'sep')"
+
+    def test_marked_one_arg_with_reason(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
+            def test_func():
+                pass
+        """)
+        ev = MarkEvaluator(item, 'xyz')
+        assert ev
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == "hello world"
+        assert ev.get("attr") == 2
+
+    def test_skipif_class(self, testdir):
+        item, = testdir.getitems("""
+            import py
+            class TestClass:
+                pytestmark = py.test.mark.skipif("config._hackxyz")
+                def test_func(self):
+                    pass
+        """)
+        item.config._hackxyz = 3
+        ev = MarkEvaluator(item, 'skipif')
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == "condition: config._hackxyz"
+
+
+class TestXFail:
+    def test_xfail_simple(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 0
+        """)
+        reports = runtestprotocol(item, log=False)
+        assert len(reports) == 3
+        callreport = reports[1]
+        assert callreport.skipped
+        expl = callreport.keywords['xfail']
+        assert expl == "condition: True"
+
+    def test_xfail_xpassed(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 1
+        """)
+        reports = runtestprotocol(item, log=False)
+        assert len(reports) == 3
+        callreport = reports[1]
+        assert callreport.failed
+        expl = callreport.keywords['xfail']
+        assert expl == "condition: True"
+
+    def test_xfail_evalfalse_but_fails(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xfail('False')
+            def test_func():
+                assert 0
+        """)
+        reports = runtestprotocol(item, log=False)
+        callreport = reports[1]
+        assert callreport.failed
+        assert 'xfail' not in callreport.keywords
+
+    def test_xfail_not_report_default(self, testdir):
         p = testdir.makepyfile(test_one="""
             import py
             @py.test.mark.xfail
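The new TestXFail tests drive a single item through runtestprotocol(item, log=False), which returns one report per phase (setup, call, teardown), and then inspect the call-phase report for the xfail keyword set by pytest_runtest_makereport. A small sketch of that inspection pattern (the helper name is made up for illustration). Example::

    from py._plugin.pytest_runner import runtestprotocol

    def call_phase_xfail_info(item):
        # reports[0] = setup, reports[1] = call, reports[2] = teardown
        reports = runtestprotocol(item, log=False)
        callreport = reports[1]
        return callreport.skipped, callreport.failed, callreport.keywords.get('xfail')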
@@ -12,30 +120,67 @@ def test_xfail_not_report_default(testdir):
             "*1 expected failures*--report=xfailed*",
         ])
 
-def test_xfail_not_run(testdir):
+    def test_xfail_not_run_xfail_reporting(self, testdir):
         p = testdir.makepyfile(test_one="""
             import py
             @py.test.mark.xfail(run=False, reason="noway")
             def test_this():
                 assert 0
-        @py.test.mark.xfail("True", run=False, reason="noway")
+            @py.test.mark.xfail("True", run=False)
             def test_this_true():
                 assert 0
-        @py.test.mark.xfail("False", run=True, reason="huh")
+            @py.test.mark.xfail("False", run=False, reason="huh")
             def test_this_false():
                 assert 1
         """)
-    result = testdir.runpytest(p, '-v')
-    result.stdout.fnmatch_lines([
-        "*2 expected failures*--report=xfailed*",
-        "*1 passed*",
-    ])
         result = testdir.runpytest(p, '--report=xfailed', )
         result.stdout.fnmatch_lines([
-        "*test_one*test_this*not run*noway",
-        "*test_one*test_this_true*not run*noway",
+            "*test_one*test_this*NOTRUN*noway",
+            "*test_one*test_this_true*NOTRUN*condition:*True*",
+            "*1 passed*",
         ])
 
+    def test_xfail_xpass(self, testdir):
+        p = testdir.makepyfile(test_one="""
+            import py
+            @py.test.mark.xfail
+            def test_that():
+                assert 1
+        """)
+        result = testdir.runpytest(p, '--report=xfailed')
+        result.stdout.fnmatch_lines([
+            "*UNEXPECTEDLY PASSING*",
+            "*test_that*",
+            "*1 xpassed*"
+        ])
+        assert result.ret == 1
+
+
+class TestSkipif:
+    def test_skipif_conditional(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.skipif("hasattr(os, 'sep')")
+            def test_func():
+                pass
+        """)
+        x = py.test.raises(py.test.skip.Exception, "pytest_runtest_setup(item)")
+        assert x.value.msg == "condition: hasattr(os, 'sep')"
+
+    def test_skipif_reporting(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            @py.test.mark.skipif("hasattr(sys, 'platform')")
+            def test_that():
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-s', '--report=skipped')
+        result.stdout.fnmatch_lines([
+            "*Skipped*platform*",
+            "*1 skipped*"
+        ])
+        assert result.ret == 0
+
 def test_skip_not_report_default(testdir):
     p = testdir.makepyfile(test_one="""
         import py
@@ -47,69 +192,6 @@ def test_skip_not_report_default(testdir):
         "*1 skipped*--report=skipped*",
     ])
 
-def test_xfail_decorator(testdir):
-    p = testdir.makepyfile(test_one="""
-        import py
-        @py.test.mark.xfail
-        def test_this():
-            assert 0
-
-        @py.test.mark.xfail
-        def test_that():
-            assert 1
-    """)
-    result = testdir.runpytest(p, '--report=xfailed')
-    result.stdout.fnmatch_lines([
-        "*expected failures*",
-        "*test_one.test_this*test_one.py:4*",
-        "*UNEXPECTEDLY PASSING*",
-        "*test_that*",
-        "*1 xfailed*"
-    ])
-    assert result.ret == 1
-
-def test_xfail_at_module(testdir):
-    p = testdir.makepyfile("""
-        import py
-        pytestmark = py.test.mark.xfail('True')
-        def test_intentional_xfail():
-            assert 0
-    """)
-    result = testdir.runpytest(p, '--report=xfailed')
-    result.stdout.fnmatch_lines([
-        "*expected failures*",
-        "*test_intentional_xfail*:4*",
-        "*1 xfailed*"
-    ])
-    assert result.ret == 0
-
-def test_xfail_evalfalse_but_fails(testdir):
-    p = testdir.makepyfile("""
-        import py
-        @py.test.mark.xfail('False')
-        def test_fail():
-            assert 0
-    """)
-    result = testdir.runpytest(p, '--report=xfailed')
-    result.stdout.fnmatch_lines([
-        "*test_xfail_evalfalse_but_fails*:4*",
-        "*1 failed*"
-    ])
-    assert result.ret == 1
-
-def test_skipif_decorator(testdir):
-    p = testdir.makepyfile("""
-        import py
-        @py.test.mark.skipif("hasattr(sys, 'platform')")
-        def test_that():
-            assert 0
-    """)
-    result = testdir.runpytest(p, '--report=skipped')
-    result.stdout.fnmatch_lines([
-        "*Skipped*platform*",
-        "*1 skipped*"
-    ])
-    assert result.ret == 0
-
 def test_skipif_class(testdir):
     p = testdir.makepyfile("""
@@ -127,19 +209,6 @@ def test_skipif_class(testdir):
         "*2 skipped*"
     ])
 
-def test_evalexpression_cls_config_example(testdir):
-    from py._plugin.pytest_skipping import evalexpression
-    item, = testdir.getitems("""
-        import py
-        class TestClass:
-            pytestmark = py.test.mark.skipif("config._hackxyz")
-            def test_func(self):
-                pass
-    """)
-    item.config._hackxyz = 3
-    x, y = evalexpression(item, 'skipif')
-    assert x == 'config._hackxyz'
-    assert y == 3
-
 def test_skip_reasons_folding():
     from py._plugin import pytest_runner as runner