Merge pull request #1795 from hackebrot/fix-report-outcome-for-xpass
WIP Change outcome to 'passed' for xfail unless it's strict
commit 3e685d6a8d
@@ -79,6 +79,20 @@
 
 *
 
+**Changes**
+
+* Change ``report.outcome`` for ``xpassed`` tests to ``"passed"`` in non-strict
+  mode and ``"failed"`` in strict mode. Thanks to `@hackebrot`_ for the PR
+  (`#1795`_) and `@gprasad84`_ for report (`#1546`_).
+
+* Tests marked with ``xfail(strict=False)`` (the default) now appear in
+  JUnitXML reports as passing tests instead of skipped.
+  Thanks to `@hackebrot`_ for the PR (`#1795`_).
+
+.. _#1795: https://github.com/pytest-dev/pytest/pull/1795
+.. _#1546: https://github.com/pytest-dev/pytest/issues/1546
+.. _@gprasad84: https://github.com/gprasad84
+
 .. _#1210: https://github.com/pytest-dev/pytest/issues/1210
 .. _#1435: https://github.com/pytest-dev/pytest/issues/1435
 .. _#1471: https://github.com/pytest-dev/pytest/issues/1471
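
For illustration, the behaviour these changelog entries describe looks roughly like this from a test author's point of view (the module name and reasons below are invented for the example; the comments state the outcomes this change produces with default settings):

    # test_xpass_example.py -- illustrative sketch only
    import pytest

    @pytest.mark.xfail(reason="expected to fail")
    def test_non_strict_xpass():
        # strict=False is the default: if this passes it is reported as XPASS,
        # report.outcome == "passed", and JUnitXML counts it as a passing test.
        assert True

    @pytest.mark.xfail(strict=True, reason="expected to fail")
    def test_strict_xpass():
        # strict=True: an unexpected pass becomes a real failure,
        # report.outcome == "failed" with longrepr "[XPASS(strict)] expected to fail".
        assert True
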
@@ -220,6 +220,18 @@ def check_strict_xfail(pyfuncitem):
             pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
 
 
+def _is_unittest_unexpected_success_a_failure():
+    """Return if the test suite should fail if a @expectedFailure unittest test PASSES.
+
+    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
+        Changed in version 3.4: Returns False if there were any
+        unexpectedSuccesses from tests marked with the expectedFailure() decorator.
+
+    TODO: this should be moved to the "compat" module.
+    """
+    return sys.version_info >= (3, 4)
+
+
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
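
The helper introduced above encodes a documented change in unittest itself: starting with Python 3.4, an unexpected success makes ``TestResult.wasSuccessful()`` return False. A small stand-alone sketch (class and test names are made up for the example) showing the split the helper mirrors:

    # unexpected_success_demo.py -- illustrative sketch only
    import sys
    import unittest

    class MyCase(unittest.TestCase):
        @unittest.expectedFailure
        def test_unexpectedly_passes(self):
            assert True  # passes although marked expectedFailure

    if __name__ == "__main__":
        suite = unittest.defaultTestLoader.loadTestsFromTestCase(MyCase)
        result = unittest.TextTestRunner(verbosity=0).run(suite)
        # False on Python >= 3.4 (unexpected successes count against the run),
        # True on older versions -- exactly the version split returned above.
        print(sys.version_info[:2], result.wasSuccessful())
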
@@ -228,9 +240,15 @@ def pytest_runtest_makereport(item, call):
     evalskip = getattr(item, '_evalskip', None)
     # unitttest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
-        # we need to translate into how pytest encodes xpass
-        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
-        rep.outcome = "failed"
+        if item._unexpectedsuccess:
+            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
+        else:
+            rep.longrepr = "Unexpected success"
+        if _is_unittest_unexpected_success_a_failure():
+            rep.outcome = "failed"
+        else:
+            rep.outcome = "passed"
+            rep.wasxfail = rep.longrepr
     elif item.config.option.runxfail:
         pass   # don't interefere
     elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
@@ -245,8 +263,15 @@ def pytest_runtest_makereport(item, call):
                 rep.outcome = "skipped"
                 rep.wasxfail = evalxfail.getexplanation()
         elif call.when == "call":
-            rep.outcome = "failed"  # xpass outcome
-            rep.wasxfail = evalxfail.getexplanation()
+            strict_default = item.config.getini('xfail_strict')
+            is_strict_xfail = evalxfail.get('strict', strict_default)
+            explanation = evalxfail.getexplanation()
+            if is_strict_xfail:
+                rep.outcome = "failed"
+                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
+            else:
+                rep.outcome = "passed"
+                rep.wasxfail = explanation
     elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
         # skipped by mark.skipif; change the location of the failure
         # to point to the item definition, otherwise it will display
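
The xpass branch above takes the marker's own ``strict`` argument and falls back to the ``xfail_strict`` ini option for markers that do not set it, so a project can make strict behaviour the default globally while individual markers may still override it. A minimal configuration sketch, assuming an ini-style config file:

    # pytest.ini (sketch)
    [pytest]
    xfail_strict = true
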
@@ -260,7 +285,7 @@ def pytest_report_teststatus(report):
     if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "xfail"
-        elif report.failed:
+        elif report.passed:
             return "xpassed", "X", ("XPASS", {'yellow': True})
 
 # called by the terminalreporter instance/plugin
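
Because ``pytest_report_teststatus`` now matches xpass on ``report.passed``, downstream plugins and conftest hooks see a non-strict xpass as a passing report carrying a ``wasxfail`` attribute, and a strict xpass as an ordinary failure. A rough, purely illustrative ``conftest.py`` sketch for observing both cases:

    # conftest.py -- illustrative sketch only
    def pytest_runtest_logreport(report):
        # Non-strict xpass: outcome "passed" plus a "wasxfail" attribute.
        if report.when == "call" and report.passed and hasattr(report, "wasxfail"):
            print("XPASS (non-strict):", report.nodeid, "-", report.wasxfail)
        # Strict xpass: a plain failure whose longrepr starts with "[XPASS(strict)]".
        if report.when == "call" and report.failed and str(report.longrepr).startswith("[XPASS(strict)]"):
            print("XPASS (strict) reported as failure:", report.nodeid)
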
@@ -1080,22 +1080,23 @@ class TestMarkersWithParametrization:
         reprec = testdir.inline_run()
         reprec.assertoutcome(passed=2, skipped=1)
 
-    def test_xfail_passing_is_xpass(self, testdir):
+    @pytest.mark.parametrize('strict', [True, False])
+    def test_xfail_passing_is_xpass(self, testdir, strict):
         s = """
             import pytest
 
             @pytest.mark.parametrize(("n", "expected"), [
                 (1, 2),
-                pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
+                pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})((2, 3)),
                 (3, 4),
             ])
             def test_increment(n, expected):
                 assert n + 1 == expected
-        """
+        """.format(strict=strict)
         testdir.makepyfile(s)
         reprec = testdir.inline_run()
-        # xpass is fail, obviously :)
-        reprec.assertoutcome(passed=2, failed=1)
+        passed, failed = (2, 1) if strict else (3, 0)
+        reprec.assertoutcome(passed=passed, failed=failed)
 
     def test_parametrize_called_in_generate_tests(self, testdir):
         s = """
@@ -592,6 +592,7 @@ class TestRootdir:
         assert inicfg == {}
 
     def test_nothing(self, tmpdir):
+        tmpdir.chdir()
         rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
         assert rootdir == tmpdir
         assert inifile is None
@@ -603,6 +604,7 @@ class TestRootdir:
         assert rootdir == tmpdir
 
     def test_with_arg_outside_cwd_without_inifile(self, tmpdir):
+        tmpdir.chdir()
         a = tmpdir.mkdir("a")
         b = tmpdir.mkdir("b")
         rootdir, inifile, inicfg = determine_setup(None, [a, b])
@@ -100,7 +100,7 @@ class TestPython:
         result, dom = runandparse(testdir)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
-        node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=5)
+        node.assert_attr(name="pytest", errors=0, failures=1, skips=2, tests=5)
 
     def test_summing_simple_with_errors(self, testdir):
         testdir.makepyfile("""
@@ -115,13 +115,16 @@ class TestPython:
             def test_error(fixture):
                 pass
             @pytest.mark.xfail
+            def test_xfail():
+                assert False
+            @pytest.mark.xfail(strict=True)
             def test_xpass():
-                assert 1
+                assert True
         """)
         result, dom = runandparse(testdir)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
-        node.assert_attr(name="pytest", errors=1, failures=1, skips=1, tests=4)
+        node.assert_attr(name="pytest", errors=1, failures=2, skips=1, tests=5)
 
     def test_timing_function(self, testdir):
         testdir.makepyfile("""
@@ -346,16 +349,33 @@ class TestPython:
         result, dom = runandparse(testdir)
         # assert result.ret
         node = dom.find_first_by_tag("testsuite")
-        node.assert_attr(skips=1, tests=1)
+        node.assert_attr(skips=0, tests=1)
         tnode = node.find_first_by_tag("testcase")
         tnode.assert_attr(
             file="test_xfailure_xpass.py",
             line="1",
             classname="test_xfailure_xpass",
             name="test_xpass")
-        fnode = tnode.find_first_by_tag("skipped")
-        fnode.assert_attr(message="xfail-marked test passes unexpectedly")
-        # assert "ValueError" in fnode.toxml()
+
+    def test_xfailure_xpass_strict(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(strict=True, reason="This needs to fail!")
+            def test_xpass():
+                pass
+        """)
+        result, dom = runandparse(testdir)
+        # assert result.ret
+        node = dom.find_first_by_tag("testsuite")
+        node.assert_attr(skips=0, tests=1)
+        tnode = node.find_first_by_tag("testcase")
+        tnode.assert_attr(
+            file="test_xfailure_xpass_strict.py",
+            line="1",
+            classname="test_xfailure_xpass_strict",
+            name="test_xpass")
+        fnode = tnode.find_first_by_tag("failure")
+        fnode.assert_attr(message="[XPASS(strict)] This needs to fail!")
 
     def test_collect_error(self, testdir):
         testdir.makepyfile("syntax error")
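
The JUnitXML tests above assert the suite-level counters (``tests``, ``failures``, ``skips``, ``errors``) and the ``<failure>`` element emitted for a strict xpass. A rough way to look at the same attributes on a real run (the report file name is an example, exact counts depend on the suite, and the attribute names are the ones asserted above):

    # inspect_junitxml.py -- illustrative sketch only
    import xml.etree.ElementTree as ET
    import pytest

    # Write a JUnitXML report for the current test suite.
    pytest.main(["--junitxml=report.xml"])

    root = ET.parse("report.xml").getroot()
    suite = root if root.tag == "testsuite" else root.find("testsuite")
    # Non-strict xpasses now count as passing tests; strict xpasses add to "failures".
    print({key: suite.get(key) for key in ("tests", "failures", "skips", "errors")})
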
@@ -145,7 +145,20 @@ class TestXFail:
     def test_xfail_xpassed(self, testdir):
         item = testdir.getitem("""
             import pytest
-            @pytest.mark.xfail
+            @pytest.mark.xfail(reason="this is an xfail")
+            def test_func():
+                assert 1
+        """)
+        reports = runtestprotocol(item, log=False)
+        assert len(reports) == 3
+        callreport = reports[1]
+        assert callreport.passed
+        assert callreport.wasxfail == "this is an xfail"
+
+    def test_xfail_xpassed_strict(self, testdir):
+        item = testdir.getitem("""
+            import pytest
+            @pytest.mark.xfail(strict=True, reason="nope")
             def test_func():
                 assert 1
         """)
@@ -153,7 +166,8 @@ class TestXFail:
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.failed
-        assert callreport.wasxfail == ""
+        assert callreport.longrepr == "[XPASS(strict)] nope"
+        assert not hasattr(callreport, "wasxfail")
 
     def test_xfail_run_anyway(self, testdir):
         testdir.makepyfile("""
@@ -419,8 +419,9 @@ class TestTrialUnittest:
                 def test_method(self):
                     pass
         """)
+        from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+        should_fail = _is_unittest_unexpected_success_a_failure()
         result = testdir.runpytest("-rxs")
-        assert result.ret == 0
         result.stdout.fnmatch_lines_random([
             "*XFAIL*test_trial_todo*",
             "*trialselfskip*",
@@ -429,8 +430,9 @@ class TestTrialUnittest:
             "*i2wanto*",
             "*sys.version_info*",
             "*skip_in_method*",
-            "*4 skipped*3 xfail*1 xpass*",
+            "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
         ])
+        assert result.ret == (1 if should_fail else 0)
 
     def test_trial_error(self, testdir):
         testdir.makepyfile("""
@@ -587,24 +589,62 @@ def test_unittest_typerror_traceback(testdir):
     assert "TypeError" in result.stdout.str()
     assert result.ret == 1
 
 
 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_unexpected_failure(testdir):
-    testdir.makepyfile("""
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
+    script = testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
-            def test_func1(self):
-                assert 0
-            @unittest.expectedFailure
-            def test_func2(self):
-                assert 1
+            def test_failing_test_is_xfail(self):
+                assert False
+        if __name__ == '__main__':
+            unittest.main()
     """)
-    result = testdir.runpytest("-rxX")
-    result.stdout.fnmatch_lines([
-        "*XFAIL*MyTestCase*test_func1*",
-        "*XPASS*MyTestCase*test_func2*",
-        "*1 xfailed*1 xpass*",
-    ])
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
+            "*1 xfailed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*OK*(expected failures=1)*",
+        ])
+    assert result.ret == 0
+
+
+@pytest.mark.skipif("sys.version_info < (2,7)")
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
+    script = testdir.makepyfile("""
+        import unittest
+        class MyTestCase(unittest.TestCase):
+            @unittest.expectedFailure
+            def test_passing_test_is_fail(self):
+                assert True
+        if __name__ == '__main__':
+            unittest.main()
+    """)
+    from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+    should_fail = _is_unittest_unexpected_success_a_failure()
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*MyTestCase*test_passing_test_is_fail*",
+            "*1 failed*" if should_fail else "*1 xpassed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*(unexpected successes=1)*",
+        ])
+
+    assert result.ret == (1 if should_fail else 0)
 
 
 @pytest.mark.parametrize('fix_type, stmt', [