diff --git a/AUTHORS b/AUTHORS
index dfc0a542e..92750acc3 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -69,6 +69,7 @@ Nicolas Delaby
 Pieter Mulder
 Piotr Banaszkiewicz
 Punyashloka Biswal
+Quentin Pradet
 Ralf Schmitt
 Raphael Pierzina
 Ronny Pfannschmidt
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 93197bba9..a64d64a12 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -7,7 +7,9 @@
 
 *
 
-*
+* Fix ``pytest.mark.skip`` mark when used in strict mode.
+  Thanks `@pquentin`_ for the PR and `@RonnyPfannschmidt`_ for
+  showing how to fix the bug.
 
 * Minor improvements and fixes to the documentation.
   Thanks `@omarkohl`_ for the PR.
@@ -165,6 +167,7 @@
 .. _@rabbbit: https://github.com/rabbbit
 .. _@hackebrot: https://github.com/hackebrot
 .. _@omarkohl: https://github.com/omarkohl
+.. _@pquentin: https://github.com/pquentin
 
 2.8.7
 =====
diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index 69157f485..55a24ddb9 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -30,6 +30,11 @@ def pytest_configure(config):
         nop.Exception = XFailed
         setattr(pytest, "xfail", nop)
 
+    config.addinivalue_line("markers",
+        "skip(reason=None): skip the given test function with an optional reason. "
+        "Example: skip(reason=\"no way of currently testing this\") skips the "
+        "test."
+    )
     config.addinivalue_line("markers",
         "skipif(condition): skip the given test function if eval(condition) "
         "results in a True value. Evaluation happens within the "
@@ -38,13 +43,13 @@ def pytest_configure(config):
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True, raises=None): mark the the test function "
-        "as an expected failure if eval(condition) has a True value. "
-        "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. If only specific "
-        "exception(s) are expected, you can list them in raises, and if the test fails "
-        "in other ways, it will be reported as a true failure. "
-        "See http://pytest.org/latest/skipping.html"
+        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
+        "mark the test function as an expected failure if eval(condition) "
+        "has a True value. Optionally specify a reason for better reporting "
+        "and run=False if you don't even want to execute the test function. "
+        "If only specific exception(s) are expected, you can list them in "
+        "raises, and if the test fails in other ways, it will be reported as "
+        "a true failure. See http://pytest.org/latest/skipping.html"
     )
 
 
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index 3464974e0..194c8692b 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -539,6 +539,19 @@ class TestSkip:
             "*1 passed*2 skipped*",
         ])
 
+    def test_strict_and_skip(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip
+            def test_hello():
+                pass
+        """)
+        result = testdir.runpytest("-rs --strict")
+        result.stdout.fnmatch_lines([
+            "*unconditional skip*",
+            "*1 skipped*",
+        ])
+
 class TestSkipif:
     def test_skipif_conditional(self, testdir):
         item = testdir.getitem("""
@@ -812,7 +825,7 @@ def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
     ])
 
 def test_xfail_test_setup_exception(testdir):
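
For context, a minimal sketch of the scenario this change addresses (the file name and invocation below are illustrative, not part of the commit): because the ``skip`` marker was not registered via ``config.addinivalue_line``, running with ``--strict`` used to reject ``@pytest.mark.skip`` as an unknown marker; with the registration added above, the marked test is simply reported as skipped.

    # test_skip_strict.py -- illustrative example, not part of the diff above
    import pytest

    # Before this change, --strict rejected this mark as unregistered;
    # after it, the test is reported as skipped with the given reason.
    @pytest.mark.skip(reason="no way of currently testing this")
    def test_hello():
        pass

Running ``pytest -rs --strict test_skip_strict.py`` should now show the test as skipped (with the reason in the ``-rs`` summary, or "unconditional skip" when no reason is given) rather than failing on an unregistered marker.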