Change RemovedInPytest4Warning to an error by default

To keep existing tests which emit RemovedInPytest4Warning running, we decided
to go with a command line option because:

* It is harder to integrate an ini option with tests which already use an ini file
* It also marks tests which need to be removed/updated in 4.1, when
  RemovedInPytest4Warning and related functionality are removed.

Fix #3737
This commit is contained in:
Bruno Oliveira
2018-11-08 20:14:58 -02:00
parent b1312147e0
commit dc20dedbc7
17 changed files with 136 additions and 60 deletions

View File

@@ -7,6 +7,7 @@ import _pytest._code
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.nodes import Collector
from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
class TestModule(object):
@@ -370,7 +371,7 @@ class TestGenerator(object):
yield assert_order_of_execution
"""
)
reprec = testdir.inline_run(o)
reprec = testdir.inline_run(o, SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 7
assert not skipped and not failed
@@ -404,7 +405,7 @@ class TestGenerator(object):
yield assert_order_of_execution
"""
)
reprec = testdir.inline_run(o)
reprec = testdir.inline_run(o, SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 4
assert not skipped and not failed
@@ -448,7 +449,7 @@ class TestGenerator(object):
assert setuplist[1] != setuplist[2], setuplist
"""
)
reprec = testdir.inline_run(o, "-v")
reprec = testdir.inline_run(o, "-v", SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 4
assert not skipped and not failed
@@ -1380,7 +1381,7 @@ def test_collector_attributes(testdir):
pass
"""
)
result = testdir.runpytest()
result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*1 passed*"])
@@ -1407,7 +1408,7 @@ def test_customize_through_attributes(testdir):
pass
"""
)
result = testdir.runpytest("--collect-only")
result = testdir.runpytest("--collect-only", SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*MyClass*", "*MyFunction*test_hello*"])

View File

@@ -8,6 +8,7 @@ from _pytest.fixtures import FixtureLookupError
from _pytest.fixtures import FixtureRequest
from _pytest.pathlib import Path
from _pytest.pytester import get_public_names
from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
def test_getfuncargnames():
@@ -975,7 +976,8 @@ class TestRequestCachedSetup(object):
class TestClass(object):
def test_func1a(self, something):
assert something == "hello"
"""
""",
SHOW_PYTEST_WARNINGS_ARG,
)
reprec.assertoutcome(passed=2)
@@ -997,7 +999,8 @@ class TestRequestCachedSetup(object):
assert something == "hello"
def test_func2b(self, something):
assert something == "hello"
"""
""",
SHOW_PYTEST_WARNINGS_ARG,
)
reprec.assertoutcome(passed=4)
@@ -1057,7 +1060,7 @@ class TestRequestCachedSetup(object):
assert arg1 != arg2
"""
)
result = testdir.runpytest("-v")
result = testdir.runpytest("-v", SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_request_cached_setup_getfixturevalue(self, testdir):
@@ -1076,7 +1079,7 @@ class TestRequestCachedSetup(object):
assert arg1 == 11
"""
)
result = testdir.runpytest("-v")
result = testdir.runpytest("-v", SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_request_cached_setup_functional(self, testdir):
@@ -1107,7 +1110,7 @@ class TestRequestCachedSetup(object):
assert test_0.values == [2]
"""
)
result = testdir.runpytest("-v")
result = testdir.runpytest("-v", SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*3 passed*"])
def test_issue117_sessionscopeteardown(self, testdir):
@@ -1126,7 +1129,7 @@ class TestRequestCachedSetup(object):
pass
"""
)
result = testdir.runpytest()
result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)
assert result.ret != 0
result.stdout.fnmatch_lines(["*3/x*", "*ZeroDivisionError*"])
@@ -1868,7 +1871,7 @@ class TestAutouseManagement(object):
yield f, -3
"""
)
reprec = testdir.inline_run()
reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
reprec.assertoutcome(passed=2)
def test_funcarg_and_setup(self, testdir):
@@ -2348,7 +2351,7 @@ class TestFixtureMarker(object):
"""
% method
)
result = testdir.runpytest()
result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)
assert result.ret != 0
result.stdout.fnmatch_lines(
["*ScopeMismatch*You tried*function*session*request*"]

View File

@@ -10,6 +10,7 @@ from hypothesis import strategies
import pytest
from _pytest import fixtures
from _pytest import python
from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
PY3 = sys.version_info >= (3, 0)
@@ -444,7 +445,7 @@ class TestMetafunc(object):
pass
"""
)
result = testdir.runpytest("--collect-only")
result = testdir.runpytest("--collect-only", SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(
[
"<Module 'test_parametrize_ids_exception.py'>",
@@ -866,7 +867,7 @@ class TestMetafuncFunctional(object):
assert metafunc.cls == TestClass
"""
)
result = testdir.runpytest(p, "-v")
result = testdir.runpytest(p, "-v", SHOW_PYTEST_WARNINGS_ARG)
result.assert_outcomes(passed=2)
def test_addcall_with_two_funcargs_generators(self, testdir):
@@ -887,7 +888,7 @@ class TestMetafuncFunctional(object):
assert arg1 == arg2
"""
)
result = testdir.runpytest("-v", p)
result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(
["*test_myfunc*0*PASS*", "*test_myfunc*1*FAIL*", "*1 failed, 1 passed*"]
)
@@ -910,7 +911,7 @@ class TestMetafuncFunctional(object):
assert arg1 in (10, 20)
"""
)
result = testdir.runpytest("-v", p)
result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(
[
"*test_func1*0*PASS*",
@@ -960,7 +961,7 @@ class TestMetafuncFunctional(object):
assert arg1 == arg2
"""
)
result = testdir.runpytest("-v", p)
result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(
[
"*test_myfunc*hello*PASS*",
@@ -980,7 +981,7 @@ class TestMetafuncFunctional(object):
assert hello == "world"
"""
)
result = testdir.runpytest("-v", p)
result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*test_myfunc*hello*PASS*", "*1 passed*"])
def test_two_functions_not_same_instance(self, testdir):
@@ -996,7 +997,7 @@ class TestMetafuncFunctional(object):
self.x = 1
"""
)
result = testdir.runpytest("-v", p)
result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(
["*test_func*0*PASS*", "*test_func*1*PASS*", "*2 pass*"]
)
@@ -1014,7 +1015,7 @@ class TestMetafuncFunctional(object):
self.val = 1
"""
)
result = testdir.runpytest(p)
result = testdir.runpytest(p, SHOW_PYTEST_WARNINGS_ARG)
result.assert_outcomes(passed=1)
def test_parametrize_functional2(self, testdir):
@@ -1536,7 +1537,7 @@ class TestMarkersWithParametrization(object):
assert n + 1 == expected
"""
testdir.makepyfile(s)
rec = testdir.inline_run("-m", "foo")
rec = testdir.inline_run("-m", "foo", SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, fail = rec.listoutcomes()
assert len(passed) == 1
assert len(skipped) == 0
@@ -1576,7 +1577,7 @@ class TestMarkersWithParametrization(object):
assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
# xfail is skip??
reprec.assertoutcome(passed=2, skipped=1)
@@ -1593,7 +1594,7 @@ class TestMarkersWithParametrization(object):
assert n % 2 == 0
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
reprec.assertoutcome(passed=2, skipped=1)
def test_xfail_with_arg(self, testdir):
@@ -1609,7 +1610,7 @@ class TestMarkersWithParametrization(object):
assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
reprec.assertoutcome(passed=2, skipped=1)
def test_xfail_with_kwarg(self, testdir):
@@ -1625,7 +1626,7 @@ class TestMarkersWithParametrization(object):
assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
reprec.assertoutcome(passed=2, skipped=1)
def test_xfail_with_arg_and_kwarg(self, testdir):
@@ -1641,7 +1642,7 @@ class TestMarkersWithParametrization(object):
assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
reprec.assertoutcome(passed=2, skipped=1)
@pytest.mark.parametrize("strict", [True, False])
@@ -1660,7 +1661,7 @@ class TestMarkersWithParametrization(object):
strict=strict
)
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
passed, failed = (2, 1) if strict else (3, 0)
reprec.assertoutcome(passed=passed, failed=failed)
@@ -1684,7 +1685,7 @@ class TestMarkersWithParametrization(object):
assert n + 1 == expected
"""
testdir.makepyfile(s)
reprec = testdir.inline_run()
reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
reprec.assertoutcome(passed=2, skipped=2)
@pytest.mark.issue290