#7942 refactor stepwise tests to utilize pytester
parent 6cddeb8cb3
commit c58abf7ad1
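Context for the diff below: Pytester (from _pytest.pytester) is the typed, pathlib-based successor to the legacy testdir fixture, and this commit swaps every fixture and test over to it, adding type annotations along the way. The helper methods used in the diff (makepyfile, makeconftest, makeini, runpytest) keep the same names on both fixtures, so the test bodies change only in the fixture rename. As a minimal sketch of the pattern, assuming a project outside pytest's own repository (where the pytester plugin must be enabled explicitly):

from _pytest.pytester import Pytester

# Enable the pytester fixture; pytest's own test suite does this globally.
pytest_plugins = "pytester"


def test_example(pytester: Pytester) -> None:
    # Write a throwaway test module into an isolated temporary directory.
    pytester.makepyfile("def test_ok(): assert True")
    # Run pytest in-process against it and check the outcome counts.
    result = pytester.runpytest("-v")
    result.assert_outcomes(passed=1)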
@@ -1,11 +1,13 @@
 import pytest
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
 
 
 @pytest.fixture
-def stepwise_testdir(testdir):
+def stepwise_pytester(pytester: Pytester) -> Pytester:
     # Rather than having to modify our testfile between tests, we introduce
     # a flag for whether or not the second test should fail.
-    testdir.makeconftest(
+    pytester.makeconftest(
         """
 def pytest_addoption(parser):
     group = parser.getgroup('general')
@@ -15,7 +17,7 @@ def pytest_addoption(parser):
     )
 
     # Create a simple test suite.
-    testdir.makepyfile(
+    pytester.makepyfile(
         test_a="""
 def test_success_before_fail():
     assert 1
@@ -34,7 +36,7 @@ def test_success_after_last_fail():
 """
     )
 
-    testdir.makepyfile(
+    pytester.makepyfile(
         test_b="""
 def test_success():
     assert 1
@@ -42,19 +44,19 @@ def test_success():
     )
 
     # customize cache directory so we don't use the tox's cache directory, which makes tests in this module flaky
-    testdir.makeini(
+    pytester.makeini(
         """
         [pytest]
         cache_dir = .cache
     """
     )
 
-    return testdir
+    return pytester
 
 
 @pytest.fixture
-def error_testdir(testdir):
-    testdir.makepyfile(
+def error_pytester(pytester: Pytester) -> Pytester:
+    pytester.makepyfile(
         test_a="""
 def test_error(nonexisting_fixture):
     assert 1
@@ -64,15 +66,15 @@ def test_success_after_fail():
 """
     )
 
-    return testdir
+    return pytester
 
 
 @pytest.fixture
-def broken_testdir(testdir):
-    testdir.makepyfile(
+def broken_pytester(pytester: Pytester) -> Pytester:
+    pytester.makepyfile(
         working_testfile="def test_proper(): assert 1", broken_testfile="foobar"
     )
-    return testdir
+    return pytester
 
 
 def _strip_resource_warnings(lines):
@@ -85,16 +87,15 @@ def _strip_resource_warnings(lines):
     ]
 
 
-def test_run_without_stepwise(stepwise_testdir):
-    result = stepwise_testdir.runpytest("-v", "--strict-markers", "--fail")
-
+def test_run_without_stepwise(stepwise_pytester: Pytester) -> None:
+    result = stepwise_pytester.runpytest("-v", "--strict-markers", "--fail")
     result.stdout.fnmatch_lines(["*test_success_before_fail PASSED*"])
     result.stdout.fnmatch_lines(["*test_fail_on_flag FAILED*"])
     result.stdout.fnmatch_lines(["*test_success_after_fail PASSED*"])
 
 
-def test_stepwise_output_summary(testdir):
-    testdir.makepyfile(
+def test_stepwise_output_summary(pytester: Pytester) -> None:
+    pytester.makepyfile(
         """
         import pytest
         @pytest.mark.parametrize("expected", [True, True, True, True, False])
@@ -102,17 +103,17 @@ def test_stepwise_output_summary(testdir):
             assert expected
         """
     )
-    result = testdir.runpytest("-v", "--stepwise")
+    result = pytester.runpytest("-v", "--stepwise")
     result.stdout.fnmatch_lines(["stepwise: no previously failed tests, not skipping."])
-    result = testdir.runpytest("-v", "--stepwise")
+    result = pytester.runpytest("-v", "--stepwise")
     result.stdout.fnmatch_lines(
         ["stepwise: skipping 4 already passed items.", "*1 failed, 4 deselected*"]
    )
 
 
-def test_fail_and_continue_with_stepwise(stepwise_testdir):
+def test_fail_and_continue_with_stepwise(stepwise_pytester: Pytester) -> None:
     # Run the tests with a failing second test.
-    result = stepwise_testdir.runpytest(
+    result = stepwise_pytester.runpytest(
         "-v", "--strict-markers", "--stepwise", "--fail"
     )
     assert _strip_resource_warnings(result.stderr.lines) == []
@@ -124,7 +125,7 @@ def test_fail_and_continue_with_stepwise(stepwise_testdir):
     assert "test_success_after_fail" not in stdout
 
     # "Fix" the test that failed in the last run and run it again.
-    result = stepwise_testdir.runpytest("-v", "--strict-markers", "--stepwise")
+    result = stepwise_pytester.runpytest("-v", "--strict-markers", "--stepwise")
     assert _strip_resource_warnings(result.stderr.lines) == []
 
     stdout = result.stdout.str()
@@ -135,8 +136,8 @@ def test_fail_and_continue_with_stepwise(stepwise_testdir):
 
 
 @pytest.mark.parametrize("stepwise_skip", ["--stepwise-skip", "--sw-skip"])
-def test_run_with_skip_option(stepwise_testdir, stepwise_skip):
-    result = stepwise_testdir.runpytest(
+def test_run_with_skip_option(stepwise_pytester: Pytester, stepwise_skip: str) -> None:
+    result = stepwise_pytester.runpytest(
         "-v", "--strict-markers", "--stepwise", stepwise_skip, "--fail", "--fail-last",
     )
     assert _strip_resource_warnings(result.stderr.lines) == []
@@ -149,8 +150,8 @@ def test_run_with_skip_option(stepwise_testdir, stepwise_skip):
     assert "test_success_after_last_fail" not in stdout
 
 
-def test_fail_on_errors(error_testdir):
-    result = error_testdir.runpytest("-v", "--strict-markers", "--stepwise")
+def test_fail_on_errors(error_pytester: Pytester) -> None:
+    result = error_pytester.runpytest("-v", "--strict-markers", "--stepwise")
 
     assert _strip_resource_warnings(result.stderr.lines) == []
     stdout = result.stdout.str()
@@ -159,8 +160,8 @@ def test_fail_on_errors(error_testdir):
     assert "test_success_after_fail" not in stdout
 
 
-def test_change_testfile(stepwise_testdir):
-    result = stepwise_testdir.runpytest(
+def test_change_testfile(stepwise_pytester: Pytester) -> None:
+    result = stepwise_pytester.runpytest(
         "-v", "--strict-markers", "--stepwise", "--fail", "test_a.py"
     )
     assert _strip_resource_warnings(result.stderr.lines) == []
@@ -170,7 +171,7 @@ def test_change_testfile(stepwise_testdir):
 
     # Make sure the second test run starts from the beginning, since the
     # test to continue from does not exist in testfile_b.
-    result = stepwise_testdir.runpytest(
+    result = stepwise_pytester.runpytest(
         "-v", "--strict-markers", "--stepwise", "test_b.py"
     )
     assert _strip_resource_warnings(result.stderr.lines) == []
@@ -180,17 +181,19 @@ def test_change_testfile(stepwise_testdir):
 
 
 @pytest.mark.parametrize("broken_first", [True, False])
-def test_stop_on_collection_errors(broken_testdir, broken_first):
+def test_stop_on_collection_errors(
+    broken_pytester: Pytester, broken_first: bool
+) -> None:
     """Stop during collection errors. Broken test first or broken test last
     actually surfaced a bug (#5444), so we test both situations."""
     files = ["working_testfile.py", "broken_testfile.py"]
     if broken_first:
         files.reverse()
-    result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
+    result = broken_pytester.runpytest("-v", "--strict-markers", "--stepwise", *files)
     result.stdout.fnmatch_lines("*error during collection*")
 
 
-def test_xfail_handling(testdir, monkeypatch):
+def test_xfail_handling(pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
     """Ensure normal xfail is ignored, and strict xfail interrupts the session in sw mode
 
     (#5547)
@@ -207,8 +210,8 @@ def test_xfail_handling(testdir, monkeypatch):
         def test_c(): pass
         def test_d(): pass
     """
-    testdir.makepyfile(contents.format(assert_value="0", strict="False"))
-    result = testdir.runpytest("--sw", "-v")
+    pytester.makepyfile(contents.format(assert_value="0", strict="False"))
+    result = pytester.runpytest("--sw", "-v")
     result.stdout.fnmatch_lines(
         [
             "*::test_a PASSED *",
@@ -219,8 +222,8 @@ def test_xfail_handling(testdir, monkeypatch):
         ]
     )
 
-    testdir.makepyfile(contents.format(assert_value="1", strict="True"))
-    result = testdir.runpytest("--sw", "-v")
+    pytester.makepyfile(contents.format(assert_value="1", strict="True"))
+    result = pytester.runpytest("--sw", "-v")
     result.stdout.fnmatch_lines(
         [
             "*::test_a PASSED *",
@@ -230,8 +233,8 @@ def test_xfail_handling(testdir, monkeypatch):
     )
 
-    testdir.makepyfile(contents.format(assert_value="0", strict="True"))
-    result = testdir.runpytest("--sw", "-v")
+    pytester.makepyfile(contents.format(assert_value="0", strict="True"))
+    result = pytester.runpytest("--sw", "-v")
     result.stdout.fnmatch_lines(
         [
             "*::test_b XFAIL *",
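For orientation, the behavior this suite pins down: with --stepwise (or --sw), pytest stops the session at the first failure and caches it, and the next run deselects the items that already passed and resumes from the cached failure. A condensed, self-contained sketch of the two-run flow that test_stepwise_output_summary above verifies (the inner test name test_foo is an assumption, since the hunk elides its def line; the expected output strings are copied from the diff):

from _pytest.pytester import Pytester

pytest_plugins = "pytester"  # only needed outside pytest's own test suite


def test_stepwise_two_runs(pytester: Pytester) -> None:
    pytester.makepyfile(
        """
        import pytest

        @pytest.mark.parametrize("expected", [True, True, True, True, False])
        def test_foo(expected):  # hypothetical name; elided in the hunk above
            assert expected
        """
    )
    # First run: nothing is cached yet, so stepwise starts from the beginning
    # and stops at the failing parametrization.
    result = pytester.runpytest("-v", "--stepwise")
    result.stdout.fnmatch_lines(["stepwise: no previously failed tests, not skipping."])
    # Second run: the four already-passed items are deselected and only the
    # previously failing item is rerun.
    result = pytester.runpytest("-v", "--stepwise")
    result.stdout.fnmatch_lines(
        ["stepwise: skipping 4 already passed items.", "*1 failed, 4 deselected*"]
    )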