commit c997c32004

.travis.yml
@@ -43,8 +43,7 @@ jobs:
       python: 'pypy3'
 
     - env: TOXENV=py35-xdist
-      dist: trusty
-      python: '3.5.0'
+      python: '3.5'
 
     # Coverage for:
     # - pytester's LsofFdLeakChecker

AUTHORS
@@ -23,6 +23,7 @@ Andras Tim
 Andrea Cimatoribus
 Andreas Zeidler
 Andrey Paramonov
+Andrzej Klajnert
 Andrzej Ostrowski
 Andy Freeland
 Anthon van der Neut
@@ -55,6 +56,7 @@ Charnjit SiNGH (CCSJ)
 Chris Lamb
 Christian Boelsen
 Christian Fetzer
+Christian Neumüller
 Christian Theunert
 Christian Tismer
 Christopher Gilling
@@ -96,6 +98,7 @@ Feng Ma
 Florian Bruhin
 Floris Bruynooghe
 Gabriel Reis
+Gene Wood
 George Kussumoto
 Georgy Dyuldin
 Graham Horler
@@ -210,6 +213,7 @@ Raphael Castaneda
 Raphael Pierzina
 Raquel Alegre
 Ravi Chandra
+Robert Holt
 Roberto Polli
 Roland Puntaier
 Romain Dorgueil

CHANGELOG.rst
@@ -18,6 +18,32 @@ with advance notice in the **Deprecations** section of releases.
 
 .. towncrier release notes start
 
+pytest 5.1.2 (2019-08-30)
+=========================
+
+Bug Fixes
+---------
+
+- `#2270 <https://github.com/pytest-dev/pytest/issues/2270>`_: Fixed ``self`` reference in function-scoped fixtures defined in plugin classes: previously ``self``
+  would be a reference to a *test* class, not the *plugin* class.
+
+
+- `#570 <https://github.com/pytest-dev/pytest/issues/570>`_: Fixed long standing issue where fixture scope was not respected when indirect fixtures were used during
+  parametrization.
+
+
+- `#5782 <https://github.com/pytest-dev/pytest/issues/5782>`_: Fix decoding error when printing an error response from ``--pastebin``.
+
+
+- `#5786 <https://github.com/pytest-dev/pytest/issues/5786>`_: Chained exceptions in test and collection reports are now correctly serialized, allowing plugins like
+  ``pytest-xdist`` to display them properly.
+
+
+- `#5792 <https://github.com/pytest-dev/pytest/issues/5792>`_: Windows: Fix error that occurs in certain circumstances when loading
+  ``conftest.py`` from a working directory that has casing other than the one stored
+  in the filesystem (e.g., ``c:\test`` instead of ``C:\test``).
+
+
 pytest 5.1.1 (2019-08-20)
 =========================
 
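A short aside on the ``#2270`` entry above: the fix concerns fixtures defined as methods of plugin classes. A minimal sketch of that scenario, with illustrative names not taken from this commit (``MyPlugin``, ``plugin_value``):

.. code-block:: python

    # conftest.py (hypothetical example, not code from this commit)
    import pytest


    class MyPlugin:
        def __init__(self):
            self.value = 42  # plugin state the fixture should expose

        @pytest.fixture
        def plugin_value(self):
            # After the #2270 fix, ``self`` here is the MyPlugin instance;
            # previously it could be bound to the requesting *test* class.
            return self.value


    def pytest_configure(config):
        config.pluginmanager.register(MyPlugin())


    # test_value.py
    def test_plugin_value(plugin_value):
        assert plugin_value == 42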

changelog/5782.bugfix.rst (deleted, folded into the 5.1.2 notes above)
@@ -1 +0,0 @@
-Fix decoding error when printing an error response from ``--pastebin``.

changelog/….bugfix.rst (new file)
@@ -0,0 +1 @@
+Fix pypy3.6 (nightly) on windows.

changelog/….bugfix.rst (new file)
@@ -0,0 +1 @@
+Handle ``--fulltrace`` correctly with ``pytest.raises``.

changelog/….bugfix.rst (new file)
@@ -0,0 +1,2 @@
+Windows: Fix regression with conftest whose qualified name contains uppercase
+characters (introduced by #5792).

doc/en/Makefile
@@ -16,7 +16,7 @@ REGENDOC_ARGS := \
 	--normalize "/[ \t]+\n/\n/" \
 	--normalize "~\$$REGENDOC_TMPDIR~/home/sweet/project~" \
 	--normalize "~/path/to/example~/home/sweet/project~" \
-	--normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \
+	--normalize "/in \d+.\d+s ==/in 0.12s ==/" \
 	--normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \
 	--normalize "@pytest-(\d+)\\.[^ ,]+@pytest-\1.x.y@" \
 	--normalize "@(This is pytest version )(\d+)\\.[^ ,]+@\1\2.x.y@" \
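The ``--normalize`` arguments above are ``/pattern/replacement/`` regex substitutions applied by regendoc; the changed rule tracks pytest 5.x's shorter summary line. A rough Python equivalent of the new rule (assumed semantics, not regendoc's actual code):

.. code-block:: python

    import re

    line = "===== 2 passed, 1 deselected in 3.84s ====="

    # Canonicalize the wall-clock time in the summary bar so that
    # regenerated docs do not churn on every run.
    print(re.sub(r"in \d+.\d+s ==", "in 0.12s ==", line))
    # ===== 2 passed, 1 deselected in 0.12s =====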

doc/en/announce/index.rst
@@ -6,6 +6,7 @@ Release announcements
    :maxdepth: 2
 
 
+   release-5.1.2
    release-5.1.1
    release-5.1.0
    release-5.0.1

doc/en/announce/release-5.1.2.rst (new file)
@@ -0,0 +1,23 @@
+pytest-5.1.2
+=======================================
+
+pytest 5.1.2 has just been released to PyPI.
+
+This is a bug-fix release, being a drop-in replacement. To upgrade::
+
+  pip install --upgrade pytest
+
+The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
+
+Thanks to all who contributed to this release, among them:
+
+* Andrzej Klajnert
+* Anthony Sottile
+* Bruno Oliveira
+* Christian Neumüller
+* Robert Holt
+* linchiwei123
+
+
+Happy testing,
+The pytest Development Team

doc/en/assert.rst
@@ -47,7 +47,7 @@ you will see the return value of the function call:
     E        +  where 3 = f()
 
     test_assert1.py:6: AssertionError
-    ============================ 1 failed in 0.02s =============================
+    ============================ 1 failed in 0.12s =============================
 
 ``pytest`` has support for showing the values of the most common subexpressions
 including calls, attributes, comparisons, and binary and unary
@@ -208,7 +208,7 @@ if you run this module:
     E         Use -v to get the full diff
 
     test_assert2.py:6: AssertionError
-    ============================ 1 failed in 0.02s =============================
+    ============================ 1 failed in 0.12s =============================
 
 Special comparisons are done for a number of cases:
 

doc/en/cache.rst
@@ -75,7 +75,7 @@ If you run this for the first time you will see two failures:
     E           Failed: bad luck
 
     test_50.py:7: Failed
-    2 failed, 48 passed in 0.08s
+    2 failed, 48 passed in 0.07s
 
 If you then run it with ``--lf``:
 
@@ -114,7 +114,7 @@ If you then run it with ``--lf``:
     E           Failed: bad luck
 
     test_50.py:7: Failed
-    ===================== 2 failed, 48 deselected in 0.02s =====================
+    ===================== 2 failed, 48 deselected in 0.12s =====================
 
 You have run only the two failing tests from the last run, while the 48 passing
 tests have not been run ("deselected").
@@ -158,7 +158,7 @@ of ``FF`` and dots):
     E           Failed: bad luck
 
     test_50.py:7: Failed
-    ======================= 2 failed, 48 passed in 0.07s =======================
+    ======================= 2 failed, 48 passed in 0.12s =======================
 
 .. _`config.cache`:
 
@@ -283,7 +283,7 @@ You can always peek at the content of the cache using the
     example/value contains:
       42
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 ``--cache-show`` takes an optional argument to specify a glob pattern for
 filtering:
@@ -300,7 +300,7 @@ filtering:
     example/value contains:
       42
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 Clearing Cache content
 ----------------------

doc/en/capture.rst
@@ -91,7 +91,7 @@ of the failing function and hide the other one:
     test_module.py:12: AssertionError
     -------------------------- Captured stdout setup ---------------------------
     setting up <function test_func2 at 0xdeadbeef>
-    ======================= 1 failed, 1 passed in 0.02s ========================
+    ======================= 1 failed, 1 passed in 0.12s ========================
 
 Accessing captured output from a test function
 ---------------------------------------------------

doc/en/customize.rst
@@ -107,8 +107,8 @@ check for ini-files as follows:
 
     # first look for pytest.ini files
     path/pytest.ini
-    path/setup.cfg  # must also contain [tool:pytest] section to match
     path/tox.ini    # must also contain [pytest] section to match
+    path/setup.cfg  # must also contain [tool:pytest] section to match
     pytest.ini
     ... # all the way down to the root
 
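For illustration, the reordered lookup can be modeled directly; a minimal sketch of the documented search order (illustrative only, not pytest's actual implementation):

.. code-block:: python

    from pathlib import Path

    # Candidates in the documented order; tox.ini and setup.cfg only
    # match when they contain the required section header.
    CANDIDATES = [
        ("pytest.ini", None),
        ("tox.ini", "[pytest]"),
        ("setup.cfg", "[tool:pytest]"),
    ]


    def find_inifile(start: Path):
        for directory in [start, *start.parents]:  # walk up to the root
            for name, section in CANDIDATES:
                candidate = directory / name
                if candidate.is_file():
                    if section is None or section in candidate.read_text():
                        return candidate
        return None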

doc/en/doctest.rst
@@ -36,7 +36,7 @@ then you can just invoke ``pytest`` directly:
 
     test_example.txt .                                                   [100%]
 
-    ============================ 1 passed in 0.01s =============================
+    ============================ 1 passed in 0.12s =============================
 
 By default, pytest will collect ``test*.txt`` files looking for doctest directives, but you
 can pass additional globs using the ``--doctest-glob`` option (multi-allowed).
@@ -66,7 +66,7 @@ and functions, including from test modules:
     mymodule.py .                                                        [ 50%]
     test_example.txt .                                                   [100%]
 
-    ============================ 2 passed in 0.01s =============================
+    ============================ 2 passed in 0.12s =============================
 
 You can make these changes permanent in your project by
 putting them into a pytest.ini file like this:

doc/en/example/markers.rst
@@ -52,7 +52,7 @@ You can then restrict a test run to only run tests marked with ``webtest``:
 
     test_server.py::test_send_http PASSED                                [100%]
 
-    ===================== 1 passed, 3 deselected in 0.01s ======================
+    ===================== 1 passed, 3 deselected in 0.12s ======================
 
 Or the inverse, running all tests except the webtest ones:
 
@@ -69,7 +69,7 @@ Or the inverse, running all tests except the webtest ones:
     test_server.py::test_another PASSED                                  [ 66%]
     test_server.py::TestClass::test_method PASSED                        [100%]
 
-    ===================== 3 passed, 1 deselected in 0.01s ======================
+    ===================== 3 passed, 1 deselected in 0.12s ======================
 
 Selecting tests based on their node ID
 --------------------------------------
@@ -89,7 +89,7 @@ tests based on their module, class, method, or function name:
 
     test_server.py::TestClass::test_method PASSED                        [100%]
 
-    ============================ 1 passed in 0.01s =============================
+    ============================ 1 passed in 0.12s =============================
 
 You can also select on the class:
 
@@ -104,7 +104,7 @@ You can also select on the class:
 
     test_server.py::TestClass::test_method PASSED                        [100%]
 
-    ============================ 1 passed in 0.01s =============================
+    ============================ 1 passed in 0.12s =============================
 
 Or select multiple nodes:
 
@@ -120,7 +120,7 @@ Or select multiple nodes:
     test_server.py::TestClass::test_method PASSED                        [ 50%]
     test_server.py::test_send_http PASSED                                [100%]
 
-    ============================ 2 passed in 0.01s =============================
+    ============================ 2 passed in 0.12s =============================
 
 .. _node-id:
 
@@ -159,7 +159,7 @@ select tests based on their names:
 
     test_server.py::test_send_http PASSED                                [100%]
 
-    ===================== 1 passed, 3 deselected in 0.01s ======================
+    ===================== 1 passed, 3 deselected in 0.12s ======================
 
 And you can also run all tests except the ones that match the keyword:
 
@@ -176,7 +176,7 @@ And you can also run all tests except the ones that match the keyword:
     test_server.py::test_another PASSED                                  [ 66%]
     test_server.py::TestClass::test_method PASSED                        [100%]
 
-    ===================== 3 passed, 1 deselected in 0.01s ======================
+    ===================== 3 passed, 1 deselected in 0.12s ======================
 
 Or to select "http" and "quick" tests:
 
@@ -192,7 +192,7 @@ Or to select "http" and "quick" tests:
     test_server.py::test_send_http PASSED                                [ 50%]
     test_server.py::test_something_quick PASSED                          [100%]
 
-    ===================== 2 passed, 2 deselected in 0.01s ======================
+    ===================== 2 passed, 2 deselected in 0.12s ======================
 
 .. note::
 
@@ -413,7 +413,7 @@ the test needs:
 
     test_someenv.py s                                                    [100%]
 
-    ============================ 1 skipped in 0.00s ============================
+    ============================ 1 skipped in 0.12s ============================
 
 and here is one that specifies exactly the environment needed:
 
@@ -428,7 +428,7 @@ and here is one that specifies exactly the environment needed:
 
     test_someenv.py .                                                    [100%]
 
-    ============================ 1 passed in 0.01s =============================
+    ============================ 1 passed in 0.12s =============================
 
 The ``--markers`` option always gives you a list of available markers:
 
@@ -499,7 +499,7 @@ The output is as follows:
     $ pytest -q -s
     Mark(name='my_marker', args=(<function hello_world at 0xdeadbeef>,), kwargs={})
     .
-    1 passed in 0.00s
+    1 passed in 0.01s
 
 We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``.
 
@@ -551,7 +551,7 @@ Let's run this without capturing output and see what we get:
     glob args=('class',) kwargs={'x': 2}
     glob args=('module',) kwargs={'x': 1}
     .
-    1 passed in 0.01s
+    1 passed in 0.02s
 
 marking platform specific tests with pytest
 --------------------------------------------------------------
@@ -623,7 +623,7 @@ then you will see two tests skipped and two executed tests as expected:
 
     ========================= short test summary info ==========================
     SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux
-    ======================= 2 passed, 2 skipped in 0.01s =======================
+    ======================= 2 passed, 2 skipped in 0.12s =======================
 
 Note that if you specify a platform via the marker-command line option like this:
 
@@ -638,7 +638,7 @@ Note that if you specify a platform via the marker-command line option like this
 
     test_plat.py .                                                       [100%]
 
-    ===================== 1 passed, 3 deselected in 0.01s ======================
+    ===================== 1 passed, 3 deselected in 0.12s ======================
 
 then the unmarked-tests will not be run.  It is thus a way to restrict the run to the specific tests.
 
@@ -711,7 +711,7 @@ We can now use the ``-m option`` to select one set:
     test_module.py:8: in test_interface_complex
         assert 0
     E   assert 0
-    ===================== 2 failed, 2 deselected in 0.02s ======================
+    ===================== 2 failed, 2 deselected in 0.12s ======================
 
 or to select both "event" and "interface" tests:
 
@@ -739,4 +739,4 @@ or to select both "event" and "interface" tests:
     test_module.py:12: in test_event_simple
         assert 0
     E   assert 0
-    ===================== 3 failed, 1 deselected in 0.03s ======================
+    ===================== 3 failed, 1 deselected in 0.12s ======================

doc/en/example/nonpython.rst
@@ -41,7 +41,7 @@ now execute the test specification:
     usecase execution failed
        spec failed: 'some': 'other'
        no further details known at this point.
-    ======================= 1 failed, 1 passed in 0.02s ========================
+    ======================= 1 failed, 1 passed in 0.12s ========================
 
 .. regendoc:wipe
 
@@ -77,7 +77,7 @@ consulted when reporting in ``verbose`` mode:
     usecase execution failed
        spec failed: 'some': 'other'
        no further details known at this point.
-    ======================= 1 failed, 1 passed in 0.02s ========================
+    ======================= 1 failed, 1 passed in 0.12s ========================
 
 .. regendoc:wipe
 
@@ -97,4 +97,4 @@ interesting to just look at the collection tree:
         <YamlItem hello>
         <YamlItem ok>
 
-    ========================== no tests ran in 0.02s ===========================
+    ========================== no tests ran in 0.12s ===========================

doc/en/example/parametrize.rst
@@ -172,7 +172,7 @@ objects, they are still using the default pytest representation:
       <Function test_timedistance_v3[forward]>
       <Function test_timedistance_v3[backward]>
 
-    ========================== no tests ran in 0.01s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
 together with the actual data, instead of listing them separately.
@@ -229,7 +229,7 @@ this is a fully self-contained example which you can run with:
 
     test_scenarios.py ....                                               [100%]
 
-    ============================ 4 passed in 0.01s =============================
+    ============================ 4 passed in 0.12s =============================
 
 If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:
 
@@ -248,7 +248,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
           <Function test_demo1[advanced]>
           <Function test_demo2[advanced]>
 
-    ========================== no tests ran in 0.01s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 Note that we told ``metafunc.parametrize()`` that your scenario values
 should be considered class-scoped.  With pytest-2.3 this leads to a
@@ -323,7 +323,7 @@ Let's first see how it looks like at collection time:
       <Function test_db_initialized[d1]>
       <Function test_db_initialized[d2]>
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 And then when we run the test:
 
@@ -394,7 +394,7 @@ The result of this test will be successful:
     <Module test_indirect_list.py>
      <Function test_indirect[a-b]>
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 .. regendoc:wipe
 
@@ -475,10 +475,11 @@ Running it results in some skips if we don't have all the python interpreters in
 .. code-block:: pytest
 
    . $ pytest -rs -q multipython.py
-   ssssssssssss......sss......                                          [100%]
+   ssssssssssss...ssssssssssss                                          [100%]
    ========================= short test summary info ==========================
-   SKIPPED [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found
-   12 passed, 15 skipped in 0.62s
+   SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found
+   SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.7' not found
+   3 passed, 24 skipped in 0.24s
 
 Indirect parametrization of optional implementations/imports
 --------------------------------------------------------------------
@@ -547,7 +548,7 @@ If you run this with reporting for skips enabled:
 
     ========================= short test summary info ==========================
     SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:13: could not import 'opt2': No module named 'opt2'
-    ======================= 1 passed, 1 skipped in 0.01s =======================
+    ======================= 1 passed, 1 skipped in 0.12s =======================
 
 You'll see that we don't have an ``opt2`` module and thus the second test run
 of our ``test_func1`` was skipped.  A few notes:
@@ -609,7 +610,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker:
     test_pytest_param_example.py::test_eval[basic_2+4] PASSED            [ 66%]
     test_pytest_param_example.py::test_eval[basic_6*9] XFAIL             [100%]
 
-    =============== 2 passed, 15 deselected, 1 xfailed in 0.08s ================
+    =============== 2 passed, 15 deselected, 1 xfailed in 0.12s ================
 
 As the result:
 

doc/en/example/pythoncollection.rst
@@ -158,7 +158,7 @@ The test collection would look like this:
           <Function simple_check>
           <Function complex_check>
 
-    ========================== no tests ran in 0.01s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 You can check for multiple glob patterns by adding a space between the patterns:
 
@@ -221,7 +221,7 @@ You can always peek at the collection tree without running tests like this:
           <Function test_method>
           <Function test_anothermethod>
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 .. _customizing-test-collection:
 
@@ -297,7 +297,7 @@ file will be left out:
     rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
     collected 0 items
 
-    ========================== no tests ran in 0.01s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 It's also possible to ignore files based on Unix shell-style wildcards by adding
 patterns to ``collect_ignore_glob``.

doc/en/example/reportingdemo.rst
@@ -650,4 +650,4 @@ Here is a nice run of several failures and how ``pytest`` presents things:
     E        +  where 1 = This is JSON\n{\n  'foo': 'bar'\n}.a
 
     failure_demo.py:282: AssertionError
-    ============================ 44 failed in 0.26s ============================
+    ============================ 44 failed in 0.12s ============================

doc/en/example/simple.rst
@@ -132,7 +132,7 @@ directory with the above conftest.py:
     rootdir: $REGENDOC_TMPDIR
     collected 0 items
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 .. _`excontrolskip`:
 
@@ -201,7 +201,7 @@ and when running it will see a skipped "slow" test:
 
     ========================= short test summary info ==========================
     SKIPPED [1] test_module.py:8: need --runslow option to run
-    ======================= 1 passed, 1 skipped in 0.01s =======================
+    ======================= 1 passed, 1 skipped in 0.12s =======================
 
 Or run it including the ``slow`` marked test:
 
@@ -216,7 +216,7 @@ Or run it including the ``slow`` marked test:
 
     test_module.py ..                                                    [100%]
 
-    ============================ 2 passed in 0.01s =============================
+    ============================ 2 passed in 0.12s =============================
 
 Writing well integrated assertion helpers
 --------------------------------------------------
@@ -358,7 +358,7 @@ which will add the string to the test header accordingly:
     rootdir: $REGENDOC_TMPDIR
     collected 0 items
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 .. regendoc:wipe
 
@@ -388,7 +388,7 @@ which will add info only when run with "--v":
     rootdir: $REGENDOC_TMPDIR
     collecting ... collected 0 items
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 and nothing when run plainly:
 
@@ -401,7 +401,7 @@ and nothing when run plainly:
     rootdir: $REGENDOC_TMPDIR
     collected 0 items
 
-    ========================== no tests ran in 0.00s ===========================
+    ========================== no tests ran in 0.12s ===========================
 
 profiling test duration
 --------------------------
@@ -447,7 +447,7 @@ Now we can profile which test functions execute the slowest:
     0.30s call     test_some_are_slow.py::test_funcslow2
     0.20s call     test_some_are_slow.py::test_funcslow1
     0.10s call     test_some_are_slow.py::test_funcfast
-    ============================ 3 passed in 0.61s =============================
+    ============================ 3 passed in 0.12s =============================
 
 incremental testing - test steps
 ---------------------------------------------------
@@ -531,7 +531,7 @@ If we run this:
     ========================= short test summary info ==========================
     XFAIL test_step.py::TestUserHandling::test_deletion
       reason: previous test failed (test_modification)
-    ================== 1 failed, 2 passed, 1 xfailed in 0.03s ==================
+    ================== 1 failed, 2 passed, 1 xfailed in 0.12s ==================
 
 We'll see that ``test_deletion`` was not executed because ``test_modification``
 failed.  It is reported as an "expected failure".
@@ -644,7 +644,7 @@ We can run this:
     E       assert 0
 
     a/test_db2.py:2: AssertionError
-    ============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.05s ==============
+    ============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12s ==============
 
 The two test modules in the ``a`` directory see the same ``db`` fixture instance
 while the one test in the sister-directory ``b`` doesn't see it.  We could of course
@@ -733,7 +733,7 @@ and run them:
     E       assert 0
 
     test_module.py:6: AssertionError
-    ============================ 2 failed in 0.02s =============================
+    ============================ 2 failed in 0.12s =============================
 
 you will have a "failures" file which contains the failing test ids:
 
@@ -848,7 +848,7 @@ and run it:
     E       assert 0
 
     test_module.py:19: AssertionError
-    ======================== 2 failed, 1 error in 0.02s ========================
+    ======================== 2 failed, 1 error in 0.12s ========================
 
 You'll see that the fixture finalizers could use the precise reporting
 information.

doc/en/fixture.rst
@@ -96,7 +96,7 @@ marked ``smtp_connection`` fixture function.  Running the test looks like this:
     E       assert 0
 
     test_smtpsimple.py:14: AssertionError
-    ============================ 1 failed in 0.18s =============================
+    ============================ 1 failed in 0.12s =============================
 
 In the failure traceback we see that the test function was called with a
 ``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture
@@ -258,7 +258,7 @@ inspect what is going on and can now run the tests:
     E       assert 0
 
     test_module.py:13: AssertionError
-    ============================ 2 failed in 0.20s =============================
+    ============================ 2 failed in 0.12s =============================
 
 You see the two ``assert 0`` failing and more importantly you can also see
 that the same (module-scoped) ``smtp_connection`` object was passed into the
@@ -361,7 +361,7 @@ Let's execute it:
     $ pytest -s -q --tb=no
     FFteardown smtp
 
-    2 failed in 0.20s
+    2 failed in 0.79s
 
 We see that the ``smtp_connection`` instance is finalized after the two
 tests finished execution.  Note that if we decorated our fixture
@@ -515,7 +515,7 @@ again, nothing much has changed:
     $ pytest -s -q --tb=no
     FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)
 
-    2 failed in 0.21s
+    2 failed in 0.77s
 
 Let's quickly create another test module that actually sets the
 server URL in its module namespace:
@@ -692,7 +692,7 @@ So let's just do another run:
     test_module.py:13: AssertionError
     ------------------------- Captured stdout teardown -------------------------
     finalizing <smtplib.SMTP object at 0xdeadbeef>
-    4 failed in 0.89s
+    4 failed in 1.69s
 
 We see that our two test functions each ran twice, against the different
 ``smtp_connection`` instances.  Note also, that with the ``mail.python.org``
@@ -771,7 +771,7 @@ Running the above tests results in the following test IDs being used:
      <Function test_ehlo[mail.python.org]>
      <Function test_noop[mail.python.org]>
 
-   ========================== no tests ran in 0.01s ===========================
+   ========================== no tests ran in 0.12s ===========================
 
 .. _`fixture-parametrize-marks`:
 
@@ -812,7 +812,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:
     test_fixture_marks.py::test_data[1] PASSED                           [ 66%]
     test_fixture_marks.py::test_data[2] SKIPPED                          [100%]
 
-    ======================= 2 passed, 1 skipped in 0.01s =======================
+    ======================= 2 passed, 1 skipped in 0.12s =======================
 
 .. _`interdependent fixtures`:
 
@@ -861,7 +861,7 @@ Here we declare an ``app`` fixture which receives the previously defined
     test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%]
     test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%]
 
-    ============================ 2 passed in 0.44s =============================
+    ============================ 2 passed in 0.12s =============================
 
 Due to the parametrization of ``smtp_connection``, the test will run twice with two
 different ``App`` instances and respective smtp servers.  There is no
@@ -971,7 +971,7 @@ Let's run the tests in verbose mode and with looking at the print-output:
       TEARDOWN modarg mod2
 
 
-    ============================ 8 passed in 0.01s =============================
+    ============================ 8 passed in 0.12s =============================
 
 You can see that the parametrized module-scoped ``modarg`` resource caused an
 ordering of test execution that lead to the fewest possible "active" resources.

doc/en/getting-started.rst
@@ -69,7 +69,7 @@ That’s it. You can now execute the test function:
     E        +  where 4 = func(3)
 
     test_sample.py:6: AssertionError
-    ============================ 1 failed in 0.02s =============================
+    ============================ 1 failed in 0.12s =============================
 
 This test returns a failure report because ``func(3)`` does not return ``5``.
 
@@ -108,7 +108,7 @@ Execute the test function with “quiet” reporting mode:
 
     $ pytest -q test_sysexit.py
     .                                                                    [100%]
-    1 passed in 0.00s
+    1 passed in 0.01s
 
 Group multiple tests in a class
 --------------------------------------------------------------

doc/en/goodpractices.rst
@@ -88,7 +88,7 @@ This has the following benefits:
 
 .. note::
 
-    See :ref:`pythonpath` for more information about the difference between calling ``pytest`` and
+    See :ref:`pytest vs python -m pytest` for more information about the difference between calling ``pytest`` and
     ``python -m pytest``.
 
 Note that using this scheme your test files must have **unique names**, because

doc/en/index.rst
@@ -44,7 +44,7 @@ To execute it:
     E        +  where 4 = inc(3)
 
     test_sample.py:6: AssertionError
-    ============================ 1 failed in 0.02s =============================
+    ============================ 1 failed in 0.12s =============================
 
 Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used.
 See :ref:`Getting Started <getstarted>` for more examples.

doc/en/parametrize.rst
@@ -75,7 +75,7 @@ them in turn:
     E        +  where 54 = eval('6*9')
 
     test_expectation.py:6: AssertionError
-    ======================= 1 failed, 2 passed in 0.02s ========================
+    ======================= 1 failed, 2 passed in 0.12s ========================
 
 .. note::
 
@@ -128,7 +128,7 @@ Let's run this:
 
     test_expectation.py ..x                                              [100%]
 
-    ======================= 2 passed, 1 xfailed in 0.02s =======================
+    ======================= 2 passed, 1 xfailed in 0.12s =======================
 
 The one parameter set which caused a failure previously now
 shows up as an "xfailed (expected to fail)" test.

doc/en/pythonpath.rst
@@ -72,6 +72,8 @@ imported in the global import namespace.
 
 This is also discussed in details in :ref:`test discovery`.
 
+.. _`pytest vs python -m pytest`:
+
 Invoking ``pytest`` versus ``python -m pytest``
 -----------------------------------------------
 

doc/en/reference.rst
@@ -59,7 +59,7 @@ pytest.raises
 
 **Tutorial**: :ref:`assertraises`.
 
-.. autofunction:: pytest.raises(expected_exception: Exception, [match], [message])
+.. autofunction:: pytest.raises(expected_exception: Exception, [match])
    :with: excinfo
 
 pytest.deprecated_call
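The ``[message]`` argument shown removed above had been deprecated in favor of ``match``, which remains the supported way to assert on the exception text, for example:

.. code-block:: python

    import pytest


    def test_zero_division():
        # ``match`` is applied with ``re.search`` to the string
        # representation of the raised exception.
        with pytest.raises(ZeroDivisionError, match="division by zero"):
            1 / 0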

doc/en/skipping.rst
@@ -371,7 +371,7 @@ Running it with the report-on-xfail option gives this output:
     XFAIL xfail_demo.py::test_hello6
       reason: reason
     XFAIL xfail_demo.py::test_hello7
-    ============================ 7 xfailed in 0.05s ============================
+    ============================ 7 xfailed in 0.12s ============================
 
 .. _`skip/xfail with parametrize`:
 

doc/en/tmpdir.rst
@@ -64,7 +64,7 @@ Running this would result in a passed test except for the last
     E       assert 0
 
     test_tmp_path.py:13: AssertionError
-    ============================ 1 failed in 0.02s =============================
+    ============================ 1 failed in 0.12s =============================
 
 .. _`tmp_path_factory example`:
 
@@ -133,7 +133,7 @@ Running this would result in a passed test except for the last
     E       assert 0
 
     test_tmpdir.py:9: AssertionError
-    ============================ 1 failed in 0.02s =============================
+    ============================ 1 failed in 0.12s =============================
 
 .. _`tmpdir factory example`:
 

doc/en/unittest.rst
@@ -166,7 +166,7 @@ the ``self.db`` values in the traceback:
     E       assert 0
 
     test_unittest_db.py:13: AssertionError
-    ============================ 2 failed in 0.02s =============================
+    ============================ 2 failed in 0.12s =============================
 
 This default pytest traceback shows that the two test methods
 share the same ``self.db`` instance which was our intention
|  |  | ||||||
@@ -247,7 +247,7 @@ Example:
     XPASS test_example.py::test_xpass always xfail
     ERROR test_example.py::test_error - assert 0
     FAILED test_example.py::test_fail - assert 0
-    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s ===
+    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===

 The ``-r`` option accepts a number of characters after it, with ``a`` used
 above meaning "all except passes".
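For instance, combining characters narrows the short summary; two illustrative invocations::

    $ pytest -ra    # summary for everything except passed tests
    $ pytest -rfs   # summary for failed and skipped tests only
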
@@ -297,7 +297,7 @@ More than one character can be used, so for example to only see failed and skipped
     ========================= short test summary info ==========================
     FAILED test_example.py::test_fail - assert 0
     SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test
-    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s ===
+    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===

 Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had
 captured output:
@@ -336,7 +336,7 @@ captured output:
     ok
     ========================= short test summary info ==========================
     PASSED test_example.py::test_ok
-    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s ===
+    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===

 .. _pdb-option:

@@ -41,7 +41,7 @@ Running pytest now produces this output:
         warnings.warn(UserWarning("api v1, should use functions from v2"))

     -- Docs: https://docs.pytest.org/en/latest/warnings.html
-    ====================== 1 passed, 1 warnings in 0.00s =======================
+    ====================== 1 passed, 1 warnings in 0.12s =======================

 The ``-W`` flag can be passed to control which warnings will be displayed or even turn
 them into errors:

@@ -789,7 +789,11 @@ def _py36_windowsconsoleio_workaround(stream):

     See https://github.com/pytest-dev/py/issues/103
     """
-    if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6):
+    if (
+        not sys.platform.startswith("win32")
+        or sys.version_info[:2] < (3, 6)
+        or hasattr(sys, "pypy_version_info")
+    ):
         return

     # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)

@@ -431,10 +431,7 @@ class PytestPluginManager(PluginManager):
                 continue
             conftestpath = parent.join("conftest.py")
             if conftestpath.isfile():
-                # Use realpath to avoid loading the same conftest twice
-                # with build systems that create build directories containing
-                # symlinks to actual files.
-                mod = self._importconftest(conftestpath.realpath())
+                mod = self._importconftest(conftestpath)
                 clist.append(mod)
         self._dirpath2confmods[directory] = clist
         return clist
@@ -449,8 +446,14 @@ class PytestPluginManager(PluginManager):
         raise KeyError(name)

     def _importconftest(self, conftestpath):
+        # Use a resolved Path object as key to avoid loading the same conftest twice
+        # with build systems that create build directories containing
+        # symlinks to actual files.
+        # Using Path().resolve() is better than py.path.realpath because
+        # it resolves to the correct path/drive in case-insensitive file systems (#5792)
+        key = Path(str(conftestpath)).resolve()
         try:
-            return self._conftestpath2mod[conftestpath]
+            return self._conftestpath2mod[key]
         except KeyError:
             pkgpath = conftestpath.pypkgpath()
             if pkgpath is None:
@@ -467,7 +470,7 @@ class PytestPluginManager(PluginManager):
                 raise ConftestImportFailure(conftestpath, sys.exc_info())

             self._conftest_plugins.add(mod)
-            self._conftestpath2mod[conftestpath] = mod
+            self._conftestpath2mod[key] = mod
             dirpath = conftestpath.dirpath()
             if dirpath in self._dirpath2confmods:
                 for path, mods in self._dirpath2confmods.items():

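To illustrate the deduplication the resolved key provides: when a build tree symlinks back to the source, both spellings of the conftest path resolve to one canonical ``Path``, so the module is imported once. A hedged sketch, not pytest code; the layout is hypothetical and assumes POSIX symlinks::

    import os
    from pathlib import Path

    # src/conftest.py plus a build/ symlink pointing back at src/
    src = Path("src").resolve()
    os.symlink(src, "build")  # may require privileges on Windows

    # Both spellings collapse to a single cache key after .resolve()
    assert (Path("build") / "conftest.py").resolve() == src / "conftest.py"
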
@@ -859,7 +859,7 @@ class FixtureDef:
             if argname != "request":
                 fixturedef.addfinalizer(functools.partial(self.finish, request=request))

-        my_cache_key = request.param_index
+        my_cache_key = self.cache_key(request)
         cached_result = getattr(self, "cached_result", None)
         if cached_result is not None:
             result, cache_key, err = cached_result
@@ -877,6 +877,9 @@ class FixtureDef:
         hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
         return hook.pytest_fixture_setup(fixturedef=self, request=request)

+    def cache_key(self, request):
+        return request.param_index if not hasattr(request, "param") else request.param
+
     def __repr__(self):
         return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format(
             self.argname, self.scope, self.baseid
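With this key, an indirectly parametrized fixture is cached by parameter value rather than by parameter index, which is what lets a broader-scoped fixture survive across tests requesting the same parameter (#570). A hedged sketch of the behaviour this enables, with illustrative names::

    import pytest

    @pytest.fixture(scope="module")
    def connection(request):
        # With the value-based cache key, both tests below share one
        # setup for "db1" instead of tearing it down in between.
        return {"dsn": request.param}

    @pytest.mark.parametrize("connection", ["db1"], indirect=True)
    def test_read(connection): ...

    @pytest.mark.parametrize("connection", ["db1"], indirect=True)
    def test_write(connection): ...
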
@@ -897,6 +900,12 @@ def resolve_fixture_function(fixturedef, request):
         # request.instance so that code working with "fixturedef" behaves
         # as expected.
         if request.instance is not None:
+            # handle the case where fixture is defined not in a test class, but some other class
+            # (for example a plugin class with a fixture), see #2270
+            if hasattr(fixturefunc, "__self__") and not isinstance(
+                request.instance, fixturefunc.__self__.__class__
+            ):
+                return fixturefunc
             fixturefunc = getimfunc(fixturedef.func)
             if fixturefunc != fixturedef.func:
                 fixturefunc = fixturefunc.__get__(request.instance)
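The branch above covers the situation fixed by #2270: a fixture implemented as a bound method of a plugin object, requested from inside a test class. Re-binding it to the test instance would hand the wrong ``self`` to the fixture. A minimal sketch with hypothetical names; the plugin would be registered from a conftest::

    import pytest

    class MyPlugin:
        @pytest.fixture
        def plugin_value(self):
            # self must stay bound to the MyPlugin instance, not be
            # re-bound to the requesting test class instance
            assert isinstance(self, MyPlugin)
            return 42

    # e.g. in conftest.py:
    #     def pytest_configure(config):
    #         config.pluginmanager.register(MyPlugin())

    class TestUsesPlugin:
        def test_value(self, plugin_value):
            assert plugin_value == 42
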
@@ -913,7 +922,7 @@ def pytest_fixture_setup(fixturedef, request):
         kwargs[argname] = result

     fixturefunc = resolve_fixture_function(fixturedef, request)
-    my_cache_key = request.param_index
+    my_cache_key = fixturedef.cache_key(request)
     try:
         result = call_fixture_func(fixturefunc, request, kwargs)
     except TEST_OUTCOME:

@@ -263,7 +263,6 @@ class Node:
         fm = self.session._fixturemanager
         if excinfo.errisinstance(fm.FixtureLookupError):
             return excinfo.value.formatrepr()
-        tbfilter = True
         if self.config.getoption("fulltrace", False):
             style = "long"
         else:
@@ -271,7 +270,6 @@ class Node:
             self._prunetraceback(excinfo)
             if len(excinfo.traceback) == 0:
                 excinfo.traceback = tb
-            tbfilter = False  # prunetraceback already does it
             if style == "auto":
                 style = "long"
         # XXX should excinfo.getrepr record all data and toterminal() process it?
@@ -297,7 +295,7 @@ class Node:
             abspath=abspath,
             showlocals=self.config.getoption("showlocals", False),
             style=style,
-            tbfilter=tbfilter,
+            tbfilter=False,  # pruned already, or in --fulltrace mode.
             truncate_locals=truncate_locals,
         )

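The net effect is that ``getrepr`` never re-filters: either ``_prunetraceback`` already removed hidden frames, or ``--fulltrace`` deliberately keeps them all. A hedged sketch of the two ``tbfilter`` behaviours through the low-level API (internal, so subject to change)::

    from _pytest._code import ExceptionInfo

    def helper():
        __tracebackhide__ = True
        raise ValueError("boom")

    try:
        helper()
    except ValueError:
        excinfo = ExceptionInfo.from_current()

    # tbfilter=True drops __tracebackhide__ frames at repr time; passing
    # False relies on the traceback having been pruned beforehand (or
    # deliberately kept whole, as under --fulltrace).
    filtered = excinfo.getrepr(style="long", tbfilter=True)
    unfiltered = excinfo.getrepr(style="long", tbfilter=False)
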
@@ -4,6 +4,7 @@ from typing import Union

 import py

+from _pytest._code.code import ExceptionChainRepr
 from _pytest._code.code import ExceptionInfo
 from _pytest._code.code import ReprEntry
 from _pytest._code.code import ReprEntryNative
@@ -161,46 +162,7 @@ class BaseReport:

         Experimental method.
         """
-
-        def disassembled_report(rep):
-            reprtraceback = rep.longrepr.reprtraceback.__dict__.copy()
-            reprcrash = rep.longrepr.reprcrash.__dict__.copy()
-
-            new_entries = []
-            for entry in reprtraceback["reprentries"]:
-                entry_data = {
-                    "type": type(entry).__name__,
-                    "data": entry.__dict__.copy(),
-                }
-                for key, value in entry_data["data"].items():
-                    if hasattr(value, "__dict__"):
-                        entry_data["data"][key] = value.__dict__.copy()
-                new_entries.append(entry_data)
-
-            reprtraceback["reprentries"] = new_entries
-
-            return {
-                "reprcrash": reprcrash,
-                "reprtraceback": reprtraceback,
-                "sections": rep.longrepr.sections,
-            }
-
-        d = self.__dict__.copy()
-        if hasattr(self.longrepr, "toterminal"):
-            if hasattr(self.longrepr, "reprtraceback") and hasattr(
-                self.longrepr, "reprcrash"
-            ):
-                d["longrepr"] = disassembled_report(self)
-            else:
-                d["longrepr"] = str(self.longrepr)
-        else:
-            d["longrepr"] = self.longrepr
-        for name in d:
-            if isinstance(d[name], (py.path.local, Path)):
-                d[name] = str(d[name])
-            elif name == "result":
-                d[name] = None  # for now
-        return d
+        return _report_to_json(self)

     @classmethod
     def _from_json(cls, reportdict):
@@ -212,54 +174,8 @@ class BaseReport:

         Experimental method.
         """
-        if reportdict["longrepr"]:
-            if (
-                "reprcrash" in reportdict["longrepr"]
-                and "reprtraceback" in reportdict["longrepr"]
-            ):
-
-                reprtraceback = reportdict["longrepr"]["reprtraceback"]
-                reprcrash = reportdict["longrepr"]["reprcrash"]
-
-                unserialized_entries = []
-                for entry_data in reprtraceback["reprentries"]:
-                    data = entry_data["data"]
-                    entry_type = entry_data["type"]
-                    if entry_type == "ReprEntry":
-                        reprfuncargs = None
-                        reprfileloc = None
-                        reprlocals = None
-                        if data["reprfuncargs"]:
-                            reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
-                        if data["reprfileloc"]:
-                            reprfileloc = ReprFileLocation(**data["reprfileloc"])
-                        if data["reprlocals"]:
-                            reprlocals = ReprLocals(data["reprlocals"]["lines"])
-
-                        reprentry = ReprEntry(
-                            lines=data["lines"],
-                            reprfuncargs=reprfuncargs,
-                            reprlocals=reprlocals,
-                            filelocrepr=reprfileloc,
-                            style=data["style"],
-                        )  # type: Union[ReprEntry, ReprEntryNative]
-                    elif entry_type == "ReprEntryNative":
-                        reprentry = ReprEntryNative(data["lines"])
-                    else:
-                        _report_unserialization_failure(entry_type, cls, reportdict)
-                    unserialized_entries.append(reprentry)
-                reprtraceback["reprentries"] = unserialized_entries
-
-                exception_info = ReprExceptionInfo(
-                    reprtraceback=ReprTraceback(**reprtraceback),
-                    reprcrash=ReprFileLocation(**reprcrash),
-                )
-
-                for section in reportdict["longrepr"]["sections"]:
-                    exception_info.addsection(*section)
-                reportdict["longrepr"] = exception_info
-
-        return cls(**reportdict)
+        kwargs = _report_kwargs_from_json(reportdict)
+        return cls(**kwargs)


 def _report_unserialization_failure(type_name, report_class, reportdict):
@@ -425,3 +341,142 @@ def pytest_report_from_serializable(data):
         assert False, "Unknown report_type unserialize data: {}".format(
             data["_report_type"]
         )
+
+
+def _report_to_json(report):
+    """
+    This was originally the serialize_report() function from xdist (ca03269).
+
+    Returns the contents of this report as a dict of builtin entries, suitable for
+    serialization.
+    """
+
+    def serialize_repr_entry(entry):
+        entry_data = {"type": type(entry).__name__, "data": entry.__dict__.copy()}
+        for key, value in entry_data["data"].items():
+            if hasattr(value, "__dict__"):
+                entry_data["data"][key] = value.__dict__.copy()
+        return entry_data
+
+    def serialize_repr_traceback(reprtraceback):
+        result = reprtraceback.__dict__.copy()
+        result["reprentries"] = [
+            serialize_repr_entry(x) for x in reprtraceback.reprentries
+        ]
+        return result
+
+    def serialize_repr_crash(reprcrash):
+        return reprcrash.__dict__.copy()
+
+    def serialize_longrepr(rep):
+        result = {
+            "reprcrash": serialize_repr_crash(rep.longrepr.reprcrash),
+            "reprtraceback": serialize_repr_traceback(rep.longrepr.reprtraceback),
+            "sections": rep.longrepr.sections,
+        }
+        if isinstance(rep.longrepr, ExceptionChainRepr):
+            result["chain"] = []
+            for repr_traceback, repr_crash, description in rep.longrepr.chain:
+                result["chain"].append(
+                    (
+                        serialize_repr_traceback(repr_traceback),
+                        serialize_repr_crash(repr_crash),
+                        description,
+                    )
+                )
+        else:
+            result["chain"] = None
+        return result
+
+    d = report.__dict__.copy()
+    if hasattr(report.longrepr, "toterminal"):
+        if hasattr(report.longrepr, "reprtraceback") and hasattr(
+            report.longrepr, "reprcrash"
+        ):
+            d["longrepr"] = serialize_longrepr(report)
+        else:
+            d["longrepr"] = str(report.longrepr)
+    else:
+        d["longrepr"] = report.longrepr
+    for name in d:
+        if isinstance(d[name], (py.path.local, Path)):
+            d[name] = str(d[name])
+        elif name == "result":
+            d[name] = None  # for now
+    return d
+
+
+def _report_kwargs_from_json(reportdict):
+    """
+    This was originally the serialize_report() function from xdist (ca03269).
+
+    Returns **kwargs that can be used to construct a TestReport or CollectReport instance.
+    """
+
+    def deserialize_repr_entry(entry_data):
+        data = entry_data["data"]
+        entry_type = entry_data["type"]
+        if entry_type == "ReprEntry":
+            reprfuncargs = None
+            reprfileloc = None
+            reprlocals = None
+            if data["reprfuncargs"]:
+                reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
+            if data["reprfileloc"]:
+                reprfileloc = ReprFileLocation(**data["reprfileloc"])
+            if data["reprlocals"]:
+                reprlocals = ReprLocals(data["reprlocals"]["lines"])
+
+            reprentry = ReprEntry(
+                lines=data["lines"],
+                reprfuncargs=reprfuncargs,
+                reprlocals=reprlocals,
+                filelocrepr=reprfileloc,
+                style=data["style"],
+            )  # type: Union[ReprEntry, ReprEntryNative]
+        elif entry_type == "ReprEntryNative":
+            reprentry = ReprEntryNative(data["lines"])
+        else:
+            _report_unserialization_failure(entry_type, TestReport, reportdict)
+        return reprentry
+
+    def deserialize_repr_traceback(repr_traceback_dict):
+        repr_traceback_dict["reprentries"] = [
+            deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"]
+        ]
+        return ReprTraceback(**repr_traceback_dict)
+
+    def deserialize_repr_crash(repr_crash_dict):
+        return ReprFileLocation(**repr_crash_dict)
+
+    if (
+        reportdict["longrepr"]
+        and "reprcrash" in reportdict["longrepr"]
+        and "reprtraceback" in reportdict["longrepr"]
+    ):
+
+        reprtraceback = deserialize_repr_traceback(
+            reportdict["longrepr"]["reprtraceback"]
+        )
+        reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"])
+        if reportdict["longrepr"]["chain"]:
+            chain = []
+            for repr_traceback_data, repr_crash_data, description in reportdict[
+                "longrepr"
+            ]["chain"]:
+                chain.append(
+                    (
+                        deserialize_repr_traceback(repr_traceback_data),
+                        deserialize_repr_crash(repr_crash_data),
+                        description,
+                    )
+                )
+            exception_info = ExceptionChainRepr(chain)
+        else:
+            exception_info = ReprExceptionInfo(reprtraceback, reprcrash)
+
+        for section in reportdict["longrepr"]["sections"]:
+            exception_info.addsection(*section)
+        reportdict["longrepr"] = exception_info
+
+    return reportdict

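Together these helpers give reports, including chained exceptions, a lossless round trip through the experimental serialization API used by ``pytest-xdist`` (#5786). A hedged sketch of the round trip, assuming ``report`` is a failing ``TestReport`` captured in a ``pytest_runtest_logreport`` hook::

    from _pytest.reports import TestReport

    data = report._to_json()  # plain builtins, safe to ship between processes
    rebuilt = TestReport._from_json(data)
    # for a failure, the chained-exception repr should survive the trip
    assert str(rebuilt.longrepr) == str(report.longrepr)
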
|  | @ -1,8 +1,6 @@ | ||||||
| import sys | import sys | ||||||
| from unittest import mock | from unittest import mock | ||||||
| 
 | 
 | ||||||
| from test_excinfo import TWMock |  | ||||||
| 
 |  | ||||||
| import _pytest._code | import _pytest._code | ||||||
| import pytest | import pytest | ||||||
| 
 | 
 | ||||||
@@ -168,17 +166,15 @@ class TestTracebackEntry:


 class TestReprFuncArgs:
-    def test_not_raise_exception_with_mixed_encoding(self):
+    def test_not_raise_exception_with_mixed_encoding(self, tw_mock):
         from _pytest._code.code import ReprFuncArgs

-        tw = TWMock()
-
         args = [("unicode_string", "São Paulo"), ("utf8_string", b"S\xc3\xa3o Paulo")]

         r = ReprFuncArgs(args)
-        r.toterminal(tw)
+        r.toterminal(tw_mock)

         assert (
-            tw.lines[0]
+            tw_mock.lines[0]
             == r"unicode_string = São Paulo, utf8_string = b'S\xc3\xa3o Paulo'"
         )

@@ -31,33 +31,6 @@ def limited_recursion_depth():
     sys.setrecursionlimit(before)


-class TWMock:
-    WRITE = object()
-
-    def __init__(self):
-        self.lines = []
-        self.is_writing = False
-
-    def sep(self, sep, line=None):
-        self.lines.append((sep, line))
-
-    def write(self, msg, **kw):
-        self.lines.append((TWMock.WRITE, msg))
-
-    def line(self, line, **kw):
-        self.lines.append(line)
-
-    def markup(self, text, **kw):
-        return text
-
-    def get_write_msg(self, idx):
-        flag, msg = self.lines[idx]
-        assert flag == TWMock.WRITE
-        return msg
-
-    fullwidth = 80
-
-
 def test_excinfo_simple() -> None:
     try:
         raise ValueError
@@ -426,6 +399,13 @@ def test_match_raises_error(testdir):
     result = testdir.runpytest()
     assert result.ret != 0
     result.stdout.fnmatch_lines(["*AssertionError*Pattern*[123]*not found*"])
+    assert "__tracebackhide__ = True" not in result.stdout.str()
+
+    result = testdir.runpytest("--fulltrace")
+    assert result.ret != 0
+    result.stdout.fnmatch_lines(
+        ["*__tracebackhide__ = True*", "*AssertionError*Pattern*[123]*not found*"]
+    )


 class TestFormattedExcinfo:
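The strengthened test pins down the contract: frames that set ``__tracebackhide__`` stay hidden by default and reappear under ``--fulltrace``. For reference, the marker is just a local variable in the frame to hide::

    def check_answer(value):
        __tracebackhide__ = True  # hide this helper frame from tracebacks
        assert value == 42

    def test_answer():
        check_answer(7)  # the failure points here, not inside check_answer()
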
@@ -658,7 +638,7 @@ raise ValueError()
         assert loc.lineno == 3
         # assert loc.message == "ValueError: hello"

-    def test_repr_tracebackentry_lines2(self, importasmod):
+    def test_repr_tracebackentry_lines2(self, importasmod, tw_mock):
         mod = importasmod(
             """
             def func1(m, x, y, z):
@@ -678,13 +658,12 @@ raise ValueError()
         p = FormattedExcinfo(funcargs=True)
         repr_entry = p.repr_traceback_entry(entry)
         assert repr_entry.reprfuncargs.args == reprfuncargs.args
-        tw = TWMock()
-        repr_entry.toterminal(tw)
-        assert tw.lines[0] == "m = " + repr("m" * 90)
-        assert tw.lines[1] == "x = 5, y = 13"
-        assert tw.lines[2] == "z = " + repr("z" * 120)
+        repr_entry.toterminal(tw_mock)
+        assert tw_mock.lines[0] == "m = " + repr("m" * 90)
+        assert tw_mock.lines[1] == "x = 5, y = 13"
+        assert tw_mock.lines[2] == "z = " + repr("z" * 120)

-    def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
+    def test_repr_tracebackentry_lines_var_kw_args(self, importasmod, tw_mock):
         mod = importasmod(
             """
             def func1(x, *y, **z):
@@ -703,9 +682,8 @@ raise ValueError()
         p = FormattedExcinfo(funcargs=True)
         repr_entry = p.repr_traceback_entry(entry)
         assert repr_entry.reprfuncargs.args == reprfuncargs.args
-        tw = TWMock()
-        repr_entry.toterminal(tw)
-        assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
+        repr_entry.toterminal(tw_mock)
+        assert tw_mock.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"

     def test_repr_tracebackentry_short(self, importasmod):
         mod = importasmod(
@@ -842,7 +820,7 @@ raise ValueError()
         assert p._makepath(__file__) == __file__
         p.repr_traceback(excinfo)

-    def test_repr_excinfo_addouterr(self, importasmod):
+    def test_repr_excinfo_addouterr(self, importasmod, tw_mock):
         mod = importasmod(
             """
             def entry():
@@ -852,10 +830,9 @@ raise ValueError()
         excinfo = pytest.raises(ValueError, mod.entry)
         repr = excinfo.getrepr()
         repr.addsection("title", "content")
-        twmock = TWMock()
-        repr.toterminal(twmock)
-        assert twmock.lines[-1] == "content"
-        assert twmock.lines[-2] == ("-", "title")
+        repr.toterminal(tw_mock)
+        assert tw_mock.lines[-1] == "content"
+        assert tw_mock.lines[-2] == ("-", "title")

     def test_repr_excinfo_reprcrash(self, importasmod):
         mod = importasmod(
@@ -920,7 +897,7 @@ raise ValueError()
         x = str(MyRepr())
         assert x == "я"

-    def test_toterminal_long(self, importasmod):
+    def test_toterminal_long(self, importasmod, tw_mock):
         mod = importasmod(
             """
             def g(x):
@@ -932,27 +909,26 @@ raise ValueError()
         excinfo = pytest.raises(ValueError, mod.f)
         excinfo.traceback = excinfo.traceback.filter()
         repr = excinfo.getrepr()
-        tw = TWMock()
-        repr.toterminal(tw)
-        assert tw.lines[0] == ""
-        tw.lines.pop(0)
-        assert tw.lines[0] == "    def f():"
-        assert tw.lines[1] == ">       g(3)"
-        assert tw.lines[2] == ""
-        line = tw.get_write_msg(3)
+        repr.toterminal(tw_mock)
+        assert tw_mock.lines[0] == ""
+        tw_mock.lines.pop(0)
+        assert tw_mock.lines[0] == "    def f():"
+        assert tw_mock.lines[1] == ">       g(3)"
+        assert tw_mock.lines[2] == ""
+        line = tw_mock.get_write_msg(3)
         assert line.endswith("mod.py")
-        assert tw.lines[4] == (":5: ")
-        assert tw.lines[5] == ("_ ", None)
-        assert tw.lines[6] == ""
-        assert tw.lines[7] == "    def g(x):"
-        assert tw.lines[8] == ">       raise ValueError(x)"
-        assert tw.lines[9] == "E       ValueError: 3"
-        assert tw.lines[10] == ""
-        line = tw.get_write_msg(11)
+        assert tw_mock.lines[4] == (":5: ")
+        assert tw_mock.lines[5] == ("_ ", None)
+        assert tw_mock.lines[6] == ""
+        assert tw_mock.lines[7] == "    def g(x):"
+        assert tw_mock.lines[8] == ">       raise ValueError(x)"
+        assert tw_mock.lines[9] == "E       ValueError: 3"
+        assert tw_mock.lines[10] == ""
+        line = tw_mock.get_write_msg(11)
         assert line.endswith("mod.py")
-        assert tw.lines[12] == ":3: ValueError"
+        assert tw_mock.lines[12] == ":3: ValueError"

-    def test_toterminal_long_missing_source(self, importasmod, tmpdir):
+    def test_toterminal_long_missing_source(self, importasmod, tmpdir, tw_mock):
         mod = importasmod(
             """
             def g(x):
@@ -965,25 +941,24 @@ raise ValueError()
         tmpdir.join("mod.py").remove()
         excinfo.traceback = excinfo.traceback.filter()
         repr = excinfo.getrepr()
-        tw = TWMock()
-        repr.toterminal(tw)
-        assert tw.lines[0] == ""
-        tw.lines.pop(0)
-        assert tw.lines[0] == ">   ???"
-        assert tw.lines[1] == ""
-        line = tw.get_write_msg(2)
+        repr.toterminal(tw_mock)
+        assert tw_mock.lines[0] == ""
+        tw_mock.lines.pop(0)
+        assert tw_mock.lines[0] == ">   ???"
+        assert tw_mock.lines[1] == ""
+        line = tw_mock.get_write_msg(2)
         assert line.endswith("mod.py")
-        assert tw.lines[3] == ":5: "
-        assert tw.lines[4] == ("_ ", None)
-        assert tw.lines[5] == ""
-        assert tw.lines[6] == ">   ???"
-        assert tw.lines[7] == "E   ValueError: 3"
-        assert tw.lines[8] == ""
-        line = tw.get_write_msg(9)
+        assert tw_mock.lines[3] == ":5: "
+        assert tw_mock.lines[4] == ("_ ", None)
+        assert tw_mock.lines[5] == ""
+        assert tw_mock.lines[6] == ">   ???"
+        assert tw_mock.lines[7] == "E   ValueError: 3"
+        assert tw_mock.lines[8] == ""
+        line = tw_mock.get_write_msg(9)
         assert line.endswith("mod.py")
-        assert tw.lines[10] == ":3: ValueError"
+        assert tw_mock.lines[10] == ":3: ValueError"

-    def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
+    def test_toterminal_long_incomplete_source(self, importasmod, tmpdir, tw_mock):
         mod = importasmod(
             """
             def g(x):
@@ -996,25 +971,24 @@ raise ValueError()
         tmpdir.join("mod.py").write("asdf")
         excinfo.traceback = excinfo.traceback.filter()
         repr = excinfo.getrepr()
-        tw = TWMock()
-        repr.toterminal(tw)
-        assert tw.lines[0] == ""
-        tw.lines.pop(0)
-        assert tw.lines[0] == ">   ???"
-        assert tw.lines[1] == ""
-        line = tw.get_write_msg(2)
+        repr.toterminal(tw_mock)
+        assert tw_mock.lines[0] == ""
+        tw_mock.lines.pop(0)
+        assert tw_mock.lines[0] == ">   ???"
+        assert tw_mock.lines[1] == ""
+        line = tw_mock.get_write_msg(2)
         assert line.endswith("mod.py")
-        assert tw.lines[3] == ":5: "
-        assert tw.lines[4] == ("_ ", None)
-        assert tw.lines[5] == ""
-        assert tw.lines[6] == ">   ???"
-        assert tw.lines[7] == "E   ValueError: 3"
-        assert tw.lines[8] == ""
-        line = tw.get_write_msg(9)
+        assert tw_mock.lines[3] == ":5: "
+        assert tw_mock.lines[4] == ("_ ", None)
+        assert tw_mock.lines[5] == ""
+        assert tw_mock.lines[6] == ">   ???"
+        assert tw_mock.lines[7] == "E   ValueError: 3"
+        assert tw_mock.lines[8] == ""
+        line = tw_mock.get_write_msg(9)
         assert line.endswith("mod.py")
-        assert tw.lines[10] == ":3: ValueError"
+        assert tw_mock.lines[10] == ":3: ValueError"

-    def test_toterminal_long_filenames(self, importasmod):
+    def test_toterminal_long_filenames(self, importasmod, tw_mock):
         mod = importasmod(
             """
             def f():
@@ -1022,23 +996,22 @@ raise ValueError()
         """
         )
         excinfo = pytest.raises(ValueError, mod.f)
-        tw = TWMock()
         path = py.path.local(mod.__file__)
         old = path.dirpath().chdir()
         try:
             repr = excinfo.getrepr(abspath=False)
-            repr.toterminal(tw)
+            repr.toterminal(tw_mock)
             x = py.path.local().bestrelpath(path)
             if len(x) < len(str(path)):
-                msg = tw.get_write_msg(-2)
+                msg = tw_mock.get_write_msg(-2)
                 assert msg == "mod.py"
-                assert tw.lines[-1] == ":3: ValueError"
+                assert tw_mock.lines[-1] == ":3: ValueError"

             repr = excinfo.getrepr(abspath=True)
-            repr.toterminal(tw)
-            msg = tw.get_write_msg(-2)
+            repr.toterminal(tw_mock)
+            msg = tw_mock.get_write_msg(-2)
             assert msg == path
-            line = tw.lines[-1]
+            line = tw_mock.lines[-1]
             assert line == ":3: ValueError"
         finally:
             old.chdir()
@@ -1073,7 +1046,7 @@ raise ValueError()
         repr.toterminal(tw)
         assert tw.stringio.getvalue()

-    def test_traceback_repr_style(self, importasmod):
+    def test_traceback_repr_style(self, importasmod, tw_mock):
         mod = importasmod(
             """
             def f():
@@ -1091,35 +1064,34 @@ raise ValueError()
         excinfo.traceback[1].set_repr_style("short")
         excinfo.traceback[2].set_repr_style("short")
         r = excinfo.getrepr(style="long")
-        tw = TWMock()
-        r.toterminal(tw)
-        for line in tw.lines:
+        r.toterminal(tw_mock)
+        for line in tw_mock.lines:
             print(line)
-        assert tw.lines[0] == ""
-        assert tw.lines[1] == "    def f():"
-        assert tw.lines[2] == ">       g()"
-        assert tw.lines[3] == ""
-        msg = tw.get_write_msg(4)
+        assert tw_mock.lines[0] == ""
+        assert tw_mock.lines[1] == "    def f():"
+        assert tw_mock.lines[2] == ">       g()"
+        assert tw_mock.lines[3] == ""
+        msg = tw_mock.get_write_msg(4)
         assert msg.endswith("mod.py")
-        assert tw.lines[5] == ":3: "
-        assert tw.lines[6] == ("_ ", None)
-        tw.get_write_msg(7)
-        assert tw.lines[8].endswith("in g")
-        assert tw.lines[9] == "    h()"
-        tw.get_write_msg(10)
-        assert tw.lines[11].endswith("in h")
-        assert tw.lines[12] == "    i()"
-        assert tw.lines[13] == ("_ ", None)
-        assert tw.lines[14] == ""
-        assert tw.lines[15] == "    def i():"
-        assert tw.lines[16] == ">       raise ValueError()"
-        assert tw.lines[17] == "E       ValueError"
-        assert tw.lines[18] == ""
-        msg = tw.get_write_msg(19)
+        assert tw_mock.lines[5] == ":3: "
+        assert tw_mock.lines[6] == ("_ ", None)
+        tw_mock.get_write_msg(7)
+        assert tw_mock.lines[8].endswith("in g")
+        assert tw_mock.lines[9] == "    h()"
+        tw_mock.get_write_msg(10)
+        assert tw_mock.lines[11].endswith("in h")
+        assert tw_mock.lines[12] == "    i()"
+        assert tw_mock.lines[13] == ("_ ", None)
+        assert tw_mock.lines[14] == ""
+        assert tw_mock.lines[15] == "    def i():"
+        assert tw_mock.lines[16] == ">       raise ValueError()"
+        assert tw_mock.lines[17] == "E       ValueError"
+        assert tw_mock.lines[18] == ""
+        msg = tw_mock.get_write_msg(19)
         msg.endswith("mod.py")
-        assert tw.lines[20] == ":9: ValueError"
+        assert tw_mock.lines[20] == ":9: ValueError"

-    def test_exc_chain_repr(self, importasmod):
+    def test_exc_chain_repr(self, importasmod, tw_mock):
         mod = importasmod(
             """
             class Err(Exception):
@@ -1140,72 +1112,71 @@ raise ValueError()
         )
         excinfo = pytest.raises(AttributeError, mod.f)
         r = excinfo.getrepr(style="long")
-        tw = TWMock()
-        r.toterminal(tw)
-        for line in tw.lines:
+        r.toterminal(tw_mock)
+        for line in tw_mock.lines:
             print(line)
-        assert tw.lines[0] == ""
-        assert tw.lines[1] == "    def f():"
-        assert tw.lines[2] == "        try:"
-        assert tw.lines[3] == ">           g()"
-        assert tw.lines[4] == ""
-        line = tw.get_write_msg(5)
+        assert tw_mock.lines[0] == ""
+        assert tw_mock.lines[1] == "    def f():"
+        assert tw_mock.lines[2] == "        try:"
+        assert tw_mock.lines[3] == ">           g()"
+        assert tw_mock.lines[4] == ""
+        line = tw_mock.get_write_msg(5)
         assert line.endswith("mod.py")
-        assert tw.lines[6] == ":6: "
-        assert tw.lines[7] == ("_ ", None)
-        assert tw.lines[8] == ""
-        assert tw.lines[9] == "    def g():"
-        assert tw.lines[10] == ">       raise ValueError()"
-        assert tw.lines[11] == "E       ValueError"
-        assert tw.lines[12] == ""
-        line = tw.get_write_msg(13)
+        assert tw_mock.lines[6] == ":6: "
+        assert tw_mock.lines[7] == ("_ ", None)
+        assert tw_mock.lines[8] == ""
+        assert tw_mock.lines[9] == "    def g():"
+        assert tw_mock.lines[10] == ">       raise ValueError()"
+        assert tw_mock.lines[11] == "E       ValueError"
+        assert tw_mock.lines[12] == ""
+        line = tw_mock.get_write_msg(13)
         assert line.endswith("mod.py")
-        assert tw.lines[14] == ":12: ValueError"
-        assert tw.lines[15] == ""
+        assert tw_mock.lines[14] == ":12: ValueError"
+        assert tw_mock.lines[15] == ""
         assert (
-            tw.lines[16]
+            tw_mock.lines[16]
             == "The above exception was the direct cause of the following exception:"
         )
-        assert tw.lines[17] == ""
-        assert tw.lines[18] == "    def f():"
-        assert tw.lines[19] == "        try:"
-        assert tw.lines[20] == "            g()"
-        assert tw.lines[21] == "        except Exception as e:"
-        assert tw.lines[22] == ">           raise Err() from e"
-        assert tw.lines[23] == "E           test_exc_chain_repr0.mod.Err"
-        assert tw.lines[24] == ""
-        line = tw.get_write_msg(25)
+        assert tw_mock.lines[17] == ""
+        assert tw_mock.lines[18] == "    def f():"
+        assert tw_mock.lines[19] == "        try:"
+        assert tw_mock.lines[20] == "            g()"
+        assert tw_mock.lines[21] == "        except Exception as e:"
+        assert tw_mock.lines[22] == ">           raise Err() from e"
+        assert tw_mock.lines[23] == "E           test_exc_chain_repr0.mod.Err"
+        assert tw_mock.lines[24] == ""
+        line = tw_mock.get_write_msg(25)
         assert line.endswith("mod.py")
-        assert tw.lines[26] == ":8: Err"
-        assert tw.lines[27] == ""
+        assert tw_mock.lines[26] == ":8: Err"
+        assert tw_mock.lines[27] == ""
         assert (
-            tw.lines[28]
+            tw_mock.lines[28]
             == "During handling of the above exception, another exception occurred:"
         )
-        assert tw.lines[29] == ""
-        assert tw.lines[30] == "    def f():"
-        assert tw.lines[31] == "        try:"
-        assert tw.lines[32] == "            g()"
-        assert tw.lines[33] == "        except Exception as e:"
-        assert tw.lines[34] == "            raise Err() from e"
-        assert tw.lines[35] == "        finally:"
-        assert tw.lines[36] == ">           h()"
-        assert tw.lines[37] == ""
-        line = tw.get_write_msg(38)
+        assert tw_mock.lines[29] == ""
+        assert tw_mock.lines[30] == "    def f():"
+        assert tw_mock.lines[31] == "        try:"
+        assert tw_mock.lines[32] == "            g()"
+        assert tw_mock.lines[33] == "        except Exception as e:"
+        assert tw_mock.lines[34] == "            raise Err() from e"
+        assert tw_mock.lines[35] == "        finally:"
+        assert tw_mock.lines[36] == ">           h()"
+        assert tw_mock.lines[37] == ""
+        line = tw_mock.get_write_msg(38)
         assert line.endswith("mod.py")
-        assert tw.lines[39] == ":10: "
-        assert tw.lines[40] == ("_ ", None)
-        assert tw.lines[41] == ""
-        assert tw.lines[42] == "    def h():"
-        assert tw.lines[43] == ">       raise AttributeError()"
-        assert tw.lines[44] == "E       AttributeError"
-        assert tw.lines[45] == ""
-        line = tw.get_write_msg(46)
+        assert tw_mock.lines[39] == ":10: "
+        assert tw_mock.lines[40] == ("_ ", None)
+        assert tw_mock.lines[41] == ""
+        assert tw_mock.lines[42] == "    def h():"
+        assert tw_mock.lines[43] == ">       raise AttributeError()"
+        assert tw_mock.lines[44] == "E       AttributeError"
+        assert tw_mock.lines[45] == ""
+        line = tw_mock.get_write_msg(46)
         assert line.endswith("mod.py")
-        assert tw.lines[47] == ":15: AttributeError"
+        assert tw_mock.lines[47] == ":15: AttributeError"

     @pytest.mark.parametrize("mode", ["from_none", "explicit_suppress"])
-    def test_exc_repr_chain_suppression(self, importasmod, mode):
+    def test_exc_repr_chain_suppression(self, importasmod, mode, tw_mock):
         """Check that exc repr does not show chained exceptions in Python 3.
         - When the exception is raised with "from None"
         - Explicitly suppressed with "chain=False" to ExceptionInfo.getrepr().
@@ -1226,24 +1197,23 @@ raise ValueError()
         )
         excinfo = pytest.raises(AttributeError, mod.f)
         r = excinfo.getrepr(style="long", chain=mode != "explicit_suppress")
-        tw = TWMock()
-        r.toterminal(tw)
-        for line in tw.lines:
+        r.toterminal(tw_mock)
+        for line in tw_mock.lines:
             print(line)
-        assert tw.lines[0] == ""
-        assert tw.lines[1] == "    def f():"
-        assert tw.lines[2] == "        try:"
-        assert tw.lines[3] == "            g()"
-        assert tw.lines[4] == "        except Exception:"
-        assert tw.lines[5] == ">           raise AttributeError(){}".format(
+        assert tw_mock.lines[0] == ""
+        assert tw_mock.lines[1] == "    def f():"
+        assert tw_mock.lines[2] == "        try:"
+        assert tw_mock.lines[3] == "            g()"
+        assert tw_mock.lines[4] == "        except Exception:"
+        assert tw_mock.lines[5] == ">           raise AttributeError(){}".format(
             raise_suffix
         )
-        assert tw.lines[6] == "E           AttributeError"
-        assert tw.lines[7] == ""
-        line = tw.get_write_msg(8)
+        assert tw_mock.lines[6] == "E           AttributeError"
+        assert tw_mock.lines[7] == ""
+        line = tw_mock.get_write_msg(8)
         assert line.endswith("mod.py")
-        assert tw.lines[9] == ":6: AttributeError"
-        assert len(tw.lines) == 10
+        assert tw_mock.lines[9] == ":6: AttributeError"
+        assert len(tw_mock.lines) == 10

     @pytest.mark.parametrize(
         "reason, description",
@@ -1304,7 +1274,7 @@ raise ValueError()
             ]
         )

-    def test_exc_chain_repr_cycle(self, importasmod):
+    def test_exc_chain_repr_cycle(self, importasmod, tw_mock):
         mod = importasmod(
             """
             class Err(Exception):
@@ -1325,9 +1295,8 @@ raise ValueError()
         )
         excinfo = pytest.raises(ZeroDivisionError, mod.unreraise)
         r = excinfo.getrepr(style="short")
-        tw = TWMock()
-        r.toterminal(tw)
-        out = "\n".join(line for line in tw.lines if isinstance(line, str))
+        r.toterminal(tw_mock)
+        out = "\n".join(line for line in tw_mock.lines if isinstance(line, str))
         expected_out = textwrap.dedent(
             """\
             :13: in unreraise

|  | @ -55,3 +55,36 @@ def pytest_collection_modifyitems(config, items): | ||||||
|     items[:] = fast_items + neutral_items + slow_items + slowest_items |     items[:] = fast_items + neutral_items + slow_items + slowest_items | ||||||
| 
 | 
 | ||||||
|     yield |     yield | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @pytest.fixture | ||||||
|  | def tw_mock(): | ||||||
|  |     """Returns a mock terminal writer""" | ||||||
|  | 
 | ||||||
|  |     class TWMock: | ||||||
|  |         WRITE = object() | ||||||
|  | 
 | ||||||
|  |         def __init__(self): | ||||||
|  |             self.lines = [] | ||||||
|  |             self.is_writing = False | ||||||
|  | 
 | ||||||
|  |         def sep(self, sep, line=None): | ||||||
|  |             self.lines.append((sep, line)) | ||||||
|  | 
 | ||||||
|  |         def write(self, msg, **kw): | ||||||
|  |             self.lines.append((TWMock.WRITE, msg)) | ||||||
|  | 
 | ||||||
|  |         def line(self, line, **kw): | ||||||
|  |             self.lines.append(line) | ||||||
|  | 
 | ||||||
|  |         def markup(self, text, **kw): | ||||||
|  |             return text | ||||||
|  | 
 | ||||||
|  |         def get_write_msg(self, idx): | ||||||
|  |             flag, msg = self.lines[idx] | ||||||
|  |             assert flag == TWMock.WRITE | ||||||
|  |             return msg | ||||||
|  | 
 | ||||||
|  |         fullwidth = 80 | ||||||
|  | 
 | ||||||
|  |     return TWMock() | ||||||
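|  | # Hypothetical usage (illustration only): ``line()`` records plain | ||||||
|  | # strings, while ``write()`` records ``(WRITE, msg)`` tuples that are | ||||||
|  | # read back with ``get_write_msg()``: | ||||||
|  | # | ||||||
|  | #     def test_tw_mock_example(tw_mock): | ||||||
|  | #         tw_mock.line("a line") | ||||||
|  | #         tw_mock.write("raw output") | ||||||
|  | #         assert tw_mock.lines[0] == "a line" | ||||||
|  | #         assert tw_mock.get_write_msg(1) == "raw output" | ||||||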
|  |  | ||||||
|  | @ -449,7 +449,8 @@ class TestFillFixtures: | ||||||
|                 "*ERROR at setup of test_lookup_error*", |                 "*ERROR at setup of test_lookup_error*", | ||||||
|                 "  def test_lookup_error(unknown):*", |                 "  def test_lookup_error(unknown):*", | ||||||
|                 "E       fixture 'unknown' not found", |                 "E       fixture 'unknown' not found", | ||||||
|                 ">       available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*",  # sorted |                 ">       available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*", | ||||||
|  |                 # sorted | ||||||
|                 ">       use 'py*test --fixtures *' for help on them.", |                 ">       use 'py*test --fixtures *' for help on them.", | ||||||
|                 "*1 error*", |                 "*1 error*", | ||||||
|             ] |             ] | ||||||
|  | @ -3945,6 +3946,38 @@ class TestScopeOrdering: | ||||||
|         reprec = testdir.inline_run() |         reprec = testdir.inline_run() | ||||||
|         reprec.assertoutcome(passed=2) |         reprec.assertoutcome(passed=2) | ||||||
| 
 | 
 | ||||||
|  |     def test_class_fixture_self_instance(self, testdir): | ||||||
|  |         """Check that plugin classes which implement fixtures receive the plugin instance | ||||||
|  |         as self (see #2270). | ||||||
|  |         """ | ||||||
|  |         testdir.makeconftest( | ||||||
|  |             """ | ||||||
|  |             import pytest | ||||||
|  | 
 | ||||||
|  |             def pytest_configure(config): | ||||||
|  |                 config.pluginmanager.register(MyPlugin()) | ||||||
|  | 
 | ||||||
|  |             class MyPlugin: | ||||||
|  |                 def __init__(self): | ||||||
|  |                     self.arg = 1 | ||||||
|  | 
 | ||||||
|  |                 @pytest.fixture(scope='function') | ||||||
|  |                 def myfix(self): | ||||||
|  |                     assert isinstance(self, MyPlugin) | ||||||
|  |                     return self.arg | ||||||
|  |         """ | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |         testdir.makepyfile( | ||||||
|  |             """ | ||||||
|  |             class TestClass(object): | ||||||
|  |                 def test_1(self, myfix): | ||||||
|  |                     assert myfix == 1 | ||||||
|  |         """ | ||||||
|  |         ) | ||||||
|  |         reprec = testdir.inline_run() | ||||||
|  |         reprec.assertoutcome(passed=1) | ||||||
|  | 
 | ||||||
| 
 | 
 | ||||||
| def test_call_fixture_function_error(): | def test_call_fixture_function_error(): | ||||||
|     """Check if an error is raised if a fixture function is called directly (#4545)""" |     """Check if an error is raised if a fixture function is called directly (#4545)""" | ||||||
|  | @ -4009,3 +4042,55 @@ def test_fixture_named_request(testdir): | ||||||
|             "  *test_fixture_named_request.py:5", |             "  *test_fixture_named_request.py:5", | ||||||
|         ] |         ] | ||||||
|     ) |     ) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def test_indirect_fixture_does_not_break_scope(testdir): | ||||||
|  |     """Ensure that fixture scope is respected when using indirect fixtures (#570)""" | ||||||
|  |     testdir.makepyfile( | ||||||
|  |         """ | ||||||
|  |         import pytest | ||||||
|  | 
 | ||||||
|  |         instantiated = [] | ||||||
|  | 
 | ||||||
|  |         @pytest.fixture(scope="session") | ||||||
|  |         def fixture_1(request): | ||||||
|  |             instantiated.append(("fixture_1", request.param)) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |         @pytest.fixture(scope="session") | ||||||
|  |         def fixture_2(request): | ||||||
|  |             instantiated.append(("fixture_2", request.param)) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |         scenarios = [ | ||||||
|  |             ("A", "a1"), | ||||||
|  |             ("A", "a2"), | ||||||
|  |             ("B", "b1"), | ||||||
|  |             ("B", "b2"), | ||||||
|  |             ("C", "c1"), | ||||||
|  |             ("C", "c2"), | ||||||
|  |         ] | ||||||
|  | 
 | ||||||
|  |         @pytest.mark.parametrize( | ||||||
|  |             "fixture_1,fixture_2", scenarios, indirect=["fixture_1", "fixture_2"] | ||||||
|  |         ) | ||||||
|  |         def test_create_fixtures(fixture_1, fixture_2): | ||||||
|  |             pass | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |         def test_check_fixture_instantiations(): | ||||||
|  |             assert instantiated == [ | ||||||
|  |                 ('fixture_1', 'A'), | ||||||
|  |                 ('fixture_2', 'a1'), | ||||||
|  |                 ('fixture_2', 'a2'), | ||||||
|  |                 ('fixture_1', 'B'), | ||||||
|  |                 ('fixture_2', 'b1'), | ||||||
|  |                 ('fixture_2', 'b2'), | ||||||
|  |                 ('fixture_1', 'C'), | ||||||
|  |                 ('fixture_2', 'c1'), | ||||||
|  |                 ('fixture_2', 'c2'), | ||||||
|  |             ] | ||||||
|  |     """ | ||||||
|  |     ) | ||||||
|  |     result = testdir.runpytest() | ||||||
|  |     result.assert_outcomes(passed=7) | ||||||
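|  | # The expected list above encodes the #570 fix: the session-scoped | ||||||
|  | # ``fixture_1`` is set up once per distinct parameter value ("A", "B", | ||||||
|  | # "C") and reused across the tests that share it, instead of being | ||||||
|  | # recreated for every parametrized test. | ||||||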
|  |  | ||||||
|  | @ -1,4 +1,6 @@ | ||||||
|  | import os | ||||||
| import textwrap | import textwrap | ||||||
|  | from pathlib import Path | ||||||
| 
 | 
 | ||||||
| import py | import py | ||||||
| 
 | 
 | ||||||
|  | @ -163,11 +165,12 @@ def test_setinitial_conftest_subdirs(testdir, name): | ||||||
|     subconftest = sub.ensure("conftest.py") |     subconftest = sub.ensure("conftest.py") | ||||||
|     conftest = PytestPluginManager() |     conftest = PytestPluginManager() | ||||||
|     conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir) |     conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir) | ||||||
|  |     key = Path(str(subconftest)).resolve() | ||||||
|     if name not in ("whatever", ".dotdir"): |     if name not in ("whatever", ".dotdir"): | ||||||
|         assert subconftest in conftest._conftestpath2mod |         assert key in conftest._conftestpath2mod | ||||||
|         assert len(conftest._conftestpath2mod) == 1 |         assert len(conftest._conftestpath2mod) == 1 | ||||||
|     else: |     else: | ||||||
|         assert subconftest not in conftest._conftestpath2mod |         assert key not in conftest._conftestpath2mod | ||||||
|         assert len(conftest._conftestpath2mod) == 0 |         assert len(conftest._conftestpath2mod) == 0 | ||||||
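|  | # The resolved ``Path`` is the cache key because ``Path.resolve()`` | ||||||
|  | # yields one canonical absolute form per file (symlinks and ``..`` | ||||||
|  | # components eliminated), so lookups don't depend on spelling, e.g.: | ||||||
|  | # | ||||||
|  | #     from pathlib import Path | ||||||
|  | #     Path("sub/../sub/conftest.py").resolve() | ||||||
|  | #     # e.g. PosixPath('/tmp/project/sub/conftest.py') | ||||||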
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -275,6 +278,31 @@ def test_conftest_symlink_files(testdir): | ||||||
|     assert result.ret == ExitCode.OK |     assert result.ret == ExitCode.OK | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @pytest.mark.skipif( | ||||||
|  |     os.path.normcase("x") != os.path.normcase("X"), | ||||||
|  |     reason="only relevant for case insensitive file systems", | ||||||
|  | ) | ||||||
|  | def test_conftest_badcase(testdir): | ||||||
|  |     """Check conftest.py loading when directory casing is wrong (#5792).""" | ||||||
|  |     testdir.tmpdir.mkdir("JenkinsRoot").mkdir("test") | ||||||
|  |     source = {"setup.py": "", "test/__init__.py": "", "test/conftest.py": ""} | ||||||
|  |     testdir.makepyfile(**{"JenkinsRoot/%s" % k: v for k, v in source.items()}) | ||||||
|  | 
 | ||||||
|  |     testdir.tmpdir.join("jenkinsroot/test").chdir() | ||||||
|  |     result = testdir.runpytest() | ||||||
|  |     assert result.ret == ExitCode.NO_TESTS_COLLECTED | ||||||
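|  | # The ``skipif`` guard works because ``os.path.normcase`` lower-cases | ||||||
|  | # its argument on Windows and is a no-op on POSIX, e.g.: | ||||||
|  | # | ||||||
|  | #     import os | ||||||
|  | #     os.path.normcase("JenkinsRoot")  # 'jenkinsroot' on Windows, | ||||||
|  | #                                      # 'JenkinsRoot' on POSIX | ||||||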
|  | 
 | ||||||
|  | 
 | ||||||
|  | def test_conftest_uppercase(testdir): | ||||||
|  |     """Check conftest.py whose qualified name contains uppercase characters (#5819)""" | ||||||
|  |     source = {"__init__.py": "", "Foo/conftest.py": "", "Foo/__init__.py": ""} | ||||||
|  |     testdir.makepyfile(**source) | ||||||
|  | 
 | ||||||
|  |     testdir.tmpdir.chdir() | ||||||
|  |     result = testdir.runpytest() | ||||||
|  |     assert result.ret == ExitCode.NO_TESTS_COLLECTED | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
| def test_no_conftest(testdir): | def test_no_conftest(testdir): | ||||||
|     testdir.makeconftest("assert 0") |     testdir.makeconftest("assert 0") | ||||||
|     result = testdir.runpytest("--noconftest") |     result = testdir.runpytest("--noconftest") | ||||||
|  |  | ||||||
|  | @ -853,7 +853,7 @@ class TestDebuggingBreakpoints: | ||||||
|         Test that supports breakpoint global marks on Python 3.7+ and not on |         Test that supports breakpoint global marks on Python 3.7+ and not on | ||||||
|         CPython 3.5, 2.7 |         CPython 3.5, 2.7 | ||||||
|         """ |         """ | ||||||
|         if sys.version_info.major == 3 and sys.version_info.minor >= 7: |         if sys.version_info >= (3, 7): | ||||||
|             assert SUPPORTS_BREAKPOINT_BUILTIN is True |             assert SUPPORTS_BREAKPOINT_BUILTIN is True | ||||||
|         if sys.version_info.major == 3 and sys.version_info.minor == 5: |         if sys.version_info.major == 3 and sys.version_info.minor == 5: | ||||||
|             assert SUPPORTS_BREAKPOINT_BUILTIN is False |             assert SUPPORTS_BREAKPOINT_BUILTIN is False | ||||||
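|  | # ``sys.version_info`` compares lexicographically against plain tuples, | ||||||
|  | # so the rewritten check covers 3.7 and every later release, e.g.: | ||||||
|  | # | ||||||
|  | #     import sys | ||||||
|  | #     sys.version_info >= (3, 7)       # True on CPython 3.7+ | ||||||
|  | #     (3, 8, 0, "final", 0) >= (3, 7)  # True | ||||||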
|  |  | ||||||
|  | @ -1,4 +1,5 @@ | ||||||
| import pytest | import pytest | ||||||
|  | from _pytest._code.code import ExceptionChainRepr | ||||||
| from _pytest.pathlib import Path | from _pytest.pathlib import Path | ||||||
| from _pytest.reports import CollectReport | from _pytest.reports import CollectReport | ||||||
| from _pytest.reports import TestReport | from _pytest.reports import TestReport | ||||||
|  | @ -220,8 +221,8 @@ class TestReportSerialization: | ||||||
|         assert data["path1"] == str(testdir.tmpdir) |         assert data["path1"] == str(testdir.tmpdir) | ||||||
|         assert data["path2"] == str(testdir.tmpdir) |         assert data["path2"] == str(testdir.tmpdir) | ||||||
| 
 | 
 | ||||||
|     def test_unserialization_failure(self, testdir): |     def test_deserialization_failure(self, testdir): | ||||||
|         """Check handling of failure during unserialization of report types.""" |         """Check handling of failure during deserialization of report types.""" | ||||||
|         testdir.makepyfile( |         testdir.makepyfile( | ||||||
|             """ |             """ | ||||||
|             def test_a(): |             def test_a(): | ||||||
|  | @ -242,6 +243,75 @@ class TestReportSerialization: | ||||||
|         ): |         ): | ||||||
|             TestReport._from_json(data) |             TestReport._from_json(data) | ||||||
| 
 | 
 | ||||||
|  |     @pytest.mark.parametrize("report_class", [TestReport, CollectReport]) | ||||||
|  |     def test_chained_exceptions(self, testdir, tw_mock, report_class): | ||||||
|  |         """Check serialization/deserialization of report objects containing chained exceptions (#5786)""" | ||||||
|  |         testdir.makepyfile( | ||||||
|  |             """ | ||||||
|  |             def foo(): | ||||||
|  |                 raise ValueError('value error') | ||||||
|  |             def test_a(): | ||||||
|  |                 try: | ||||||
|  |                     foo() | ||||||
|  |                 except ValueError as e: | ||||||
|  |                     raise RuntimeError('runtime error') from e | ||||||
|  |             if {error_during_import}: | ||||||
|  |                 test_a() | ||||||
|  |         """.format( | ||||||
|  |                 error_during_import=report_class is CollectReport | ||||||
|  |             ) | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |         reprec = testdir.inline_run() | ||||||
|  |         if report_class is TestReport: | ||||||
|  |             reports = reprec.getreports("pytest_runtest_logreport") | ||||||
|  |             # we have 3 reports: setup/call/teardown | ||||||
|  |             assert len(reports) == 3 | ||||||
|  |             # get the call report | ||||||
|  |             report = reports[1] | ||||||
|  |         else: | ||||||
|  |             assert report_class is CollectReport | ||||||
|  |             # two collection reports: session and test file | ||||||
|  |             reports = reprec.getreports("pytest_collectreport") | ||||||
|  |             assert len(reports) == 2 | ||||||
|  |             report = reports[1] | ||||||
|  | 
 | ||||||
|  |         def check_longrepr(longrepr): | ||||||
|  |             """Check the attributes of the given longrepr object according to the test file. | ||||||
|  | 
 | ||||||
|  |             We can get away with testing both CollectReport and TestReport with this function because | ||||||
|  |             the longrepr objects are very similar. | ||||||
|  |             """ | ||||||
|  |             assert isinstance(longrepr, ExceptionChainRepr) | ||||||
|  |             assert longrepr.sections == [("title", "contents", "=")] | ||||||
|  |             assert len(longrepr.chain) == 2 | ||||||
|  |             entry1, entry2 = longrepr.chain | ||||||
|  |             tb1, fileloc1, desc1 = entry1 | ||||||
|  |             tb2, fileloc2, desc2 = entry2 | ||||||
|  | 
 | ||||||
|  |             assert "ValueError('value error')" in str(tb1) | ||||||
|  |             assert "RuntimeError('runtime error')" in str(tb2) | ||||||
|  | 
 | ||||||
|  |             assert ( | ||||||
|  |                 desc1 | ||||||
|  |                 == "The above exception was the direct cause of the following exception:" | ||||||
|  |             ) | ||||||
|  |             assert desc2 is None | ||||||
|  | 
 | ||||||
|  |         assert report.failed | ||||||
|  |         assert len(report.sections) == 0 | ||||||
|  |         report.longrepr.addsection("title", "contents", "=") | ||||||
|  |         check_longrepr(report.longrepr) | ||||||
|  | 
 | ||||||
|  |         data = report._to_json() | ||||||
|  |         loaded_report = report_class._from_json(data) | ||||||
|  |         check_longrepr(loaded_report.longrepr) | ||||||
|  | 
 | ||||||
|  |         # make sure we don't blow up on ``toterminal`` call; we don't test the actual output because it is very | ||||||
|  |         # brittle and hard to maintain, but we can assume it is correct because ``toterminal`` is already tested | ||||||
|  |         # elsewhere and we do check the contents of the longrepr object after loading it. | ||||||
|  |         loaded_report.longrepr.toterminal(tw_mock) | ||||||
|  | 
 | ||||||
| 
 | 
 | ||||||
| class TestHooks: | class TestHooks: | ||||||
|     """Test that the hooks are working correctly for plugins""" |     """Test that the hooks are working correctly for plugins""" | ||||||
|  |  | ||||||