commit 5bf9f9a711
				|  | @ -16,3 +16,11 @@ source = src/ | |||
|   */lib/python*/site-packages/ | ||||
|   */pypy*/site-packages/ | ||||
|   *\Lib\site-packages\ | ||||
| 
 | ||||
| [report] | ||||
| skip_covered = True | ||||
| show_missing = True | ||||
| exclude_lines = | ||||
|     \#\s*pragma: no cover | ||||
|     ^\s*raise NotImplementedError\b | ||||
|     ^\s*return NotImplemented\b | ||||
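| 
 | ||||
| # Illustrative note (not part of the repository's files): with the patterns | ||||
| # added above, source lines like the following would be excluded from the | ||||
| # coverage report; the function names are invented for the example: | ||||
| # | ||||
| #     def future_feature(): | ||||
| #         raise NotImplementedError  # matched by the second pattern | ||||
| # | ||||
| #     def __eq__(self, other):      # (inside some class) | ||||
| #         return NotImplemented     # matched by the third pattern | ||||
| # | ||||
| #     def helper():  # pragma: no cover -- matched by the first pattern | ||||
| #         ... | ||||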
|  |  | |||
|  | @ -26,7 +26,7 @@ repos: | |||
|     hooks: | ||||
|     -   id: flake8 | ||||
|         language_version: python3 | ||||
|         additional_dependencies: [flake8-typing-imports] | ||||
|         additional_dependencies: [flake8-typing-imports==1.3.0] | ||||
| -   repo: https://github.com/asottile/reorder_python_imports | ||||
|     rev: v1.4.0 | ||||
|     hooks: | ||||
|  |  | |||
|  | @ -43,7 +43,8 @@ jobs: | |||
|       python: 'pypy3' | ||||
| 
 | ||||
|     - env: TOXENV=py35-xdist | ||||
|       python: '3.5' | ||||
|       dist: trusty | ||||
|       python: '3.5.0' | ||||
| 
 | ||||
|     # Coverage for: | ||||
|     # - pytester's LsofFdLeakChecker | ||||
|  |  | |||
							
								
								
									
CHANGELOG.rst | 167
							|  | @ -18,6 +18,173 @@ with advance notice in the **Deprecations** section of releases. | |||
| 
 | ||||
| .. towncrier release notes start | ||||
| 
 | ||||
| pytest 5.1.1 (2019-08-20) | ||||
| ========================= | ||||
| 
 | ||||
| Bug Fixes | ||||
| --------- | ||||
| 
 | ||||
| - `#5751 <https://github.com/pytest-dev/pytest/issues/5751>`_: Fixed ``TypeError`` when importing pytest on Python 3.5.0 and 3.5.1. | ||||
| 
 | ||||
| 
 | ||||
| pytest 5.1.0 (2019-08-15) | ||||
| ========================= | ||||
| 
 | ||||
| Removals | ||||
| -------- | ||||
| 
 | ||||
| - `#5180 <https://github.com/pytest-dev/pytest/issues/5180>`_: As per our policy, the following features have been deprecated in the 4.X series and are now | ||||
|   removed: | ||||
| 
 | ||||
|   * ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead. | ||||
| 
 | ||||
|   * ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument (see the migration sketch at the end of this entry). | ||||
| 
 | ||||
|   * ``message`` parameter of ``pytest.raises``. | ||||
| 
 | ||||
|   * ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only | ||||
|     syntax. This might change the exception message from previous versions, but they still raise | ||||
|     ``TypeError`` on unknown keyword arguments as before. | ||||
| 
 | ||||
|   * ``pytest.config`` global variable. | ||||
| 
 | ||||
|   * ``tmpdir_factory.ensuretemp`` method. | ||||
| 
 | ||||
|   * ``pytest_logwarning`` hook. | ||||
| 
 | ||||
|   * ``RemovedInPytest4Warning`` warning type. | ||||
| 
 | ||||
|   * ``request`` is now a reserved name for fixtures. | ||||
| 
 | ||||
| 
 | ||||
|   For more information consult | ||||
|   `Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs. | ||||
| 
 | ||||
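|   A minimal migration sketch for the ``pytest.raises`` items above (illustrative | ||||
|   only; the string form and the ``message`` parameter are gone, while the | ||||
|   context-manager form and ``match`` remain available): | ||||
| 
 | ||||
|   .. code-block:: python | ||||
| 
 | ||||
|       import pytest | ||||
| 
 | ||||
|       def test_migrated_raises(): | ||||
|           # Removed: pytest.raises(ZeroDivisionError, "1 / 0") | ||||
|           # Removed: pytest.raises(..., message="...") | ||||
|           with pytest.raises(ZeroDivisionError): | ||||
|               1 / 0 | ||||
| 
 | ||||
|           # Assert on the exception text with match= instead of message=. | ||||
|           with pytest.raises(ValueError, match="invalid literal"): | ||||
|               int("boom") | ||||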
| 
 | ||||
| - `#5565 <https://github.com/pytest-dev/pytest/issues/5565>`_: Removed unused support code for `unittest2 <https://pypi.org/project/unittest2/>`__. | ||||
| 
 | ||||
|   The ``unittest2`` backport module is no longer | ||||
|   necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem | ||||
|   to be used: after removing it, all tests still pass unchanged. | ||||
| 
 | ||||
|   Although our policy is to introduce a deprecation period before removing any features or support | ||||
|   for third party libraries, because this code is apparently not used | ||||
|   at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to | ||||
|   remove it in this release. | ||||
| 
 | ||||
|   If you experience a regression because of this, please | ||||
|   `file an issue <https://github.com/pytest-dev/pytest/issues/new>`__. | ||||
| 
 | ||||
| 
 | ||||
| - `#5615 <https://github.com/pytest-dev/pytest/issues/5615>`_: ``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument. | ||||
| 
 | ||||
|   This was supported for Python 2 where it was tempting to use ``"message"`` | ||||
|   instead of ``u"message"``. | ||||
| 
 | ||||
|   Python 3 code is unlikely to pass ``bytes`` to these functions. If you do, | ||||
|   please decode it to an ``str`` beforehand. | ||||
| 
 | ||||
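|   A tiny illustrative sketch (the variable and reason are made up): | ||||
| 
 | ||||
|   .. code-block:: python | ||||
| 
 | ||||
|       import pytest | ||||
| 
 | ||||
|       def test_needs_backend(): | ||||
|           reason = b"backend unavailable"  # e.g. captured from a subprocess | ||||
|           # bytes are no longer accepted as the message; decode to str first. | ||||
|           pytest.skip(reason.decode("utf-8")) | ||||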
| 
 | ||||
| 
 | ||||
| Features | ||||
| -------- | ||||
| 
 | ||||
| - `#5564 <https://github.com/pytest-dev/pytest/issues/5564>`_: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``. | ||||
| 
 | ||||
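|   A hedged sketch of inspecting it from a ``conftest.py`` hook (assuming the | ||||
|   attribute is populated by the time ``pytest_configure`` runs): | ||||
| 
 | ||||
|   .. code-block:: python | ||||
| 
 | ||||
|       # conftest.py | ||||
|       def pytest_configure(config): | ||||
|           # Holds the arguments exactly as they were passed to pytest.main(). | ||||
|           print("invoked with:", config.invocation_args) | ||||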
| 
 | ||||
| - `#5576 <https://github.com/pytest-dev/pytest/issues/5576>`_: New `NUMBER <https://docs.pytest.org/en/latest/doctest.html#using-doctest-options>`__ | ||||
|   option for doctests to ignore irrelevant differences in floating-point numbers. | ||||
|   Inspired by Sébastien Boisgérault's `numtest <https://github.com/boisgera/numtest>`__ | ||||
|   extension for doctest. | ||||
| 
 | ||||
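|   A small sketch of what a doctest using the option can look like (the values | ||||
|   are made up for illustration): | ||||
| 
 | ||||
|   .. code-block:: python | ||||
| 
 | ||||
|       def approx_pi(): | ||||
|           """Return an approximation of pi. | ||||
| 
 | ||||
|           >>> approx_pi()  # doctest: +NUMBER | ||||
|           3.14 | ||||
|           """ | ||||
|           return 3.141592653589793 | ||||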
| 
 | ||||
| 
 | ||||
| Improvements | ||||
| ------------ | ||||
| 
 | ||||
| - `#5471 <https://github.com/pytest-dev/pytest/issues/5471>`_: JUnit XML now includes a timestamp and hostname in the testsuite tag. | ||||
| 
 | ||||
| 
 | ||||
| - `#5707 <https://github.com/pytest-dev/pytest/issues/5707>`_: Time taken to run the test suite now includes a human-readable representation when it takes over | ||||
|   60 seconds, for example:: | ||||
| 
 | ||||
|       ===== 2 failed in 102.70s (0:01:42) ===== | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| Bug Fixes | ||||
| --------- | ||||
| 
 | ||||
| - `#4344 <https://github.com/pytest-dev/pytest/issues/4344>`_: Fix RuntimeError/StopIteration when trying to collect a package with "__init__.py" only. | ||||
| 
 | ||||
| 
 | ||||
| - `#5115 <https://github.com/pytest-dev/pytest/issues/5115>`_: Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest. | ||||
| 
 | ||||
| 
 | ||||
| - `#5477 <https://github.com/pytest-dev/pytest/issues/5477>`_: The XML file produced by ``--junitxml`` now correctly contains a ``<testsuites>`` root element. | ||||
| 
 | ||||
| 
 | ||||
| - `#5523 <https://github.com/pytest-dev/pytest/issues/5523>`_: Fixed using multiple short options together on the command line (for example ``-vs``) in Python 3.8+. | ||||
| 
 | ||||
| 
 | ||||
| - `#5524 <https://github.com/pytest-dev/pytest/issues/5524>`_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only, | ||||
|   which could lead to pytest crashing when executed a second time with the ``--basetemp`` option. | ||||
| 
 | ||||
| 
 | ||||
| - `#5537 <https://github.com/pytest-dev/pytest/issues/5537>`_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the | ||||
|   standard library on Python 3.8+. | ||||
| 
 | ||||
| 
 | ||||
| - `#5578 <https://github.com/pytest-dev/pytest/issues/5578>`_: Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc.) | ||||
|   so they provide better error messages when users meant to use marks (for example ``@pytest.xfail`` | ||||
|   instead of ``@pytest.mark.xfail``). | ||||
| 
 | ||||
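|   An illustrative before/after of the mistake this targets: | ||||
| 
 | ||||
|   .. code-block:: python | ||||
| 
 | ||||
|       import pytest | ||||
| 
 | ||||
|       # Wrong: pytest.xfail() is a function that must be *called* inside a test; | ||||
|       # using it as a decorator is now reported with a clearer error message. | ||||
|       # | ||||
|       # Right: declare an expected failure with the mark. | ||||
|       @pytest.mark.xfail(reason="known bug") | ||||
|       def test_known_bug(): | ||||
|           assert 1 + 1 == 3 | ||||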
| 
 | ||||
| - `#5606 <https://github.com/pytest-dev/pytest/issues/5606>`_: Fixed internal error when test functions were patched with objects that cannot be compared | ||||
|   for truth values against others, like ``numpy`` arrays. | ||||
| 
 | ||||
| 
 | ||||
| - `#5634 <https://github.com/pytest-dev/pytest/issues/5634>`_: ``pytest.exit`` is now correctly handled in ``unittest`` cases. | ||||
|   This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly. | ||||
| 
 | ||||
| 
 | ||||
| - `#5650 <https://github.com/pytest-dev/pytest/issues/5650>`_: Improved output when parsing an ini configuration file fails. | ||||
| 
 | ||||
| 
 | ||||
| - `#5701 <https://github.com/pytest-dev/pytest/issues/5701>`_: Fix collection of ``staticmethod`` objects defined with ``functools.partial``. | ||||
| 
 | ||||
| 
 | ||||
| - `#5734 <https://github.com/pytest-dev/pytest/issues/5734>`_: Skip async generator test functions, and update the warning message to refer to ``async def`` functions. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| Improved Documentation | ||||
| ---------------------- | ||||
| 
 | ||||
| - `#5669 <https://github.com/pytest-dev/pytest/issues/5669>`_: Add docstring for ``Testdir.copy_example``. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| Trivial/Internal Changes | ||||
| ------------------------ | ||||
| 
 | ||||
| - `#5095 <https://github.com/pytest-dev/pytest/issues/5095>`_: XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite | ||||
|   to avoid future regressions. | ||||
| 
 | ||||
| 
 | ||||
| - `#5516 <https://github.com/pytest-dev/pytest/issues/5516>`_: Cache the node-splitting function, which can improve collection performance in very large test suites. | ||||
| 
 | ||||
| 
 | ||||
| - `#5603 <https://github.com/pytest-dev/pytest/issues/5603>`_: Simplified internal ``SafeRepr`` class and removed some dead code. | ||||
| 
 | ||||
| 
 | ||||
| - `#5664 <https://github.com/pytest-dev/pytest/issues/5664>`_: When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``, | ||||
|   the ``test_xfail_handling`` test no longer fails. | ||||
| 
 | ||||
| 
 | ||||
| - `#5684 <https://github.com/pytest-dev/pytest/issues/5684>`_: Replace manual handling of ``OSError.errno`` in the codebase with the new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.). | ||||
| 
 | ||||
| 
 | ||||
| pytest 5.0.1 (2019-07-04) | ||||
| ========================= | ||||
| 
 | ||||
|  |  | |||
|  | @ -1 +0,0 @@ | |||
| Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only. | ||||
|  | @ -1,2 +0,0 @@ | |||
| XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite | ||||
| to avoid future regressions. | ||||
|  | @ -1 +0,0 @@ | |||
| Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest. | ||||
|  | @ -1,26 +0,0 @@ | |||
| As per our policy, the following features have been deprecated in the 4.X series and are now | ||||
| removed: | ||||
| 
 | ||||
| * ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead. | ||||
| 
 | ||||
| * ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument. | ||||
| 
 | ||||
| * ``message`` parameter of ``pytest.raises``. | ||||
| 
 | ||||
| * ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only | ||||
|   syntax. This might change the exception message from previous versions, but they still raise | ||||
|   ``TypeError`` on unknown keyword arguments as before. | ||||
| 
 | ||||
| * ``pytest.config`` global variable. | ||||
| 
 | ||||
| * ``tmpdir_factory.ensuretemp`` method. | ||||
| 
 | ||||
| * ``pytest_logwarning`` hook. | ||||
| 
 | ||||
| * ``RemovedInPytest4Warning`` warning type. | ||||
| 
 | ||||
| * ``request`` is now a reserved name for fixtures. | ||||
| 
 | ||||
| 
 | ||||
| For more information consult | ||||
| `Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs. | ||||
|  | @ -1 +0,0 @@ | |||
| JUnit XML now includes a timestamp and hostname in the testsuite tag. | ||||
|  | @ -1 +0,0 @@ | |||
| The XML file produced by ``--junitxml`` now correctly contain a ``<testsuites>`` root element. | ||||
|  | @ -1 +0,0 @@ | |||
| Cache node splitting function which can improve collection performance in very large test suites. | ||||
|  | @ -1 +0,0 @@ | |||
| Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+. | ||||
|  | @ -1,2 +0,0 @@ | |||
| Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only, | ||||
| which could lead to pytest crashing when executed a second time with the ``--basetemp`` option. | ||||
|  | @ -1,2 +0,0 @@ | |||
| Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the | ||||
| standard library on Python 3.8+. | ||||
|  | @ -1 +0,0 @@ | |||
| New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``. | ||||
|  | @ -1,13 +0,0 @@ | |||
| Removed unused support code for `unittest2 <https://pypi.org/project/unittest2/>`__. | ||||
| 
 | ||||
| The ``unittest2`` backport module is no longer | ||||
| necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem | ||||
| to be used: after removed, all tests still pass unchanged. | ||||
| 
 | ||||
| Although our policy is to introduce a deprecation period before removing any features or support | ||||
| for third party libraries, because this code is apparently not used | ||||
| at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to | ||||
| remove it in this release. | ||||
| 
 | ||||
| If you experience a regression because of this, please | ||||
| `file an issue <https://github.com/pytest-dev/pytest/issues/new>`__. | ||||
|  | @ -1,4 +0,0 @@ | |||
| New `NUMBER <https://docs.pytest.org/en/latest/doctest.html#using-doctest-options>`__ | ||||
| option for doctests to ignore irrelevant differences in floating-point numbers. | ||||
| Inspired by Sébastien Boisgérault's `numtest <https://github.com/boisgera/numtest>`__ | ||||
| extension for doctest. | ||||
|  | @ -1,3 +0,0 @@ | |||
| Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc) | ||||
| so they provide better error messages when users meant to use marks (for example ``@pytest.xfail`` | ||||
| instead of ``@pytest.mark.xfail``). | ||||
|  | @ -1 +0,0 @@ | |||
| Simplified internal ``SafeRepr`` class and removed some dead code. | ||||
|  | @ -1,2 +0,0 @@ | |||
| Fixed internal error when test functions were patched with objects that cannot be compared | ||||
| for truth values against others, like ``numpy`` arrays. | ||||
|  | @ -1,7 +0,0 @@ | |||
| ``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument. | ||||
| 
 | ||||
| This was supported for Python 2 where it was tempting to use ``"message"`` | ||||
| instead of ``u"message"``. | ||||
| 
 | ||||
| Python 3 code is unlikely to pass ``bytes`` to these functions. If you do, | ||||
| please decode it to an ``str`` beforehand. | ||||
|  | @ -1,2 +0,0 @@ | |||
| ``pytest.exit`` is now correctly handled in ``unittest`` cases. | ||||
| This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly. | ||||
|  | @ -1 +0,0 @@ | |||
| Improved output when parsing an ini configuration file fails. | ||||
|  | @ -1,2 +0,0 @@ | |||
| When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``, | ||||
| the ``test_xfail_handling`` test no longer fails. | ||||
|  | @ -1 +0,0 @@ | |||
| Add docstring for ``Testdir.copy_example``. | ||||
|  | @ -1 +0,0 @@ | |||
| Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.). | ||||
|  | @ -1 +0,0 @@ | |||
| Fix collection of ``staticmethod`` objects defined with ``functools.partial``. | ||||
|  | @ -1,4 +0,0 @@ | |||
| Time taken to run the test suite now includes a human-readable representation when it takes over | ||||
| 60 seconds, for example:: | ||||
| 
 | ||||
|     ===== 2 failed in 102.70s (0:01:42) ===== | ||||
|  | @ -1 +0,0 @@ | |||
| Skip async generator test functions, and update the warning message to refer to ``async def`` functions. | ||||
|  | @ -0,0 +1 @@ | |||
| Fix decoding error when printing an error response from ``--pastebin``. | ||||
|  | @ -0,0 +1,7 @@ | |||
| coverage: | ||||
|   status: | ||||
|     project: true | ||||
|     patch: true | ||||
|     changes: true | ||||
| 
 | ||||
| comment: off | ||||
|  | @ -6,6 +6,8 @@ Release announcements | |||
|    :maxdepth: 2 | ||||
| 
 | ||||
| 
 | ||||
|    release-5.1.1 | ||||
|    release-5.1.0 | ||||
|    release-5.0.1 | ||||
|    release-5.0.0 | ||||
|    release-4.6.5 | ||||
|  |  | |||
|  | @ -0,0 +1,56 @@ | |||
| pytest-5.1.0 | ||||
| ======================================= | ||||
| 
 | ||||
| The pytest team is proud to announce the 5.1.0 release! | ||||
| 
 | ||||
| pytest is a mature Python testing tool with more than 2000 tests | ||||
| against itself, passing on many different interpreters and platforms. | ||||
| 
 | ||||
| This release contains a number of bug fixes and improvements, so users are encouraged | ||||
| to take a look at the CHANGELOG: | ||||
| 
 | ||||
|     https://docs.pytest.org/en/latest/changelog.html | ||||
| 
 | ||||
| For complete documentation, please visit: | ||||
| 
 | ||||
|     https://docs.pytest.org/en/latest/ | ||||
| 
 | ||||
| As usual, you can upgrade from PyPI via: | ||||
| 
 | ||||
|     pip install -U pytest | ||||
| 
 | ||||
| Thanks to all who contributed to this release, among them: | ||||
| 
 | ||||
| * Albert Tugushev | ||||
| * Alexey Zankevich | ||||
| * Anthony Sottile | ||||
| * Bruno Oliveira | ||||
| * Daniel Hahler | ||||
| * David Röthlisberger | ||||
| * Florian Bruhin | ||||
| * Ilya Stepin | ||||
| * Jon Dufresne | ||||
| * Kaiqi | ||||
| * Max R | ||||
| * Miro Hrončok | ||||
| * Oliver Bestwalter | ||||
| * Ran Benita | ||||
| * Ronny Pfannschmidt | ||||
| * Samuel Searles-Bryant | ||||
| * Semen Zhydenko | ||||
| * Steffen Schroeder | ||||
| * Thomas Grainger | ||||
| * Tim Hoffmann | ||||
| * William Woodall | ||||
| * Wojtek Erbetowski | ||||
| * Xixi Zhao | ||||
| * Yash Todi | ||||
| * boris | ||||
| * dmitry.dygalo | ||||
| * helloocc | ||||
| * martbln | ||||
| * mei-li | ||||
| 
 | ||||
| 
 | ||||
| Happy testing, | ||||
| The Pytest Development Team | ||||
|  | @ -0,0 +1,24 @@ | |||
| pytest-5.1.1 | ||||
| ======================================= | ||||
| 
 | ||||
| pytest 5.1.1 has just been released to PyPI. | ||||
| 
 | ||||
| This is a bug-fix release, being a drop-in replacement. To upgrade:: | ||||
| 
 | ||||
|   pip install --upgrade pytest | ||||
| 
 | ||||
| The full changelog is available at https://docs.pytest.org/en/latest/changelog.html. | ||||
| 
 | ||||
| Thanks to all who contributed to this release, among them: | ||||
| 
 | ||||
| * Anthony Sottile | ||||
| * Bruno Oliveira | ||||
| * Daniel Hahler | ||||
| * Florian Bruhin | ||||
| * Hugo van Kemenade | ||||
| * Ran Benita | ||||
| * Ronny Pfannschmidt | ||||
| 
 | ||||
| 
 | ||||
| Happy testing, | ||||
| The pytest Development Team | ||||
|  | @ -47,7 +47,7 @@ you will see the return value of the function call: | |||
|     E        +  where 3 = f() | ||||
| 
 | ||||
|     test_assert1.py:6: AssertionError | ||||
|     ========================= 1 failed in 0.12 seconds ========================= | ||||
|     ============================ 1 failed in 0.02s ============================= | ||||
| 
 | ||||
| ``pytest`` has support for showing the values of the most common subexpressions | ||||
| including calls, attributes, comparisons, and binary and unary | ||||
|  | @ -208,7 +208,7 @@ if you run this module: | |||
|     E         Use -v to get the full diff | ||||
| 
 | ||||
|     test_assert2.py:6: AssertionError | ||||
|     ========================= 1 failed in 0.12 seconds ========================= | ||||
|     ============================ 1 failed in 0.02s ============================= | ||||
| 
 | ||||
| Special comparisons are done for a number of cases: | ||||
| 
 | ||||
|  | @ -279,7 +279,7 @@ the conftest file: | |||
|    E            vals: 1 != 2 | ||||
| 
 | ||||
|    test_foocompare.py:12: AssertionError | ||||
|    1 failed in 0.12 seconds | ||||
|    1 failed in 0.02s | ||||
| 
 | ||||
| .. _assert-details: | ||||
| .. _`assert introspection`: | ||||
|  |  | |||
|  | @ -160,7 +160,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a | |||
|             in python < 3.6 this is a pathlib2.Path | ||||
| 
 | ||||
| 
 | ||||
|     no tests ran in 0.12 seconds | ||||
|     no tests ran in 0.00s | ||||
| 
 | ||||
| You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like: | ||||
| 
 | ||||
|  |  | |||
|  | @ -63,7 +63,7 @@ If you run this for the first time you will see two failures: | |||
|     >           pytest.fail("bad luck") | ||||
|     E           Failed: bad luck | ||||
| 
 | ||||
|     test_50.py:6: Failed | ||||
|     test_50.py:7: Failed | ||||
|     _______________________________ test_num[25] _______________________________ | ||||
| 
 | ||||
|     i = 25 | ||||
|  | @ -74,8 +74,8 @@ If you run this for the first time you will see two failures: | |||
|     >           pytest.fail("bad luck") | ||||
|     E           Failed: bad luck | ||||
| 
 | ||||
|     test_50.py:6: Failed | ||||
|     2 failed, 48 passed in 0.12 seconds | ||||
|     test_50.py:7: Failed | ||||
|     2 failed, 48 passed in 0.08s | ||||
| 
 | ||||
| If you then run it with ``--lf``: | ||||
| 
 | ||||
|  | @ -102,7 +102,7 @@ If you then run it with ``--lf``: | |||
|     >           pytest.fail("bad luck") | ||||
|     E           Failed: bad luck | ||||
| 
 | ||||
|     test_50.py:6: Failed | ||||
|     test_50.py:7: Failed | ||||
|     _______________________________ test_num[25] _______________________________ | ||||
| 
 | ||||
|     i = 25 | ||||
|  | @ -113,8 +113,8 @@ If you then run it with ``--lf``: | |||
|     >           pytest.fail("bad luck") | ||||
|     E           Failed: bad luck | ||||
| 
 | ||||
|     test_50.py:6: Failed | ||||
|     ================= 2 failed, 48 deselected in 0.12 seconds ================== | ||||
|     test_50.py:7: Failed | ||||
|     ===================== 2 failed, 48 deselected in 0.02s ===================== | ||||
| 
 | ||||
| You have run only the two failing tests from the last run, while the 48 passing | ||||
| tests have not been run ("deselected"). | ||||
|  | @ -146,7 +146,7 @@ of ``FF`` and dots): | |||
|     >           pytest.fail("bad luck") | ||||
|     E           Failed: bad luck | ||||
| 
 | ||||
|     test_50.py:6: Failed | ||||
|     test_50.py:7: Failed | ||||
|     _______________________________ test_num[25] _______________________________ | ||||
| 
 | ||||
|     i = 25 | ||||
|  | @ -157,8 +157,8 @@ of ``FF`` and dots): | |||
|     >           pytest.fail("bad luck") | ||||
|     E           Failed: bad luck | ||||
| 
 | ||||
|     test_50.py:6: Failed | ||||
|     =================== 2 failed, 48 passed in 0.12 seconds ==================== | ||||
|     test_50.py:7: Failed | ||||
|     ======================= 2 failed, 48 passed in 0.07s ======================= | ||||
| 
 | ||||
| .. _`config.cache`: | ||||
| 
 | ||||
|  | @ -227,10 +227,10 @@ If you run this command for the first time, you can see the print statement: | |||
|     >       assert mydata == 23 | ||||
|     E       assert 42 == 23 | ||||
| 
 | ||||
|     test_caching.py:17: AssertionError | ||||
|     test_caching.py:20: AssertionError | ||||
|     -------------------------- Captured stdout setup --------------------------- | ||||
|     running expensive computation... | ||||
|     1 failed in 0.12 seconds | ||||
|     1 failed in 0.02s | ||||
| 
 | ||||
| If you run it a second time, the value will be retrieved from | ||||
| the cache and nothing will be printed: | ||||
|  | @ -248,8 +248,8 @@ the cache and nothing will be printed: | |||
|     >       assert mydata == 23 | ||||
|     E       assert 42 == 23 | ||||
| 
 | ||||
|     test_caching.py:17: AssertionError | ||||
|     1 failed in 0.12 seconds | ||||
|     test_caching.py:20: AssertionError | ||||
|     1 failed in 0.02s | ||||
| 
 | ||||
| See the :ref:`cache-api` for more details. | ||||
| 
 | ||||
|  | @ -283,7 +283,7 @@ You can always peek at the content of the cache using the | |||
|     example/value contains: | ||||
|       42 | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| ``--cache-show`` takes an optional argument to specify a glob pattern for | ||||
| filtering: | ||||
|  | @ -300,7 +300,7 @@ filtering: | |||
|     example/value contains: | ||||
|       42 | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| Clearing Cache content | ||||
| ---------------------- | ||||
|  |  | |||
|  | @ -88,10 +88,10 @@ of the failing function and hide the other one: | |||
|     >       assert False | ||||
|     E       assert False | ||||
| 
 | ||||
|     test_module.py:9: AssertionError | ||||
|     test_module.py:12: AssertionError | ||||
|     -------------------------- Captured stdout setup --------------------------- | ||||
|     setting up <function test_func2 at 0xdeadbeef> | ||||
|     ==================== 1 failed, 1 passed in 0.12 seconds ==================== | ||||
|     ======================= 1 failed, 1 passed in 0.02s ======================== | ||||
| 
 | ||||
| Accessing captured output from a test function | ||||
| --------------------------------------------------- | ||||
|  |  | |||
|  | @ -36,7 +36,7 @@ then you can just invoke ``pytest`` directly: | |||
| 
 | ||||
|     test_example.txt .                                                   [100%] | ||||
| 
 | ||||
|     ========================= 1 passed in 0.12 seconds ========================= | ||||
|     ============================ 1 passed in 0.01s ============================= | ||||
| 
 | ||||
| By default, pytest will collect ``test*.txt`` files looking for doctest directives, but you | ||||
| can pass additional globs using the ``--doctest-glob`` option (multi-allowed). | ||||
|  | @ -66,7 +66,7 @@ and functions, including from test modules: | |||
|     mymodule.py .                                                        [ 50%] | ||||
|     test_example.txt .                                                   [100%] | ||||
| 
 | ||||
|     ========================= 2 passed in 0.12 seconds ========================= | ||||
|     ============================ 2 passed in 0.01s ============================= | ||||
| 
 | ||||
| You can make these changes permanent in your project by | ||||
| putting them into a pytest.ini file like this: | ||||
|  |  | |||
|  | @ -52,7 +52,7 @@ You can then restrict a test run to only run tests marked with ``webtest``: | |||
| 
 | ||||
|     test_server.py::test_send_http PASSED                                [100%] | ||||
| 
 | ||||
|     ================== 1 passed, 3 deselected in 0.12 seconds ================== | ||||
|     ===================== 1 passed, 3 deselected in 0.01s ====================== | ||||
| 
 | ||||
| Or the inverse, running all tests except the webtest ones: | ||||
| 
 | ||||
|  | @ -69,7 +69,7 @@ Or the inverse, running all tests except the webtest ones: | |||
|     test_server.py::test_another PASSED                                  [ 66%] | ||||
|     test_server.py::TestClass::test_method PASSED                        [100%] | ||||
| 
 | ||||
|     ================== 3 passed, 1 deselected in 0.12 seconds ================== | ||||
|     ===================== 3 passed, 1 deselected in 0.01s ====================== | ||||
| 
 | ||||
| Selecting tests based on their node ID | ||||
| -------------------------------------- | ||||
|  | @ -89,7 +89,7 @@ tests based on their module, class, method, or function name: | |||
| 
 | ||||
|     test_server.py::TestClass::test_method PASSED                        [100%] | ||||
| 
 | ||||
|     ========================= 1 passed in 0.12 seconds ========================= | ||||
|     ============================ 1 passed in 0.01s ============================= | ||||
| 
 | ||||
| You can also select on the class: | ||||
| 
 | ||||
|  | @ -104,7 +104,7 @@ You can also select on the class: | |||
| 
 | ||||
|     test_server.py::TestClass::test_method PASSED                        [100%] | ||||
| 
 | ||||
|     ========================= 1 passed in 0.12 seconds ========================= | ||||
|     ============================ 1 passed in 0.01s ============================= | ||||
| 
 | ||||
| Or select multiple nodes: | ||||
| 
 | ||||
|  | @ -120,7 +120,7 @@ Or select multiple nodes: | |||
|     test_server.py::TestClass::test_method PASSED                        [ 50%] | ||||
|     test_server.py::test_send_http PASSED                                [100%] | ||||
| 
 | ||||
|     ========================= 2 passed in 0.12 seconds ========================= | ||||
|     ============================ 2 passed in 0.01s ============================= | ||||
| 
 | ||||
| .. _node-id: | ||||
| 
 | ||||
|  | @ -159,7 +159,7 @@ select tests based on their names: | |||
| 
 | ||||
|     test_server.py::test_send_http PASSED                                [100%] | ||||
| 
 | ||||
|     ================== 1 passed, 3 deselected in 0.12 seconds ================== | ||||
|     ===================== 1 passed, 3 deselected in 0.01s ====================== | ||||
| 
 | ||||
| And you can also run all tests except the ones that match the keyword: | ||||
| 
 | ||||
|  | @ -176,7 +176,7 @@ And you can also run all tests except the ones that match the keyword: | |||
|     test_server.py::test_another PASSED                                  [ 66%] | ||||
|     test_server.py::TestClass::test_method PASSED                        [100%] | ||||
| 
 | ||||
|     ================== 3 passed, 1 deselected in 0.12 seconds ================== | ||||
|     ===================== 3 passed, 1 deselected in 0.01s ====================== | ||||
| 
 | ||||
| Or to select "http" and "quick" tests: | ||||
| 
 | ||||
|  | @ -192,7 +192,7 @@ Or to select "http" and "quick" tests: | |||
|     test_server.py::test_send_http PASSED                                [ 50%] | ||||
|     test_server.py::test_something_quick PASSED                          [100%] | ||||
| 
 | ||||
|     ================== 2 passed, 2 deselected in 0.12 seconds ================== | ||||
|     ===================== 2 passed, 2 deselected in 0.01s ====================== | ||||
| 
 | ||||
| .. note:: | ||||
| 
 | ||||
|  | @ -413,7 +413,7 @@ the test needs: | |||
| 
 | ||||
|     test_someenv.py s                                                    [100%] | ||||
| 
 | ||||
|     ======================== 1 skipped in 0.12 seconds ========================= | ||||
|     ============================ 1 skipped in 0.00s ============================ | ||||
| 
 | ||||
| and here is one that specifies exactly the environment needed: | ||||
| 
 | ||||
|  | @ -428,7 +428,7 @@ and here is one that specifies exactly the environment needed: | |||
| 
 | ||||
|     test_someenv.py .                                                    [100%] | ||||
| 
 | ||||
|     ========================= 1 passed in 0.12 seconds ========================= | ||||
|     ============================ 1 passed in 0.01s ============================= | ||||
| 
 | ||||
| The ``--markers`` option always gives you a list of available markers: | ||||
| 
 | ||||
|  | @ -499,7 +499,7 @@ The output is as follows: | |||
|     $ pytest -q -s | ||||
|     Mark(name='my_marker', args=(<function hello_world at 0xdeadbeef>,), kwargs={}) | ||||
|     . | ||||
|     1 passed in 0.12 seconds | ||||
|     1 passed in 0.00s | ||||
| 
 | ||||
| We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``. | ||||
| 
 | ||||
|  | @ -551,7 +551,7 @@ Let's run this without capturing output and see what we get: | |||
|     glob args=('class',) kwargs={'x': 2} | ||||
|     glob args=('module',) kwargs={'x': 1} | ||||
|     . | ||||
|     1 passed in 0.12 seconds | ||||
|     1 passed in 0.01s | ||||
| 
 | ||||
| marking platform specific tests with pytest | ||||
| -------------------------------------------------------------- | ||||
|  | @ -623,7 +623,7 @@ then you will see two tests skipped and two executed tests as expected: | |||
| 
 | ||||
|     ========================= short test summary info ========================== | ||||
|     SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux | ||||
|     =================== 2 passed, 2 skipped in 0.12 seconds ==================== | ||||
|     ======================= 2 passed, 2 skipped in 0.01s ======================= | ||||
| 
 | ||||
| Note that if you specify a platform via the marker-command line option like this: | ||||
| 
 | ||||
|  | @ -638,7 +638,7 @@ Note that if you specify a platform via the marker-command line option like this | |||
| 
 | ||||
|     test_plat.py .                                                       [100%] | ||||
| 
 | ||||
|     ================== 1 passed, 3 deselected in 0.12 seconds ================== | ||||
|     ===================== 1 passed, 3 deselected in 0.01s ====================== | ||||
| 
 | ||||
| then the unmarked-tests will not be run.  It is thus a way to restrict the run to the specific tests. | ||||
| 
 | ||||
|  | @ -711,7 +711,7 @@ We can now use the ``-m option`` to select one set: | |||
|     test_module.py:8: in test_interface_complex | ||||
|         assert 0 | ||||
|     E   assert 0 | ||||
|     ================== 2 failed, 2 deselected in 0.12 seconds ================== | ||||
|     ===================== 2 failed, 2 deselected in 0.02s ====================== | ||||
| 
 | ||||
| or to select both "event" and "interface" tests: | ||||
| 
 | ||||
|  | @ -739,4 +739,4 @@ or to select both "event" and "interface" tests: | |||
|     test_module.py:12: in test_event_simple | ||||
|         assert 0 | ||||
|     E   assert 0 | ||||
|     ================== 3 failed, 1 deselected in 0.12 seconds ================== | ||||
|     ===================== 3 failed, 1 deselected in 0.03s ====================== | ||||
|  |  | |||
|  | @ -41,7 +41,7 @@ now execute the test specification: | |||
|     usecase execution failed | ||||
|        spec failed: 'some': 'other' | ||||
|        no further details known at this point. | ||||
|     ==================== 1 failed, 1 passed in 0.12 seconds ==================== | ||||
|     ======================= 1 failed, 1 passed in 0.02s ======================== | ||||
| 
 | ||||
| .. regendoc:wipe | ||||
| 
 | ||||
|  | @ -77,7 +77,7 @@ consulted when reporting in ``verbose`` mode: | |||
|     usecase execution failed | ||||
|        spec failed: 'some': 'other' | ||||
|        no further details known at this point. | ||||
|     ==================== 1 failed, 1 passed in 0.12 seconds ==================== | ||||
|     ======================= 1 failed, 1 passed in 0.02s ======================== | ||||
| 
 | ||||
| .. regendoc:wipe | ||||
| 
 | ||||
|  | @ -97,4 +97,4 @@ interesting to just look at the collection tree: | |||
|         <YamlItem hello> | ||||
|         <YamlItem ok> | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.02s =========================== | ||||
|  |  | |||
|  | @ -54,7 +54,7 @@ This means that we only run 2 tests if we do not pass ``--all``: | |||
| 
 | ||||
|     $ pytest -q test_compute.py | ||||
|     ..                                                                   [100%] | ||||
|     2 passed in 0.12 seconds | ||||
|     2 passed in 0.01s | ||||
| 
 | ||||
| We run only two computations, so we see two dots. | ||||
| Let's run the full monty: | ||||
|  | @ -72,8 +72,8 @@ let's run the full monty: | |||
|     >       assert param1 < 4 | ||||
|     E       assert 4 < 4 | ||||
| 
 | ||||
|     test_compute.py:3: AssertionError | ||||
|     1 failed, 4 passed in 0.12 seconds | ||||
|     test_compute.py:4: AssertionError | ||||
|     1 failed, 4 passed in 0.02s | ||||
| 
 | ||||
| As expected when running the full range of ``param1`` values | ||||
| we'll get an error on the last one. | ||||
|  | @ -172,7 +172,7 @@ objects, they are still using the default pytest representation: | |||
|       <Function test_timedistance_v3[forward]> | ||||
|       <Function test_timedistance_v3[backward]> | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.01s =========================== | ||||
| 
 | ||||
| In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs | ||||
| together with the actual data, instead of listing them separately. | ||||
|  | @ -229,7 +229,7 @@ this is a fully self-contained example which you can run with: | |||
| 
 | ||||
|     test_scenarios.py ....                                               [100%] | ||||
| 
 | ||||
|     ========================= 4 passed in 0.12 seconds ========================= | ||||
|     ============================ 4 passed in 0.01s ============================= | ||||
| 
 | ||||
| If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function: | ||||
| 
 | ||||
|  | @ -248,7 +248,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia | |||
|           <Function test_demo1[advanced]> | ||||
|           <Function test_demo2[advanced]> | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.01s =========================== | ||||
| 
 | ||||
| Note that we told ``metafunc.parametrize()`` that your scenario values | ||||
| should be considered class-scoped.  With pytest-2.3 this leads to a | ||||
|  | @ -262,8 +262,8 @@ Deferring the setup of parametrized resources | |||
| The parametrization of test functions happens at collection | ||||
| time.  It is a good idea to setup expensive resources like DB | ||||
| connections or subprocess only when the actual test is run. | ||||
| Here is a simple example how you can achieve that, first | ||||
| the actual test requiring a ``db`` object: | ||||
| Here is a simple example of how you can achieve that. This test | ||||
| requires a ``db`` object fixture: | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|  | @ -323,7 +323,7 @@ Let's first see how it looks like at collection time: | |||
|       <Function test_db_initialized[d1]> | ||||
|       <Function test_db_initialized[d2]> | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| And then when we run the test: | ||||
| 
 | ||||
|  | @ -342,8 +342,8 @@ And then when we run the test: | |||
|     >           pytest.fail("deliberately failing for demo purposes") | ||||
|     E           Failed: deliberately failing for demo purposes | ||||
| 
 | ||||
|     test_backends.py:6: Failed | ||||
|     1 failed, 1 passed in 0.12 seconds | ||||
|     test_backends.py:8: Failed | ||||
|     1 failed, 1 passed in 0.02s | ||||
| 
 | ||||
| The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed.  Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase. | ||||
| 
 | ||||
|  | @ -394,7 +394,7 @@ The result of this test will be successful: | |||
|     <Module test_indirect_list.py> | ||||
|       <Function test_indirect[a-b]> | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| .. regendoc:wipe | ||||
| 
 | ||||
|  | @ -453,8 +453,8 @@ argument sets to use for each test function.  Let's run it: | |||
|     >       assert a == b | ||||
|     E       assert 1 == 2 | ||||
| 
 | ||||
|     test_parametrize.py:18: AssertionError | ||||
|     1 failed, 2 passed in 0.12 seconds | ||||
|     test_parametrize.py:21: AssertionError | ||||
|     1 failed, 2 passed in 0.03s | ||||
| 
 | ||||
| Indirect parametrization with multiple fixtures | ||||
| -------------------------------------------------------------- | ||||
|  | @ -475,11 +475,10 @@ Running it results in some skips if we don't have all the python interpreters in | |||
| .. code-block:: pytest | ||||
| 
 | ||||
|    . $ pytest -rs -q multipython.py | ||||
|    ssssssssssss...ssssssssssss                                          [100%] | ||||
|    ssssssssssss......sss......                                          [100%] | ||||
|    ========================= short test summary info ========================== | ||||
|    SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found | ||||
|    SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.7' not found | ||||
|    3 passed, 24 skipped in 0.12 seconds | ||||
|    SKIPPED [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found | ||||
|    12 passed, 15 skipped in 0.62s | ||||
| 
 | ||||
| Indirect parametrization of optional implementations/imports | ||||
| -------------------------------------------------------------------- | ||||
|  | @ -547,8 +546,8 @@ If you run this with reporting for skips enabled: | |||
|     test_module.py .s                                                    [100%] | ||||
| 
 | ||||
|     ========================= short test summary info ========================== | ||||
|     SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2': No module named 'opt2' | ||||
|     =================== 1 passed, 1 skipped in 0.12 seconds ==================== | ||||
|     SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:13: could not import 'opt2': No module named 'opt2' | ||||
|     ======================= 1 passed, 1 skipped in 0.01s ======================= | ||||
| 
 | ||||
| You'll see that we don't have an ``opt2`` module and thus the second test run | ||||
| of our ``test_func1`` was skipped.  A few notes: | ||||
|  | @ -610,7 +609,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker: | |||
|     test_pytest_param_example.py::test_eval[basic_2+4] PASSED            [ 66%] | ||||
|     test_pytest_param_example.py::test_eval[basic_6*9] XFAIL             [100%] | ||||
| 
 | ||||
|     ============ 2 passed, 15 deselected, 1 xfailed in 0.12 seconds ============ | ||||
|     =============== 2 passed, 15 deselected, 1 xfailed in 0.08s ================ | ||||
| 
 | ||||
| As the result: | ||||
| 
 | ||||
|  |  | |||
|  | @ -158,7 +158,7 @@ The test collection would look like this: | |||
|           <Function simple_check> | ||||
|           <Function complex_check> | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.01s =========================== | ||||
| 
 | ||||
| You can check for multiple glob patterns by adding a space between the patterns: | ||||
| 
 | ||||
|  | @ -221,7 +221,7 @@ You can always peek at the collection tree without running tests like this: | |||
|           <Function test_method> | ||||
|           <Function test_anothermethod> | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| .. _customizing-test-collection: | ||||
| 
 | ||||
|  | @ -297,7 +297,7 @@ file will be left out: | |||
|     rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini | ||||
|     collected 0 items | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.01s =========================== | ||||
| 
 | ||||
| It's also possible to ignore files based on Unix shell-style wildcards by adding | ||||
| patterns to ``collect_ignore_glob``. | ||||
|  |  | |||
|  | @ -119,7 +119,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|             a = "1" * 100 + "a" + "2" * 100 | ||||
|             b = "1" * 100 + "b" + "2" * 100 | ||||
|     >       assert a == b | ||||
|     E       AssertionError: assert '111111111111...2222222222222' == '1111111111111...2222222222222' | ||||
|     E       AssertionError: assert '111111111111...2222222222222' == '111111111111...2222222222222' | ||||
|     E         Skipping 90 identical leading characters in diff, use -v to show | ||||
|     E         Skipping 91 identical trailing characters in diff, use -v to show | ||||
|     E         - 1111111111a222222222 | ||||
|  | @ -136,7 +136,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|             a = "1\n" * 100 + "a" + "2\n" * 100 | ||||
|             b = "1\n" * 100 + "b" + "2\n" * 100 | ||||
|     >       assert a == b | ||||
|     E       AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n' | ||||
|     E       AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n...n2\n2\n2\n2\n' | ||||
|     E         Skipping 190 identical leading characters in diff, use -v to show | ||||
|     E         Skipping 191 identical trailing characters in diff, use -v to show | ||||
|     E           1 | ||||
|  | @ -235,7 +235,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|         def test_not_in_text_multiline(self): | ||||
|             text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail" | ||||
|     >       assert "foo" not in text | ||||
|     E       AssertionError: assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail' | ||||
|     E       AssertionError: assert 'foo' not in 'some multil...nand a\ntail' | ||||
|     E         'foo' is contained here: | ||||
|     E           some multiline | ||||
|     E           text | ||||
|  | @ -267,7 +267,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|         def test_not_in_text_single_long(self): | ||||
|             text = "head " * 50 + "foo " + "tail " * 20 | ||||
|     >       assert "foo" not in text | ||||
|     E       AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' | ||||
|     E       AssertionError: assert 'foo' not in 'head head h...l tail tail ' | ||||
|     E         'foo' is contained here: | ||||
|     E           head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail | ||||
|     E         ?           +++ | ||||
|  | @ -280,7 +280,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|         def test_not_in_text_single_long_term(self): | ||||
|             text = "head " * 50 + "f" * 70 + "tail " * 20 | ||||
|     >       assert "f" * 70 not in text | ||||
|     E       AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' | ||||
|     E       AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head h...l tail tail ' | ||||
|     E         'ffffffffffffffffff...fffffffffffffffffff' is contained here: | ||||
|     E           head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail | ||||
|     E         ?           ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ | ||||
|  | @ -301,7 +301,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|             left = Foo(1, "b") | ||||
|             right = Foo(1, "c") | ||||
|     >       assert left == right | ||||
|     E       AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialise...oo(a=1, b='c') | ||||
|     E       AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialis...oo(a=1, b='c') | ||||
|     E         Omitting 1 identical items, use -vv to show | ||||
|     E         Differing attributes: | ||||
|     E         b: 'b' != 'c' | ||||
|  | @ -650,4 +650,4 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|     E        +  where 1 = This is JSON\n{\n  'foo': 'bar'\n}.a | ||||
| 
 | ||||
|     failure_demo.py:282: AssertionError | ||||
|     ======================== 44 failed in 0.12 seconds ========================= | ||||
|     ============================ 44 failed in 0.26s ============================ | ||||
|  |  | |||
|  | @ -65,7 +65,7 @@ Let's run this without supplying our new option: | |||
|     test_sample.py:6: AssertionError | ||||
|     --------------------------- Captured stdout call --------------------------- | ||||
|     first | ||||
|     1 failed in 0.12 seconds | ||||
|     1 failed in 0.02s | ||||
| 
 | ||||
| And now with supplying a command line option: | ||||
| 
 | ||||
|  | @ -89,7 +89,7 @@ And now with supplying a command line option: | |||
|     test_sample.py:6: AssertionError | ||||
|     --------------------------- Captured stdout call --------------------------- | ||||
|     second | ||||
|     1 failed in 0.12 seconds | ||||
|     1 failed in 0.02s | ||||
| 
 | ||||
| You can see that the command line option arrived in our test.  This | ||||
| completes the basic pattern.  However, one often rather wants to process | ||||
|  | @ -132,7 +132,7 @@ directory with the above conftest.py: | |||
|     rootdir: $REGENDOC_TMPDIR | ||||
|     collected 0 items | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| .. _`excontrolskip`: | ||||
| 
 | ||||
|  | @ -201,7 +201,7 @@ and when running it will see a skipped "slow" test: | |||
| 
 | ||||
|     ========================= short test summary info ========================== | ||||
|     SKIPPED [1] test_module.py:8: need --runslow option to run | ||||
|     =================== 1 passed, 1 skipped in 0.12 seconds ==================== | ||||
|     ======================= 1 passed, 1 skipped in 0.01s ======================= | ||||
| 
 | ||||
| Or run it including the ``slow`` marked test: | ||||
| 
 | ||||
|  | @ -216,7 +216,7 @@ Or run it including the ``slow`` marked test: | |||
| 
 | ||||
|     test_module.py ..                                                    [100%] | ||||
| 
 | ||||
|     ========================= 2 passed in 0.12 seconds ========================= | ||||
|     ============================ 2 passed in 0.01s ============================= | ||||
| 
 | ||||
| Writing well integrated assertion helpers | ||||
| -------------------------------------------------- | ||||
|  | @ -261,7 +261,7 @@ Let's run our little function: | |||
|     E       Failed: not configured: 42 | ||||
| 
 | ||||
|     test_checkconfig.py:11: Failed | ||||
|     1 failed in 0.12 seconds | ||||
|     1 failed in 0.02s | ||||
| 
 | ||||
| If you only want to hide certain exceptions, you can set ``__tracebackhide__`` | ||||
| to a callable which gets the ``ExceptionInfo`` object. You can for example use | ||||
|  | @ -358,7 +358,7 @@ which will add the string to the test header accordingly: | |||
|     rootdir: $REGENDOC_TMPDIR | ||||
|     collected 0 items | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| .. regendoc:wipe | ||||
| 
 | ||||
|  | @ -388,7 +388,7 @@ which will add info only when run with "--v": | |||
|     rootdir: $REGENDOC_TMPDIR | ||||
|     collecting ... collected 0 items | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| and nothing when run plainly: | ||||
| 
 | ||||
|  | @ -401,7 +401,7 @@ and nothing when run plainly: | |||
|     rootdir: $REGENDOC_TMPDIR | ||||
|     collected 0 items | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
|     ========================== no tests ran in 0.00s =========================== | ||||
| 
 | ||||
| profiling test duration | ||||
| -------------------------- | ||||
|  | @ -447,7 +447,7 @@ Now we can profile which test functions execute the slowest: | |||
|     0.30s call     test_some_are_slow.py::test_funcslow2 | ||||
|     0.20s call     test_some_are_slow.py::test_funcslow1 | ||||
|     0.10s call     test_some_are_slow.py::test_funcfast | ||||
|     ========================= 3 passed in 0.12 seconds ========================= | ||||
|     ============================ 3 passed in 0.61s ============================= | ||||
| 
 | ||||
| incremental testing - test steps | ||||
| --------------------------------------------------- | ||||
|  | @ -531,7 +531,7 @@ If we run this: | |||
|     ========================= short test summary info ========================== | ||||
|     XFAIL test_step.py::TestUserHandling::test_deletion | ||||
|       reason: previous test failed (test_modification) | ||||
|     ============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds =============== | ||||
|     ================== 1 failed, 2 passed, 1 xfailed in 0.03s ================== | ||||
| 
 | ||||
| We'll see that ``test_deletion`` was not executed because ``test_modification`` | ||||
| failed.  It is reported as an "expected failure". | ||||
|  | @ -644,7 +644,7 @@ We can run this: | |||
|     E       assert 0 | ||||
| 
 | ||||
|     a/test_db2.py:2: AssertionError | ||||
|     ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== | ||||
|     ============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.05s ============== | ||||
| 
 | ||||
| The two test modules in the ``a`` directory see the same ``db`` fixture instance | ||||
| while the one test in the sister-directory ``b`` doesn't see it.  We could of course | ||||
|  | @ -733,7 +733,7 @@ and run them: | |||
|     E       assert 0 | ||||
| 
 | ||||
|     test_module.py:6: AssertionError | ||||
|     ========================= 2 failed in 0.12 seconds ========================= | ||||
|     ============================ 2 failed in 0.02s ============================= | ||||
| 
 | ||||
| you will have a "failures" file which contains the failing test ids: | ||||
| 
 | ||||
|  | @ -848,7 +848,7 @@ and run it: | |||
|     E       assert 0 | ||||
| 
 | ||||
|     test_module.py:19: AssertionError | ||||
|     ==================== 2 failed, 1 error in 0.12 seconds ===================== | ||||
|     ======================== 2 failed, 1 error in 0.02s ======================== | ||||
| 
 | ||||
| You'll see that the fixture finalizers could use the precise reporting | ||||
| information. | ||||
|  |  | |||
|  | @ -81,4 +81,4 @@ If you run this without output capturing: | |||
|     .test other | ||||
|     .test_unit1 method called | ||||
|     . | ||||
|     4 passed in 0.12 seconds | ||||
|     4 passed in 0.01s | ||||
|  |  | |||
|  | @ -95,8 +95,8 @@ marked ``smtp_connection`` fixture function.  Running the test looks like this: | |||
|     >       assert 0  # for demo purposes | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_smtpsimple.py:11: AssertionError | ||||
|     ========================= 1 failed in 0.12 seconds ========================= | ||||
|     test_smtpsimple.py:14: AssertionError | ||||
|     ============================ 1 failed in 0.18s ============================= | ||||
| 
 | ||||
| In the failure traceback we see that the test function was called with a | ||||
| ``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture | ||||
|  | @ -246,7 +246,7 @@ inspect what is going on and can now run the tests: | |||
|     >       assert 0  # for demo purposes | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_module.py:6: AssertionError | ||||
|     test_module.py:7: AssertionError | ||||
|     ________________________________ test_noop _________________________________ | ||||
| 
 | ||||
|     smtp_connection = <smtplib.SMTP object at 0xdeadbeef> | ||||
|  | @ -257,8 +257,8 @@ inspect what is going on and can now run the tests: | |||
|     >       assert 0  # for demo purposes | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_module.py:11: AssertionError | ||||
|     ========================= 2 failed in 0.12 seconds ========================= | ||||
|     test_module.py:13: AssertionError | ||||
|     ============================ 2 failed in 0.20s ============================= | ||||
| 
 | ||||
| You see the two ``assert 0`` failing and more importantly you can also see | ||||
| that the same (module-scoped) ``smtp_connection`` object was passed into the | ||||
|  | @ -315,15 +315,15 @@ Consider the code below: | |||
| 
 | ||||
| .. literalinclude:: example/fixtures/test_fixtures_order.py | ||||
| 
 | ||||
| The fixtures requested by ``test_foo`` will be instantiated in the following order: | ||||
| The fixtures requested by ``test_order`` will be instantiated in the following order: | ||||
| 
 | ||||
| 1. ``s1``: is the highest-scoped fixture (``session``). | ||||
| 2. ``m1``: is the second highest-scoped fixture (``module``). | ||||
| 3. ``a1``: is a ``function``-scoped ``autouse`` fixture: it will be instantiated before other fixtures | ||||
|    within the same scope. | ||||
| 4. ``f3``: is a ``function``-scoped fixture, required by ``f1``: it needs to be instantiated at this point | ||||
| 5. ``f1``: is the first ``function``-scoped fixture in ``test_foo`` parameter list. | ||||
| 6. ``f2``: is the last ``function``-scoped fixture in ``test_foo`` parameter list. | ||||
| 5. ``f1``: is the first ``function``-scoped fixture in ``test_order`` parameter list. | ||||
| 6. ``f2``: is the last ``function``-scoped fixture in ``test_order`` parameter list. | ||||
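A sketch of a ``test_fixtures_order.py`` consistent with the order described above (the file actually pulled in by the ``literalinclude`` may differ in detail):

.. code-block:: python

    import pytest

    order = []


    @pytest.fixture(scope="session")
    def s1():
        order.append("s1")


    @pytest.fixture(scope="module")
    def m1():
        order.append("m1")


    @pytest.fixture
    def f1(f3):
        order.append("f1")


    @pytest.fixture
    def f3():
        order.append("f3")


    @pytest.fixture(autouse=True)
    def a1():
        order.append("a1")


    @pytest.fixture
    def f2():
        order.append("f2")


    def test_order(f1, m1, f2, s1):
        assert order == ["s1", "m1", "a1", "f3", "f1", "f2"]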
| 
 | ||||
| 
 | ||||
| .. _`finalization`: | ||||
|  | @ -361,7 +361,7 @@ Let's execute it: | |||
|     $ pytest -s -q --tb=no | ||||
|     FFteardown smtp | ||||
| 
 | ||||
|     2 failed in 0.12 seconds | ||||
|     2 failed in 0.20s | ||||
| 
 | ||||
| We see that the ``smtp_connection`` instance is finalized after the two | ||||
| tests finished execution.  Note that if we decorated our fixture | ||||
|  | @ -515,7 +515,7 @@ again, nothing much has changed: | |||
|     $ pytest -s -q --tb=no | ||||
|     FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com) | ||||
| 
 | ||||
|     2 failed in 0.12 seconds | ||||
|     2 failed in 0.21s | ||||
| 
 | ||||
| Let's quickly create another test module that actually sets the | ||||
| server URL in its module namespace: | ||||
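The module itself is omitted from this hunk; a plausible sketch, assuming the ``smtp_connection`` fixture looks up an optional ``smtpserver`` attribute in the test module's namespace:

.. code-block:: python

    # content of test_anothersmtp.py

    smtpserver = "mail.python.org"  # will be read by the smtp_connection fixture


    def test_showhelo(smtp_connection):
        assert 0, smtp_connection.helo()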
|  | @ -538,7 +538,7 @@ Running it: | |||
|     F                                                                    [100%] | ||||
|     ================================= FAILURES ================================= | ||||
|     ______________________________ test_showhelo _______________________________ | ||||
|     test_anothersmtp.py:5: in test_showhelo | ||||
|     test_anothersmtp.py:6: in test_showhelo | ||||
|         assert 0, smtp_connection.helo() | ||||
|     E   AssertionError: (250, b'mail.python.org') | ||||
|     E   assert 0 | ||||
|  | @ -654,7 +654,7 @@ So let's just do another run: | |||
|     >       assert 0  # for demo purposes | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_module.py:6: AssertionError | ||||
|     test_module.py:7: AssertionError | ||||
|     ________________________ test_noop[smtp.gmail.com] _________________________ | ||||
| 
 | ||||
|     smtp_connection = <smtplib.SMTP object at 0xdeadbeef> | ||||
|  | @ -665,7 +665,7 @@ So let's just do another run: | |||
|     >       assert 0  # for demo purposes | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_module.py:11: AssertionError | ||||
|     test_module.py:13: AssertionError | ||||
|     ________________________ test_ehlo[mail.python.org] ________________________ | ||||
| 
 | ||||
|     smtp_connection = <smtplib.SMTP object at 0xdeadbeef> | ||||
|  | @ -676,7 +676,7 @@ So let's just do another run: | |||
|     >       assert b"smtp.gmail.com" in msg | ||||
|     E       AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING' | ||||
| 
 | ||||
|     test_module.py:5: AssertionError | ||||
|     test_module.py:6: AssertionError | ||||
|     -------------------------- Captured stdout setup --------------------------- | ||||
|     finalizing <smtplib.SMTP object at 0xdeadbeef> | ||||
|     ________________________ test_noop[mail.python.org] ________________________ | ||||
|  | @ -689,10 +689,10 @@ So let's just do another run: | |||
|     >       assert 0  # for demo purposes | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_module.py:11: AssertionError | ||||
|     test_module.py:13: AssertionError | ||||
|     ------------------------- Captured stdout teardown ------------------------- | ||||
|     finalizing <smtplib.SMTP object at 0xdeadbeef> | ||||
|     4 failed in 0.12 seconds | ||||
|     4 failed in 0.89s | ||||
| 
 | ||||
| We see that our two test functions each ran twice, against the different | ||||
| ``smtp_connection`` instances.  Note also, that with the ``mail.python.org`` | ||||
|  | @ -771,7 +771,7 @@ Running the above tests results in the following test IDs being used: | |||
|      <Function test_ehlo[mail.python.org]> | ||||
|      <Function test_noop[mail.python.org]> | ||||
| 
 | ||||
|    ======================= no tests ran in 0.12 seconds ======================= | ||||
|    ========================== no tests ran in 0.01s =========================== | ||||
| 
 | ||||
| .. _`fixture-parametrize-marks`: | ||||
| 
 | ||||
|  | @ -812,7 +812,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``: | |||
|     test_fixture_marks.py::test_data[1] PASSED                           [ 66%] | ||||
|     test_fixture_marks.py::test_data[2] SKIPPED                          [100%] | ||||
| 
 | ||||
|     =================== 2 passed, 1 skipped in 0.12 seconds ==================== | ||||
|     ======================= 2 passed, 1 skipped in 0.01s ======================= | ||||
| 
 | ||||
| .. _`interdependent fixtures`: | ||||
| 
 | ||||
|  | @ -861,7 +861,7 @@ Here we declare an ``app`` fixture which receives the previously defined | |||
|     test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%] | ||||
|     test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%] | ||||
| 
 | ||||
|     ========================= 2 passed in 0.12 seconds ========================= | ||||
|     ============================ 2 passed in 0.44s ============================= | ||||
| 
 | ||||
| Due to the parametrization of ``smtp_connection``, the test will run twice with two | ||||
| different ``App`` instances and respective smtp servers.  There is no | ||||
|  | @ -971,7 +971,7 @@ Let's run the tests in verbose mode and with looking at the print-output: | |||
|       TEARDOWN modarg mod2 | ||||
| 
 | ||||
| 
 | ||||
|     ========================= 8 passed in 0.12 seconds ========================= | ||||
|     ============================ 8 passed in 0.01s ============================= | ||||
| 
 | ||||
| You can see that the parametrized module-scoped ``modarg`` resource caused an | ||||

| ordering of test execution that led to the fewest possible "active" resources. | ||||
|  | @ -1043,7 +1043,7 @@ to verify our fixture is activated and the tests pass: | |||
| 
 | ||||
|     $ pytest -q | ||||
|     ..                                                                   [100%] | ||||
|     2 passed in 0.12 seconds | ||||
|     2 passed in 0.01s | ||||
| 
 | ||||
| You can specify multiple fixtures like this: | ||||
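The snippet itself is omitted from this hunk; a minimal sketch, with ``anotherfixture`` as a hypothetical second fixture name:

.. code-block:: python

    import pytest


    @pytest.mark.usefixtures("cleandir", "anotherfixture")
    def test():
        ...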
| 
 | ||||
|  | @ -1151,7 +1151,7 @@ If we run it, we get two passing tests: | |||
| 
 | ||||
|     $ pytest -q | ||||
|     ..                                                                   [100%] | ||||
|     2 passed in 0.12 seconds | ||||
|     2 passed in 0.01s | ||||
| 
 | ||||
| Here is how autouse fixtures work in other scopes: | ||||
| 
 | ||||
|  |  | |||
|  | @ -28,7 +28,7 @@ Install ``pytest`` | |||
| .. code-block:: bash | ||||
| 
 | ||||
|     $ pytest --version | ||||
|     This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.x/site-packages/pytest.py | ||||
|     This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py | ||||
| 
 | ||||
| .. _`simpletest`: | ||||
| 
 | ||||
|  | @ -68,8 +68,8 @@ That’s it. You can now execute the test function: | |||
|     E       assert 4 == 5 | ||||
|     E        +  where 4 = func(3) | ||||
| 
 | ||||
|     test_sample.py:5: AssertionError | ||||
|     ========================= 1 failed in 0.12 seconds ========================= | ||||
|     test_sample.py:6: AssertionError | ||||
|     ============================ 1 failed in 0.02s ============================= | ||||
| 
 | ||||
| This test returns a failure report because ``func(3)`` does not return ``5``. | ||||
| 
 | ||||
|  | @ -108,7 +108,7 @@ Execute the test function with “quiet” reporting mode: | |||
| 
 | ||||
|     $ pytest -q test_sysexit.py | ||||
|     .                                                                    [100%] | ||||
|     1 passed in 0.12 seconds | ||||
|     1 passed in 0.00s | ||||
| 
 | ||||
| Group multiple tests in a class | ||||
| -------------------------------------------------------------- | ||||
|  | @ -140,12 +140,12 @@ Once you develop multiple tests, you may want to group them into a class. pytest | |||
| 
 | ||||
|         def test_two(self): | ||||
|             x = "hello" | ||||
|     >       assert hasattr(x, 'check') | ||||
|     >       assert hasattr(x, "check") | ||||
|     E       AssertionError: assert False | ||||
|     E        +  where False = hasattr('hello', 'check') | ||||
| 
 | ||||
|     test_class.py:8: AssertionError | ||||
|     1 failed, 1 passed in 0.12 seconds | ||||
|     1 failed, 1 passed in 0.02s | ||||
| 
 | ||||
| The first test passed and the second failed. You can easily see the intermediate values in the assertion to help you understand the reason for the failure. | ||||
| 
 | ||||
|  | @ -180,7 +180,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look | |||
|     test_tmpdir.py:3: AssertionError | ||||
|     --------------------------- Captured stdout call --------------------------- | ||||
|     PYTEST_TMPDIR/test_needsfiles0 | ||||
|     1 failed in 0.12 seconds | ||||
|     1 failed in 0.02s | ||||
| 
 | ||||
| More info on tmpdir handling is available at :ref:`Temporary directories and files <tmpdir handling>`. | ||||
| 
 | ||||
|  |  | |||
|  | @ -44,7 +44,7 @@ To execute it: | |||
|     E        +  where 4 = inc(3) | ||||
| 
 | ||||
|     test_sample.py:6: AssertionError | ||||
|     ========================= 1 failed in 0.12 seconds ========================= | ||||
|     ============================ 1 failed in 0.02s ============================= | ||||
| 
 | ||||
| Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. | ||||
| See :ref:`Getting Started <getstarted>` for more examples. | ||||
|  |  | |||
|  | @ -50,7 +50,7 @@ these patches. | |||
| :py:meth:`monkeypatch.chdir` to change the context of the current working directory | ||||
| during a test. | ||||
| 
 | ||||
| 5. Use py:meth:`monkeypatch.syspath_prepend` to modify ``sys.path`` which will also | ||||
| 5. Use :py:meth:`monkeypatch.syspath_prepend` to modify ``sys.path`` which will also | ||||
| call :py:meth:`pkg_resources.fixup_namespace_packages` and :py:meth:`importlib.invalidate_caches`. | ||||
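A short illustration of item 5 (not taken from the docs), using ``tmp_path`` to create a throwaway module and make it importable:

.. code-block:: python

    def test_syspath_prepend(monkeypatch, tmp_path):
        # write a small module and put its directory first on sys.path
        (tmp_path / "extra_module.py").write_text("VALUE = 42")
        monkeypatch.syspath_prepend(str(tmp_path))

        import extra_module

        assert extra_module.VALUE == 42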
| 
 | ||||
| See the `monkeypatch blog post`_ for some introduction material | ||||
|  |  | |||
|  | @ -75,7 +75,7 @@ them in turn: | |||
|     E        +  where 54 = eval('6*9') | ||||
| 
 | ||||
|     test_expectation.py:6: AssertionError | ||||
|     ==================== 1 failed, 2 passed in 0.12 seconds ==================== | ||||
|     ======================= 1 failed, 2 passed in 0.02s ======================== | ||||
| 
 | ||||
| .. note:: | ||||
| 
 | ||||
|  | @ -128,7 +128,7 @@ Let's run this: | |||
| 
 | ||||
|     test_expectation.py ..x                                              [100%] | ||||
| 
 | ||||
|     =================== 2 passed, 1 xfailed in 0.12 seconds ==================== | ||||
|     ======================= 2 passed, 1 xfailed in 0.02s ======================= | ||||
| 
 | ||||
| The one parameter set which caused a failure previously now | ||||
| shows up as an "xfailed (expected to fail)" test. | ||||
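For reference, the ``..x`` outcome above comes from marking one parameter set with ``xfail``; a sketch consistent with that output:

.. code-block:: python

    # content of test_expectation.py
    import pytest


    @pytest.mark.parametrize(
        "test_input,expected",
        [("3+5", 8), ("2+4", 6), pytest.param("6*9", 42, marks=pytest.mark.xfail)],
    )
    def test_eval(test_input, expected):
        assert eval(test_input) == expected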
|  | @ -205,7 +205,7 @@ If we now pass two stringinput values, our test will run twice: | |||
| 
 | ||||
|     $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py | ||||
|     ..                                                                   [100%] | ||||
|     2 passed in 0.12 seconds | ||||
|     2 passed in 0.01s | ||||
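The ``--stringinput`` option and the per-value parametrization come from ``conftest.py`` hooks; a sketch of that pattern (the docs' actual files may differ slightly):

.. code-block:: python

    # content of conftest.py
    def pytest_addoption(parser):
        parser.addoption(
            "--stringinput",
            action="append",
            default=[],
            help="list of stringinputs to pass to test functions",
        )


    def pytest_generate_tests(metafunc):
        if "stringinput" in metafunc.fixturenames:
            metafunc.parametrize("stringinput", metafunc.config.getoption("stringinput"))

.. code-block:: python

    # content of test_strings.py
    def test_valid_string(stringinput):
        assert stringinput.isalpha()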
| 
 | ||||
| Let's also run with a stringinput that will lead to a failing test: | ||||
| 
 | ||||
|  | @ -225,7 +225,7 @@ Let's also run with a stringinput that will lead to a failing test: | |||
|     E        +    where <built-in method isalpha of str object at 0xdeadbeef> = '!'.isalpha | ||||
| 
 | ||||
|     test_strings.py:4: AssertionError | ||||
|     1 failed in 0.12 seconds | ||||
|     1 failed in 0.02s | ||||
| 
 | ||||
| As expected, our test function fails. | ||||
| 
 | ||||
|  | @ -239,7 +239,7 @@ list: | |||
|     s                                                                    [100%] | ||||
|     ========================= short test summary info ========================== | ||||
|     SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:2 | ||||
|     1 skipped in 0.12 seconds | ||||
|     1 skipped in 0.00s | ||||
| 
 | ||||
| Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, parameter names must not be | ||||
| duplicated across those sets, otherwise an error will be raised. | ||||
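To illustrate, splitting the parametrization over two calls works as long as each call uses distinct names; a hypothetical sketch:

.. code-block:: python

    # conftest.py (hypothetical)
    def pytest_generate_tests(metafunc):
        if {"x", "y"} <= set(metafunc.fixturenames):
            metafunc.parametrize("x", [0, 1])
            # reusing "x" here instead of "y" would raise a collection error
            metafunc.parametrize("y", [2, 3])

A test ``def test_grid(x, y): ...`` then runs once per combination, four times in total.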
|  |  | |||
|  | @ -7,8 +7,8 @@ Python 3.4's last release is scheduled for | |||
| `March 2019 <https://www.python.org/dev/peps/pep-0429/#release-schedule>`__. pytest is one of | ||||
| the participating projects of the https://python3statement.org. | ||||
| 
 | ||||
| The **pytest 4.6** series will be the last to support Python 2.7 and 3.4, and is scheduled | ||||
| to be released by **mid-2019**. **pytest 5.0** and onwards will support only Python 3.5+. | ||||
| The **pytest 4.6** series is the last to support Python 2.7 and 3.4, and was released in | ||||
| **June 2019**. **pytest 5.0** and onwards will support only Python 3.5+. | ||||
| 
 | ||||
| Thanks to the `python_requires`_ ``setuptools`` option, | ||||
| Python 2.7 and Python 3.4 users using a modern ``pip`` version | ||||
|  |  | |||
|  | @ -371,7 +371,7 @@ Running it with the report-on-xfail option gives this output: | |||
|     XFAIL xfail_demo.py::test_hello6 | ||||
|       reason: reason | ||||
|     XFAIL xfail_demo.py::test_hello7 | ||||
|     ======================== 7 xfailed in 0.12 seconds ========================= | ||||
|     ============================ 7 xfailed in 0.05s ============================ | ||||
| 
 | ||||
| .. _`skip/xfail with parametrize`: | ||||
| 
 | ||||
|  |  | |||
|  | @ -4,7 +4,6 @@ Talks and Tutorials | |||
| 
 | ||||
| .. sidebar:: Next Open Trainings | ||||
| 
 | ||||
|    - `Training at Workshoptage 2019 <https://workshoptage.ch/workshops/2019/test-driven-development-fuer-python-mit-pytest/>`_ (German), 10th September 2019, Rapperswil, Switzerland. | ||||
|    - `3 day hands-on workshop covering pytest, tox and devpi: "Professional Testing with Python" <https://python-academy.com/courses/specialtopics/python_course_testing.html>`_ (English), October 21 - 23, 2019, Leipzig, Germany. | ||||
| 
 | ||||
| .. _`funcargs`: funcargs.html | ||||
|  |  | |||
|  | @ -64,7 +64,7 @@ Running this would result in a passed test except for the last | |||
|     E       assert 0 | ||||
| 
 | ||||
|     test_tmp_path.py:13: AssertionError | ||||
|     ========================= 1 failed in 0.12 seconds ========================= | ||||
|     ============================ 1 failed in 0.02s ============================= | ||||
| 
 | ||||
| .. _`tmp_path_factory example`: | ||||
| 
 | ||||
|  | @ -132,8 +132,8 @@ Running this would result in a passed test except for the last | |||
|     >       assert 0 | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_tmpdir.py:7: AssertionError | ||||
|     ========================= 1 failed in 0.12 seconds ========================= | ||||
|     test_tmpdir.py:9: AssertionError | ||||
|     ============================ 1 failed in 0.02s ============================= | ||||
| 
 | ||||
| .. _`tmpdir factory example`: | ||||
| 
 | ||||
|  |  | |||
|  | @ -155,7 +155,7 @@ the ``self.db`` values in the traceback: | |||
|     E       AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef> | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_unittest_db.py:9: AssertionError | ||||
|     test_unittest_db.py:10: AssertionError | ||||
|     ___________________________ MyTest.test_method2 ____________________________ | ||||
| 
 | ||||
|     self = <test_unittest_db.MyTest testMethod=test_method2> | ||||
|  | @ -165,8 +165,8 @@ the ``self.db`` values in the traceback: | |||
|     E       AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef> | ||||
|     E       assert 0 | ||||
| 
 | ||||
|     test_unittest_db.py:12: AssertionError | ||||
|     ========================= 2 failed in 0.12 seconds ========================= | ||||
|     test_unittest_db.py:13: AssertionError | ||||
|     ============================ 2 failed in 0.02s ============================= | ||||
| 
 | ||||
| This default pytest traceback shows that the two test methods | ||||
| share the same ``self.db`` instance which was our intention | ||||
|  | @ -219,7 +219,7 @@ Running this test module ...: | |||
| 
 | ||||
|     $ pytest -q test_unittest_cleandir.py | ||||
|     .                                                                    [100%] | ||||
|     1 passed in 0.12 seconds | ||||
|     1 passed in 0.01s | ||||
| 
 | ||||
| ... gives us one passed test because the ``initdir`` fixture function | ||||
| was executed ahead of the ``test_method``. | ||||
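A sketch of the test module being described, close to the docs' ``test_unittest_cleandir.py`` (details may differ):

.. code-block:: python

    # content of test_unittest_cleandir.py
    import unittest

    import pytest


    class MyTest(unittest.TestCase):
        @pytest.fixture(autouse=True)
        def initdir(self, tmpdir):
            tmpdir.chdir()  # change to the pytest-provided temporary directory
            tmpdir.join("samplefile.ini").write("# testdata")

        def test_method(self):
            with open("samplefile.ini") as f:
                s = f.read()
            assert "testdata" in s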
|  |  | |||
|  | @ -247,7 +247,7 @@ Example: | |||
|     XPASS test_example.py::test_xpass always xfail | ||||
|     ERROR test_example.py::test_error - assert 0 | ||||
|     FAILED test_example.py::test_fail - assert 0 | ||||
|     = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = | ||||
|     == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s === | ||||
| 
 | ||||
| The ``-r`` option accepts a number of characters after it, with ``a`` used | ||||
| above meaning "all except passes". | ||||
|  | @ -297,7 +297,7 @@ More than one character can be used, so for example to only see failed and skipp | |||
|     ========================= short test summary info ========================== | ||||
|     FAILED test_example.py::test_fail - assert 0 | ||||
|     SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test | ||||
|     = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = | ||||
|     == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s === | ||||
| 
 | ||||
| Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had | ||||
| captured output: | ||||
|  | @ -336,7 +336,7 @@ captured output: | |||
|     ok | ||||
|     ========================= short test summary info ========================== | ||||
|     PASSED test_example.py::test_ok | ||||
|     = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = | ||||
|     == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s === | ||||
| 
 | ||||
| .. _pdb-option: | ||||
| 
 | ||||
|  |  | |||
|  | @ -41,7 +41,7 @@ Running pytest now produces this output: | |||
|         warnings.warn(UserWarning("api v1, should use functions from v2")) | ||||
| 
 | ||||
|     -- Docs: https://docs.pytest.org/en/latest/warnings.html | ||||
|     =================== 1 passed, 1 warnings in 0.12 seconds =================== | ||||
|     ====================== 1 passed, 1 warnings in 0.00s ======================= | ||||
| 
 | ||||
| The ``-W`` flag can be passed to control which warnings will be displayed or even turn | ||||
| them into errors: | ||||
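One invocation that produces the failure shown below is to escalate ``UserWarning`` to an error:

.. code-block:: bash

    $ pytest -q test_show_warnings.py -W error::UserWarning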
|  | @ -64,7 +64,7 @@ them into errors: | |||
|     E       UserWarning: api v1, should use functions from v2 | ||||
| 
 | ||||
|     test_show_warnings.py:5: UserWarning | ||||
|     1 failed in 0.12 seconds | ||||
|     1 failed in 0.02s | ||||
| 
 | ||||
| The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option. | ||||
| For example, the configuration below will ignore all user warnings, but will transform | ||||
|  | @ -407,7 +407,7 @@ defines an ``__init__`` constructor, as this prevents the class from being insta | |||
|         class Test: | ||||
| 
 | ||||
|     -- Docs: https://docs.pytest.org/en/latest/warnings.html | ||||
|     1 warnings in 0.12 seconds | ||||
|     1 warnings in 0.00s | ||||
| 
 | ||||
| These warnings might be filtered using the same builtin mechanisms used to filter other types of warnings. | ||||
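For instance, a project that wants to silence this particular collection warning could add a message-based filter to ``pytest.ini`` (a sketch; match it to the warning text actually emitted):

.. code-block:: ini

    [pytest]
    filterwarnings =
        ignore:cannot collect test class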
| 
 | ||||
|  |  | |||
|  | @ -442,7 +442,7 @@ additionally it is possible to copy examples for an example folder before runnin | |||
|         testdir.copy_example("test_example.py") | ||||
| 
 | ||||
|     -- Docs: https://docs.pytest.org/en/latest/warnings.html | ||||
|     =================== 2 passed, 1 warnings in 0.12 seconds =================== | ||||
|     ====================== 2 passed, 1 warnings in 0.12s ======================= | ||||
| 
 | ||||
| For more information about the result object that ``runpytest()`` returns, and | ||||
| the methods that it provides please check out the :py:class:`RunResult | ||||
|  |  | |||
|  | @ -596,7 +596,7 @@ class ExceptionInfo(Generic[_E]): | |||
|         ) | ||||
|         return fmt.repr_excinfo(self) | ||||
| 
 | ||||
|     def match(self, regexp: Union[str, Pattern]) -> bool: | ||||
|     def match(self, regexp: "Union[str, Pattern]") -> bool: | ||||
|         """ | ||||
|         Check whether the regular expression 'regexp' is found in the string | ||||
|         representation of the exception using ``re.search``. If it matches | ||||
|  |  | |||
|  | @ -35,9 +35,6 @@ PYTEST_TAG = "{}-pytest-{}".format(sys.implementation.cache_tag, version) | |||
| PYC_EXT = ".py" + (__debug__ and "c" or "o") | ||||
| PYC_TAIL = "." + PYTEST_TAG + PYC_EXT | ||||
| 
 | ||||
| AST_IS = ast.Is() | ||||
| AST_NONE = ast.NameConstant(None) | ||||
| 
 | ||||
| 
 | ||||
| class AssertionRewritingHook(importlib.abc.MetaPathFinder): | ||||
|     """PEP302/PEP451 import hook which rewrites asserts.""" | ||||
|  | @ -863,7 +860,7 @@ class AssertionRewriter(ast.NodeVisitor): | |||
|         internally already. | ||||
|         See issue #3191 for more details. | ||||
|         """ | ||||
|         val_is_none = ast.Compare(node, [AST_IS], [AST_NONE]) | ||||
|         val_is_none = ast.Compare(node, [ast.Is()], [ast.NameConstant(None)]) | ||||
|         send_warning = ast.parse( | ||||
|             """\ | ||||
| from _pytest.warning_types import PytestAssertRewriteWarning | ||||
|  |  | |||
|  | @ -9,6 +9,7 @@ import sys | |||
| from contextlib import contextmanager | ||||
| from inspect import Parameter | ||||
| from inspect import signature | ||||
| from typing import overload | ||||
| 
 | ||||
| import attr | ||||
| import py | ||||
|  | @ -27,9 +28,9 @@ MODULE_NOT_FOUND_ERROR = ( | |||
| 
 | ||||
| 
 | ||||
| if sys.version_info >= (3, 8): | ||||
|     from importlib import metadata as importlib_metadata  # noqa | ||||
|     from importlib import metadata as importlib_metadata  # noqa: F401 | ||||
| else: | ||||
|     import importlib_metadata  # noqa | ||||
|     import importlib_metadata  # noqa: F401 | ||||
| 
 | ||||
| 
 | ||||
| def _format_args(func): | ||||
|  | @ -347,3 +348,9 @@ class FuncargnamesCompatAttr: | |||
| 
 | ||||
|         warnings.warn(FUNCARGNAMES, stacklevel=2) | ||||
|         return self.fixturenames | ||||
| 
 | ||||
| 
 | ||||
| if sys.version_info < (3, 5, 2):  # pragma: no cover | ||||
| 
 | ||||
|     def overload(f):  # noqa: F811 | ||||
|         return f | ||||
|  |  | |||
|  | @ -72,7 +72,7 @@ def create_new_paste(contents): | |||
|     if m: | ||||
|         return "{}/show/{}".format(url, m.group(1)) | ||||
|     else: | ||||
|         return "bad response: " + response | ||||
|         return "bad response: " + response.decode("utf-8") | ||||
| 
 | ||||
| 
 | ||||
| def pytest_terminal_summary(terminalreporter): | ||||
|  |  | |||
|  | @ -13,7 +13,6 @@ from typing import Callable | |||
| from typing import cast | ||||
| from typing import Generic | ||||
| from typing import Optional | ||||
| from typing import overload | ||||
| from typing import Pattern | ||||
| from typing import Tuple | ||||
| from typing import TypeVar | ||||
|  | @ -22,12 +21,14 @@ from typing import Union | |||
| from more_itertools.more import always_iterable | ||||
| 
 | ||||
| import _pytest._code | ||||
| from _pytest.compat import overload | ||||
| from _pytest.compat import STRING_TYPES | ||||
| from _pytest.outcomes import fail | ||||
| 
 | ||||
| if False:  # TYPE_CHECKING | ||||
|     from typing import Type  # noqa: F401 (used in type string) | ||||
| 
 | ||||
| 
 | ||||
| BASE_TYPE = (type, STRING_TYPES) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -547,12 +548,12 @@ _E = TypeVar("_E", bound=BaseException) | |||
| def raises( | ||||
|     expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]], | ||||
|     *, | ||||
|     match: Optional[Union[str, Pattern]] = ... | ||||
|     match: "Optional[Union[str, Pattern]]" = ... | ||||
| ) -> "RaisesContext[_E]": | ||||
|     ...  # pragma: no cover | ||||
| 
 | ||||
| 
 | ||||
| @overload | ||||
| @overload  # noqa: F811 | ||||
| def raises( | ||||
|     expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]], | ||||
|     func: Callable, | ||||
|  | @ -563,10 +564,10 @@ def raises( | |||
|     ...  # pragma: no cover | ||||
| 
 | ||||
| 
 | ||||
| def raises( | ||||
| def raises(  # noqa: F811 | ||||
|     expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]], | ||||
|     *args: Any, | ||||
|     match: Optional[Union[str, Pattern]] = None, | ||||
|     match: Optional[Union[str, "Pattern"]] = None, | ||||
|     **kwargs: Any | ||||
| ) -> Union["RaisesContext[_E]", Optional[_pytest._code.ExceptionInfo[_E]]]: | ||||
|     r""" | ||||
|  | @ -724,7 +725,7 @@ class RaisesContext(Generic[_E]): | |||
|         self, | ||||
|         expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]], | ||||
|         message: str, | ||||
|         match_expr: Optional[Union[str, Pattern]] = None, | ||||
|         match_expr: Optional[Union[str, "Pattern"]] = None, | ||||
|     ) -> None: | ||||
|         self.expected_exception = expected_exception | ||||
|         self.message = message | ||||
|  |  | |||
|  | @ -7,11 +7,11 @@ from typing import Callable | |||
| from typing import Iterator | ||||
| from typing import List | ||||
| from typing import Optional | ||||
| from typing import overload | ||||
| from typing import Pattern | ||||
| from typing import Tuple | ||||
| from typing import Union | ||||
| 
 | ||||
| from _pytest.compat import overload | ||||
| from _pytest.fixtures import yield_fixture | ||||
| from _pytest.outcomes import fail | ||||
| 
 | ||||
|  | @ -58,26 +58,26 @@ def deprecated_call(func=None, *args, **kwargs): | |||
| def warns( | ||||
|     expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]], | ||||
|     *, | ||||
|     match: Optional[Union[str, Pattern]] = ... | ||||
|     match: "Optional[Union[str, Pattern]]" = ... | ||||
| ) -> "WarningsChecker": | ||||
|     ...  # pragma: no cover | ||||
| 
 | ||||
| 
 | ||||
| @overload | ||||
| @overload  # noqa: F811 | ||||
| def warns( | ||||
|     expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]], | ||||
|     func: Callable, | ||||
|     *args: Any, | ||||
|     match: Optional[Union[str, Pattern]] = ..., | ||||
|     match: Optional[Union[str, "Pattern"]] = ..., | ||||
|     **kwargs: Any | ||||
| ) -> Union[Any]: | ||||
|     ...  # pragma: no cover | ||||
| 
 | ||||
| 
 | ||||
| def warns( | ||||
| def warns(  # noqa: F811 | ||||
|     expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]], | ||||
|     *args: Any, | ||||
|     match: Optional[Union[str, Pattern]] = None, | ||||
|     match: Optional[Union[str, "Pattern"]] = None, | ||||
|     **kwargs: Any | ||||
| ) -> Union["WarningsChecker", Any]: | ||||
|     r"""Assert that code raises a particular class of warning. | ||||
|  | @ -207,7 +207,7 @@ class WarningsChecker(WarningsRecorder): | |||
|         expected_warning: Optional[ | ||||
|             Union["Type[Warning]", Tuple["Type[Warning]", ...]] | ||||
|         ] = None, | ||||
|         match_expr: Optional[Union[str, Pattern]] = None, | ||||
|         match_expr: Optional[Union[str, "Pattern"]] = None, | ||||
|     ) -> None: | ||||
|         super().__init__() | ||||
| 
 | ||||
|  |  | |||
|  | @ -13,22 +13,22 @@ def test_getfuncargnames_functions(): | |||
|     """Test getfuncargnames for normal functions""" | ||||
| 
 | ||||
|     def f(): | ||||
|         pass | ||||
|         raise NotImplementedError() | ||||
| 
 | ||||
|     assert not fixtures.getfuncargnames(f) | ||||
| 
 | ||||
|     def g(arg): | ||||
|         pass | ||||
|         raise NotImplementedError() | ||||
| 
 | ||||
|     assert fixtures.getfuncargnames(g) == ("arg",) | ||||
| 
 | ||||
|     def h(arg1, arg2="hello"): | ||||
|         pass | ||||
|         raise NotImplementedError() | ||||
| 
 | ||||
|     assert fixtures.getfuncargnames(h) == ("arg1",) | ||||
| 
 | ||||
|     def j(arg1, arg2, arg3="hello"): | ||||
|         pass | ||||
|         raise NotImplementedError() | ||||
| 
 | ||||
|     assert fixtures.getfuncargnames(j) == ("arg1", "arg2") | ||||
| 
 | ||||
|  | @ -38,7 +38,7 @@ def test_getfuncargnames_methods(): | |||
| 
 | ||||
|     class A: | ||||
|         def f(self, arg1, arg2="hello"): | ||||
|             pass | ||||
|             raise NotImplementedError() | ||||
| 
 | ||||
|     assert fixtures.getfuncargnames(A().f) == ("arg1",) | ||||
| 
 | ||||
|  | @ -49,7 +49,7 @@ def test_getfuncargnames_staticmethod(): | |||
|     class A: | ||||
|         @staticmethod | ||||
|         def static(arg1, arg2, x=1): | ||||
|             pass | ||||
|             raise NotImplementedError() | ||||
| 
 | ||||
|     assert fixtures.getfuncargnames(A.static, cls=A) == ("arg1", "arg2") | ||||
| 
 | ||||
|  | @ -59,7 +59,7 @@ def test_getfuncargnames_partial(): | |||
|     import functools | ||||
| 
 | ||||
|     def check(arg1, arg2, i): | ||||
|         pass | ||||
|         raise NotImplementedError() | ||||
| 
 | ||||
|     class T: | ||||
|         test_ok = functools.partial(check, i=2) | ||||
|  | @ -73,7 +73,7 @@ def test_getfuncargnames_staticmethod_partial(): | |||
|     import functools | ||||
| 
 | ||||
|     def check(arg1, arg2, i): | ||||
|         pass | ||||
|         raise NotImplementedError() | ||||
| 
 | ||||
|     class T: | ||||
|         test_ok = staticmethod(functools.partial(check, i=2)) | ||||
|  | @ -3325,7 +3325,7 @@ class TestShowFixtures: | |||
|             @pytest.fixture | ||||
|             @pytest.fixture | ||||
|             def foo(): | ||||
|                 pass | ||||
|                 raise NotImplementedError() | ||||
| 
 | ||||
| 
 | ||||
| class TestContextManagerFixtureFuncs: | ||||
|  | @ -3951,7 +3951,7 @@ def test_call_fixture_function_error(): | |||
| 
 | ||||
|     @pytest.fixture | ||||
|     def fix(): | ||||
|         return 1 | ||||
|         raise NotImplementedError() | ||||
| 
 | ||||
|     with pytest.raises(pytest.fail.Exception): | ||||
|         assert fix() == 1 | ||||
|  |  | |||
|  | @ -163,9 +163,16 @@ class TestRaises: | |||
| 
 | ||||
|         class T: | ||||
|             def __call__(self): | ||||
|                 # Early versions of Python 3.5 have some bug causing the | ||||
|                 # __call__ frame to still refer to t even after everything | ||||
|                 # is done. This makes the test pass for them. | ||||
|                 if sys.version_info < (3, 5, 2):  # pragma: no cover | ||||
|                     del self | ||||
|                 raise ValueError | ||||
| 
 | ||||
|         t = T() | ||||
|         refcount = len(gc.get_referrers(t)) | ||||
| 
 | ||||
|         if method == "function": | ||||
|             pytest.raises(ValueError, t) | ||||
|         else: | ||||
|  | @ -175,14 +182,7 @@ class TestRaises: | |||
|         # ensure both forms of pytest.raises don't leave exceptions in sys.exc_info() | ||||
|         assert sys.exc_info() == (None, None, None) | ||||
| 
 | ||||
|         del t | ||||
|         # Make sure this does get updated in locals dict | ||||
|         # otherwise it could keep a reference | ||||
|         locals() | ||||
| 
 | ||||
|         # ensure the t instance is not stuck in a cyclic reference | ||||
|         for o in gc.get_objects(): | ||||
|             assert type(o) is not T | ||||
|         assert refcount == len(gc.get_referrers(t)) | ||||
| 
 | ||||
|     def test_raises_match(self): | ||||
|         msg = r"with base \d+" | ||||
|  |  | |||
|  | @ -490,7 +490,6 @@ class TestAssert_reprcompare: | |||
|         assert len(expl) > 1 | ||||
| 
 | ||||
|     def test_Sequence(self): | ||||
| 
 | ||||
|         if not hasattr(collections_abc, "MutableSequence"): | ||||
|             pytest.skip("cannot import MutableSequence") | ||||
|         MutableSequence = collections_abc.MutableSequence | ||||
|  | @ -806,9 +805,6 @@ class TestFormatExplanation: | |||
| 
 | ||||
| 
 | ||||
| class TestTruncateExplanation: | ||||
| 
 | ||||
|     """ Confirm assertion output is truncated as expected """ | ||||
| 
 | ||||
|     # The number of lines in the truncation explanation message. Used | ||||
|     # to calculate that results have the expected length. | ||||
|     LINES_IN_TRUNCATION_MSG = 2 | ||||
|  | @ -969,7 +965,13 @@ def test_pytest_assertrepr_compare_integration(testdir): | |||
|     ) | ||||
|     result = testdir.runpytest() | ||||
|     result.stdout.fnmatch_lines( | ||||
|         ["*def test_hello():*", "*assert x == y*", "*E*Extra items*left*", "*E*50*"] | ||||
|         [ | ||||
|             "*def test_hello():*", | ||||
|             "*assert x == y*", | ||||
|             "*E*Extra items*left*", | ||||
|             "*E*50*", | ||||
|             "*= 1 failed in*", | ||||
|         ] | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -1302,3 +1304,23 @@ def test_exit_from_assertrepr_compare(monkeypatch): | |||
| 
 | ||||
|     with pytest.raises(outcomes.Exit, match="Quitting debugger"): | ||||
|         callequal(1, 1) | ||||
| 
 | ||||
| 
 | ||||
| def test_assertion_location_with_coverage(testdir): | ||||
|     """This used to report the wrong location when run with coverage (#5754).""" | ||||
|     p = testdir.makepyfile( | ||||
|         """ | ||||
|         def test(): | ||||
|             assert False, 1 | ||||
|             assert False, 2 | ||||
|         """ | ||||
|     ) | ||||
|     result = testdir.runpytest(str(p)) | ||||
|     result.stdout.fnmatch_lines( | ||||
|         [ | ||||
|             ">       assert False, 1", | ||||
|             "E       AssertionError: 1", | ||||
|             "E       assert False", | ||||
|             "*= 1 failed in*", | ||||
|         ] | ||||
|     ) | ||||
|  |  | |||
|  | @ -116,3 +116,15 @@ class TestPaste: | |||
|         assert "lexer=%s" % lexer in data.decode() | ||||
|         assert "code=full-paste-contents" in data.decode() | ||||
|         assert "expiry=1week" in data.decode() | ||||
| 
 | ||||
|     def test_create_new_paste_failure(self, pastebin, monkeypatch): | ||||
|         import io | ||||
|         import urllib.request | ||||
| 
 | ||||
|         def response(url, data): | ||||
|             stream = io.BytesIO(b"something bad occurred") | ||||
|             return stream | ||||
| 
 | ||||
|         monkeypatch.setattr(urllib.request, "urlopen", response) | ||||
|         result = pastebin.create_new_paste(b"full-paste-contents") | ||||
|         assert result == "bad response: something bad occurred" | ||||
|  |  | |||
							
								
								
									
2 tox.ini
								
								
								
								
							|  | @ -118,7 +118,7 @@ commands = python scripts/release.py {posargs} | |||
| description = create GitHub release after deployment | ||||
| basepython = python3.6 | ||||
| usedevelop = True | ||||
| passenv = GH_RELEASE_NOTES_TOKEN TRAVIS_TAG | ||||
| passenv = GH_RELEASE_NOTES_TOKEN TRAVIS_TAG TRAVIS_REPO_SLUG | ||||
| deps = | ||||
|     github3.py | ||||
|     pypandoc | ||||
|  |  | |||