commit 8ccc0177c8
@@ -6,7 +6,7 @@ Here is a quick checklist that should be present in PRs.
 -->

 - [ ] Target the `master` branch for bug fixes, documentation updates and trivial changes.
-- [ ] Target the `features` branch for new features and removals/deprecations.
+- [ ] Target the `features` branch for new features, improvements, and removals/deprecations.
 - [ ] Include documentation when adding new features.
 - [ ] Include new tests or update existing tests when applicable.

@@ -42,15 +42,10 @@ repos:
     hooks:
     -   id: rst-backticks
 -   repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.711
+    rev: v0.720
     hooks:
     -   id: mypy
-        name: mypy (src)
-        files: ^src/
-        args: []
-    -   id: mypy
-        name: mypy (testing)
-        files: ^testing/
-        args: []
+        files: ^(src/|testing/)
+        args: []
 -   repo: local
     hooks:
@@ -64,7 +59,7 @@ repos:
         name: changelog filenames
         language: fail
         entry: 'changelog files must be named ####.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst'
-        exclude: changelog/(\d+\.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst|README.rst|_template.rst)
+        exclude: changelog/(\d+\.(feature|improvement|bugfix|doc|deprecation|removal|vendor|trivial).rst|README.rst|_template.rst)
         files: ^changelog/
     -   id: py-deprecated
         name: py library is deprecated
.travis.yml (11 lines changed)
@@ -72,8 +72,17 @@ jobs:

   - stage: deploy
     python: '3.6'
-    install: pip install -U setuptools setuptools_scm
+    install: pip install -U setuptools setuptools_scm tox
     script: skip
+    # token to upload github release notes: GH_RELEASE_NOTES_TOKEN
+    env:
+      - secure: "OjOeL7/0JUDkV00SsTs732e8vQjHynpbG9FKTNtZZJ+1Zn4Cib+hAlwmlBnvVukML0X60YpcfjnC4quDOIGLPsh5zeXnvJmYtAIIUNQXjWz8NhcGYrhyzuP1rqV22U68RTCdmOq3lMYU/W2acwHP7T49PwJtOiUM5kF120UAQ0Zi5EmkqkIvH8oM5mO9Dlver+/U7Htpz9rhKrHBXQNCMZI6yj2aUyukqB2PN2fjAlDbCF//+FmvYw9NjT4GeFOSkTCf4ER9yfqs7yglRfwiLtOCZ2qKQhWZNsSJDB89rxIRXWavJUjJKeY2EW2/NkomYJDpqJLIF4JeFRw/HhA47CYPeo6BJqyyNV+0CovL1frpWfi9UQw2cMbgFUkUIUk3F6DD59PHNIOX2R/HX56dQsw7WKl3QuHlCOkICXYg8F7Ta684IoKjeTX03/6QNOkURfDBwfGszY0FpbxrjCSWKom6RyZdyidnESaxv9RzjcIRZVh1rp8KMrwS1OrwRSdG0zjlsPr49hWMenN/8fKgcHTV4/r1Tj6mip0dorSRCrgUNIeRBKgmui6FS8642ab5JNKOxMteVPVR2sFuhjOQ0Jy+PmvceYY9ZMWc3+/B/KVh0dZ3hwvLGZep/vxDS2PwCA5/xw31714vT5LxidKo8yECjBynMU/wUTTS695D3NY="
+    addons:
+      apt:
+        packages:
+          # required by publish_gh_release_notes
+          - pandoc
+    after_deploy: tox -e publish_gh_release_notes
     deploy:
       provider: pypi
       user: nicoddemus
AUTHORS (1 line changed)
@@ -71,6 +71,7 @@ Danielle Jenkins
 Dave Hunt
 David Díaz-Barquero
 David Mohr
+David Paul Röthlisberger
 David Szotten
 David Vierra
 Daw-Ran Liou
CHANGELOG.rst (158 lines changed)
@@ -18,6 +18,164 @@ with advance notice in the **Deprecations** section of releases.

 .. towncrier release notes start

+pytest 5.1.0 (2019-08-15)
+=========================
+
+Removals
+--------
+
+- `#5180 <https://github.com/pytest-dev/pytest/issues/5180>`_: As per our policy, the following features have been deprecated in the 4.X series and are now
+  removed:
+
+  * ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead.
+
+  * ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument.
+
+  * ``message`` parameter of ``pytest.raises``.
+
+  * ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only
+    syntax. This might change the exception message from previous versions, but they still raise
+    ``TypeError`` on unknown keyword arguments as before.
+
+  * ``pytest.config`` global variable.
+
+  * ``tmpdir_factory.ensuretemp`` method.
+
+  * ``pytest_logwarning`` hook.
+
+  * ``RemovedInPytest4Warning`` warning type.
+
+  * ``request`` is now a reserved name for fixtures.
+
+  For more information consult
+  `Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs.
+
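Of these removals, the keyword-only switch is the one most likely to require source changes in test suites. A minimal sketch of the surviving spelling (illustrative only, not part of this commit):

.. code-block:: python

    import pytest

    def test_division_by_zero():
        # the context-manager form; ``match`` must now be passed by keyword
        with pytest.raises(ZeroDivisionError, match="division"):
            1 / 0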
+- `#5565 <https://github.com/pytest-dev/pytest/issues/5565>`_: Removed unused support code for `unittest2 <https://pypi.org/project/unittest2/>`__.
+
+  The ``unittest2`` backport module is no longer
+  necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem
+  to be used: after removing it, all tests still pass unchanged.
+
+  Although our policy is to introduce a deprecation period before removing any features or support
+  for third party libraries, because this code is apparently not used
+  at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to
+  remove it in this release.
+
+  If you experience a regression because of this, please
+  `file an issue <https://github.com/pytest-dev/pytest/issues/new>`__.
+
+- `#5615 <https://github.com/pytest-dev/pytest/issues/5615>`_: ``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument.
+
+  This was supported for Python 2 where it was tempting to use ``"message"``
+  instead of ``u"message"``.
+
+  Python 3 code is unlikely to pass ``bytes`` to these functions. If you do,
+  please decode it to an ``str`` beforehand.
+
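If a suite still passes ``bytes`` to these functions, decoding at the call site is enough. A sketch (the helper name is hypothetical):

.. code-block:: python

    import pytest

    def skip_with_raw_reason(raw: bytes) -> None:
        # decode explicitly; passing bytes directly is no longer supported
        pytest.skip(raw.decode("utf-8"))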
+Features
+--------
+
+- `#5564 <https://github.com/pytest-dev/pytest/issues/5564>`_: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.
+
+- `#5576 <https://github.com/pytest-dev/pytest/issues/5576>`_: New `NUMBER <https://docs.pytest.org/en/latest/doctest.html#using-doctest-options>`__
+  option for doctests to ignore irrelevant differences in floating-point numbers.
+  Inspired by Sébastien Boisgérault's `numtest <https://github.com/boisgera/numtest>`__
+  extension for doctest.
+
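A sketch of how the new attribute can be inspected from a ``conftest.py`` (assumes pytest 5.1; the print is illustrative):

.. code-block:: python

    # conftest.py (sketch)
    def pytest_configure(config):
        # holds the unchanged arguments that were passed to pytest.main()
        print("invocation args:", config.invocation_args)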
+Improvements
+------------
+
+- `#5471 <https://github.com/pytest-dev/pytest/issues/5471>`_: JUnit XML now includes a timestamp and hostname in the testsuite tag.
+
+- `#5707 <https://github.com/pytest-dev/pytest/issues/5707>`_: Time taken to run the test suite now includes a human-readable representation when it takes over
+  60 seconds, for example::
+
+      ===== 2 failed in 102.70s (0:01:42) =====
+
+Bug Fixes
+---------
+
+- `#4344 <https://github.com/pytest-dev/pytest/issues/4344>`_: Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.
+
+- `#5115 <https://github.com/pytest-dev/pytest/issues/5115>`_: Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest.
+
+- `#5477 <https://github.com/pytest-dev/pytest/issues/5477>`_: The XML file produced by ``--junitxml`` now correctly contains a ``<testsuites>`` root element.
+
+- `#5523 <https://github.com/pytest-dev/pytest/issues/5523>`_: Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+.
+
+- `#5524 <https://github.com/pytest-dev/pytest/issues/5524>`_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
+  which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.
+
+- `#5537 <https://github.com/pytest-dev/pytest/issues/5537>`_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
+  standard library on Python 3.8+.
+
+- `#5578 <https://github.com/pytest-dev/pytest/issues/5578>`_: Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc)
+  so they provide better error messages when users meant to use marks (for example ``@pytest.xfail``
+  instead of ``@pytest.mark.xfail``).
+
+- `#5606 <https://github.com/pytest-dev/pytest/issues/5606>`_: Fixed internal error when test functions were patched with objects that cannot be compared
+  for truth values against others, like ``numpy`` arrays.
+
+- `#5634 <https://github.com/pytest-dev/pytest/issues/5634>`_: ``pytest.exit`` is now correctly handled in ``unittest`` cases.
+  This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly.
+
+- `#5650 <https://github.com/pytest-dev/pytest/issues/5650>`_: Improved output when parsing an ini configuration file fails.
+
+- `#5701 <https://github.com/pytest-dev/pytest/issues/5701>`_: Fix collection of ``staticmethod`` objects defined with ``functools.partial``.
+
+- `#5734 <https://github.com/pytest-dev/pytest/issues/5734>`_: Skip async generator test functions, and update the warning message to refer to ``async def`` functions.
+
+
+Improved Documentation
+----------------------
+
+- `#5669 <https://github.com/pytest-dev/pytest/issues/5669>`_: Add docstring for ``Testdir.copy_example``.
+
+
+Trivial/Internal Changes
+------------------------
+
+- `#5095 <https://github.com/pytest-dev/pytest/issues/5095>`_: XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite
+  to avoid future regressions.
+
+- `#5516 <https://github.com/pytest-dev/pytest/issues/5516>`_: Cache node splitting function which can improve collection performance in very large test suites.
+
+- `#5603 <https://github.com/pytest-dev/pytest/issues/5603>`_: Simplified internal ``SafeRepr`` class and removed some dead code.
+
+- `#5664 <https://github.com/pytest-dev/pytest/issues/5664>`_: When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``,
+  the ``test_xfail_handling`` test no longer fails.
+
+- `#5684 <https://github.com/pytest-dev/pytest/issues/5684>`_: Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.).
+
+
 pytest 5.0.1 (2019-07-04)
 =========================

@@ -1 +0,0 @@
-Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.

@@ -1 +0,0 @@
-Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest.

@@ -1 +0,0 @@
-Cache node splitting function which can improve collection performance in very large test suites.

@@ -1,2 +0,0 @@
-Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
-which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.

@@ -1,3 +0,0 @@
-Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc)
-so they provide better error messages when users meant to use marks (for example ``@pytest.xfail``
-instead of ``@pytest.mark.xfail``).

@@ -1,2 +0,0 @@
-Fixed internal error when test functions were patched with objects that cannot be compared
-for truth values against others, like ``numpy`` arrays.

@@ -1,2 +0,0 @@
-``pytest.exit`` is now correctly handled in ``unittest`` cases.
-This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly.

@@ -1 +0,0 @@
-Improved output when parsing an ini configuration file fails.

@@ -1,2 +0,0 @@
-When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``,
-the ``test_xfail_handling`` test no longer fails.

@@ -1 +0,0 @@
-Add docstring for ``Testdir.copy_example``.

@@ -1 +0,0 @@
-Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.).

@@ -1 +0,0 @@
-Fix collection of ``staticmethod`` objects defined with ``functools.partial``.

@@ -1 +0,0 @@
-Skip async generator test functions, and update the warning message to refer to ``async def`` functions.
@@ -12,6 +12,7 @@ Each file should be named like ``<ISSUE>.<TYPE>.rst``, where
 ``<ISSUE>`` is an issue number, and ``<TYPE>`` is one of:

 * ``feature``: new user facing features, like new command-line options and new behavior.
+* ``improvement``: improvement of existing functionality, usually without requiring user intervention (for example, new fields being written in ``--junitxml``, improved colors in terminal, etc).
 * ``bugfix``: fixes a reported bug.
 * ``doc``: documentation improvement, like rewording an entire session or adding missing docs.
 * ``deprecation``: feature deprecation.
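Under this convention, a hypothetical PR for issue 1234 that uses the new type would add a file named::

    changelog/1234.improvement.rst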
@@ -6,6 +6,7 @@ Release announcements
    :maxdepth: 2


+   release-5.1.0
    release-5.0.1
    release-5.0.0
    release-4.6.5
@@ -0,0 +1,56 @@
+pytest-5.1.0
+=======================================
+
+The pytest team is proud to announce the 5.1.0 release!
+
+pytest is a mature Python testing tool with more than 2000 tests
+against itself, passing on many different interpreters and platforms.
+
+This release contains a number of bug fixes and improvements, so users are encouraged
+to take a look at the CHANGELOG:
+
+    https://docs.pytest.org/en/latest/changelog.html
+
+For complete documentation, please visit:
+
+    https://docs.pytest.org/en/latest/
+
+As usual, you can upgrade from pypi via:
+
+    pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+* Albert Tugushev
+* Alexey Zankevich
+* Anthony Sottile
+* Bruno Oliveira
+* Daniel Hahler
+* David Röthlisberger
+* Florian Bruhin
+* Ilya Stepin
+* Jon Dufresne
+* Kaiqi
+* Max R
+* Miro Hrončok
+* Oliver Bestwalter
+* Ran Benita
+* Ronny Pfannschmidt
+* Samuel Searles-Bryant
+* Semen Zhydenko
+* Steffen Schroeder
+* Thomas Grainger
+* Tim Hoffmann
+* William Woodall
+* Wojtek Erbetowski
+* Xixi Zhao
+* Yash Todi
+* boris
+* dmitry.dygalo
+* helloocc
+* martbln
+* mei-li
+
+
+Happy testing,
+The Pytest Development Team
@@ -47,7 +47,7 @@ you will see the return value of the function call:
 E + where 3 = f()

 test_assert1.py:6: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.05s =============================

 ``pytest`` has support for showing the values of the most common subexpressions
 including calls, attributes, comparisons, and binary and unary

@@ -208,7 +208,7 @@ if you run this module:
 E Use -v to get the full diff

 test_assert2.py:6: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.05s =============================

 Special comparisons are done for a number of cases:

@@ -279,7 +279,7 @@ the conftest file:
 E vals: 1 != 2

 test_foocompare.py:12: AssertionError
-1 failed in 0.12 seconds
+1 failed in 0.05s

 .. _assert-details:
 .. _`assert introspection`:
@@ -160,7 +160,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
 in python < 3.6 this is a pathlib2.Path


-no tests ran in 0.12 seconds
+no tests ran in 0.01s

 You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:

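The cache walkthrough below is produced by a parametrized test file along these lines (a sketch reconstructed from the output shown; the actual ``test_50.py`` is not part of this diff):

.. code-block:: python

    # content of test_50.py (sketch)
    import pytest

    @pytest.mark.parametrize("i", range(50))
    def test_num(i):
        if i in (17, 25):
            pytest.fail("bad luck")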
@@ -63,7 +63,7 @@ If you run this for the first time you will see two failures:
 > pytest.fail("bad luck")
 E Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
 _______________________________ test_num[25] _______________________________

 i = 25

@@ -74,8 +74,8 @@ If you run this for the first time you will see two failures:
 > pytest.fail("bad luck")
 E Failed: bad luck

-test_50.py:6: Failed
-2 failed, 48 passed in 0.12 seconds
+test_50.py:7: Failed
+2 failed, 48 passed in 0.16s

 If you then run it with ``--lf``:

@@ -102,7 +102,7 @@ If you then run it with ``--lf``:
 > pytest.fail("bad luck")
 E Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
 _______________________________ test_num[25] _______________________________

 i = 25

@@ -113,8 +113,8 @@ If you then run it with ``--lf``:
 > pytest.fail("bad luck")
 E Failed: bad luck

-test_50.py:6: Failed
-================= 2 failed, 48 deselected in 0.12 seconds ==================
+test_50.py:7: Failed
+===================== 2 failed, 48 deselected in 0.07s =====================

 You have run only the two failing tests from the last run, while the 48 passing
 tests have not been run ("deselected").

@@ -146,7 +146,7 @@ of ``FF`` and dots):
 > pytest.fail("bad luck")
 E Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
 _______________________________ test_num[25] _______________________________

 i = 25

@@ -157,8 +157,8 @@ of ``FF`` and dots):
 > pytest.fail("bad luck")
 E Failed: bad luck

-test_50.py:6: Failed
-=================== 2 failed, 48 passed in 0.12 seconds ====================
+test_50.py:7: Failed
+======================= 2 failed, 48 passed in 0.15s =======================

 .. _`config.cache`:

@@ -227,10 +227,10 @@ If you run this command for the first time, you can see the print statement:
 > assert mydata == 23
 E assert 42 == 23

-test_caching.py:17: AssertionError
+test_caching.py:20: AssertionError
 -------------------------- Captured stdout setup ---------------------------
 running expensive computation...
-1 failed in 0.12 seconds
+1 failed in 0.05s

 If you run it a second time, the value will be retrieved from
 the cache and nothing will be printed:

@@ -248,8 +248,8 @@ the cache and nothing will be printed:
 > assert mydata == 23
 E assert 42 == 23

-test_caching.py:17: AssertionError
-1 failed in 0.12 seconds
+test_caching.py:20: AssertionError
+1 failed in 0.05s

 See the :ref:`cache-api` for more details.

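The ``test_caching.py`` output above follows the documented ``config.cache`` pattern, roughly (a sketch, assuming the ``cache.get``/``cache.set`` API described in the cache docs):

.. code-block:: python

    # content of test_caching.py (sketch)
    import pytest

    @pytest.fixture
    def mydata(request):
        val = request.config.cache.get("example/value", None)
        if val is None:
            print("running expensive computation...")
            val = 42  # stand-in for the expensive computation
            request.config.cache.set("example/value", val)
        return val

    def test_function(mydata):
        assert mydata == 23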
@@ -283,7 +283,7 @@ You can always peek at the content of the cache using the
 example/value contains:
 42

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================

 ``--cache-show`` takes an optional argument to specify a glob pattern for
 filtering:

@@ -300,7 +300,7 @@ filtering:
 example/value contains:
 42

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================

 Clearing Cache content
 ----------------------

@@ -88,10 +88,10 @@ of the failing function and hide the other one:
 > assert False
 E assert False

-test_module.py:9: AssertionError
+test_module.py:12: AssertionError
 -------------------------- Captured stdout setup ---------------------------
 setting up <function test_func2 at 0xdeadbeef>
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.05s ========================

 Accessing captured output from a test function
 ---------------------------------------------------
@@ -20,8 +20,8 @@ Below is a complete list of all pytest features which are considered deprecated.
 :ref:`standard warning filters <warnings>`.


-Removal of ``funcargnames`` alias for ``fixturenames``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``funcargnames`` alias for ``fixturenames``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. deprecated:: 5.0

@@ -34,12 +34,47 @@ in places where we or plugin authors must distinguish between fixture names and
 names supplied by non-fixture things such as ``pytest.mark.parametrize``.


+Result log (``--result-log``)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. deprecated:: 4.0
+
+The ``--result-log`` option produces a stream of test reports which can be
+analysed at runtime. It uses a custom format which requires users to implement their own
+parser, but the team believes using a line-based format that can be parsed using standard
+tools would provide a suitable and better alternative.
+
+The current plan is to provide an alternative in the pytest 5.0 series and remove the ``--result-log``
+option in pytest 6.0 after the new implementation proves satisfactory to all users and is deemed
+stable.
+
+The actual alternative is still being discussed in issue `#4488 <https://github.com/pytest-dev/pytest/issues/4488>`__.
+
+
+Removed Features
+----------------
+
+As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after
+an appropriate period of deprecation has passed.
+
+
+``pytest.config`` global
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionremoved:: 5.0
+
+The ``pytest.config`` global object is deprecated. Instead use
+``request.config`` (via the ``request`` fixture) or if you are a plugin author
+use the ``pytest_configure(config)`` hook. Note that many hooks can also access
+the ``config`` object indirectly, through ``session.config`` or ``item.config`` for example.
+
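A migration sketch for the removed global (the fixture name is illustrative):

.. code-block:: python

    # before (removed in 5.0):
    #     import pytest
    #     verbose = pytest.config.getoption("verbose")

    import pytest

    @pytest.fixture
    def verbosity(request):
        # the same Config object, reached through the ``request`` fixture
        return request.config.getoption("verbose")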
 .. _`raises message deprecated`:

 ``"message"`` parameter of ``pytest.raises``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-.. deprecated:: 4.1
+.. versionremoved:: 5.0

 It is a common mistake to think this parameter will match the exception message, while in fact
 it only serves to provide a custom message in case the ``pytest.raises`` check fails. To prevent

@@ -70,22 +105,12 @@ If you still have concerns about this deprecation and future removal, please com
 `issue #3974 <https://github.com/pytest-dev/pytest/issues/3974>`__.


-``pytest.config`` global
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. deprecated:: 4.1
-
-The ``pytest.config`` global object is deprecated. Instead use
-``request.config`` (via the ``request`` fixture) or if you are a plugin author
-use the ``pytest_configure(config)`` hook. Note that many hooks can also access
-the ``config`` object indirectly, through ``session.config`` or ``item.config`` for example.
-
-
 .. _raises-warns-exec:

 ``raises`` / ``warns`` with a string as the second argument
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-.. deprecated:: 4.1
+.. versionremoved:: 5.0

 Use the context manager form of these instead. When necessary, invoke ``exec``
 directly.

@@ -116,27 +141,6 @@ Becomes:


-Result log (``--result-log``)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. deprecated:: 4.0
-
-The ``--result-log`` option produces a stream of test reports which can be
-analysed at runtime. It uses a custom format which requires users to implement their own
-parser, but the team believes using a line-based format that can be parsed using standard
-tools would provide a suitable and better alternative.
-
-The current plan is to provide an alternative in the pytest 5.0 series and remove the ``--result-log``
-option in pytest 6.0 after the new implementation proves satisfactory to all users and is deemed
-stable.
-
-The actual alternative is still being discussed in issue `#4488 <https://github.com/pytest-dev/pytest/issues/4488>`__.
-
-
-Removed Features
-----------------
-
-As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after
-an appropriate period of deprecation has passed.
-
 Using ``Class`` in custom Collectors
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -36,7 +36,7 @@ then you can just invoke ``pytest`` directly:

 test_example.txt . [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.02s =============================

 By default, pytest will collect ``test*.txt`` files looking for doctest directives, but you
 can pass additional globs using the ``--doctest-glob`` option (multi-allowed).

@@ -66,7 +66,7 @@ and functions, including from test modules:
 mymodule.py . [ 50%]
 test_example.txt . [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.03s =============================

 You can make these changes permanent in your project by
 putting them into a pytest.ini file like this:

@@ -103,7 +103,7 @@ that will be used for those doctest files using the
 Using 'doctest' options
 -----------------------

-The standard ``doctest`` module provides some `options <https://docs.python.org/3/library/doctest.html#option-flags>`__
+Python's standard ``doctest`` module provides some `options <https://docs.python.org/3/library/doctest.html#option-flags>`__
 to configure the strictness of doctest tests. In pytest, you can enable those flags using the
 configuration file.

@@ -115,23 +115,50 @@ lengthy exception stack traces you can just write:
 [pytest]
 doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL

-pytest also introduces new options to allow doctests to run in Python 2 and
-Python 3 unchanged:
-
-* ``ALLOW_UNICODE``: when enabled, the ``u`` prefix is stripped from unicode
-  strings in expected doctest output.
-
-* ``ALLOW_BYTES``: when enabled, the ``b`` prefix is stripped from byte strings
-  in expected doctest output.
-
 Alternatively, options can be enabled by an inline comment in the doc test
 itself:

 .. code-block:: rst

-    # content of example.rst
-    >>> get_unicode_greeting()  # doctest: +ALLOW_UNICODE
-    'Hello'
+    >>> something_that_raises()  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ValueError: ...

+pytest also introduces new options:
+
+* ``ALLOW_UNICODE``: when enabled, the ``u`` prefix is stripped from unicode
+  strings in expected doctest output. This allows doctests to run in Python 2
+  and Python 3 unchanged.
+
+* ``ALLOW_BYTES``: similarly, the ``b`` prefix is stripped from byte strings
+  in expected doctest output.
+
+* ``NUMBER``: when enabled, floating-point numbers only need to match as far as
+  the precision you have written in the expected doctest output. For example,
+  the following output would only need to match to 2 decimal places::
+
+      >>> math.pi
+      3.14
+
+  If you wrote ``3.1416`` then the actual output would need to match to 4
+  decimal places; and so on.
+
+  This avoids false positives caused by limited floating-point precision, like
+  this::
+
+      Expected:
+          0.233
+      Got:
+          0.23300000000000001
+
+  ``NUMBER`` also supports lists of floating-point numbers -- in fact, it
+  matches floating-point numbers appearing anywhere in the output, even inside
+  a string! This means that it may not be appropriate to enable globally in
+  ``doctest_optionflags`` in your configuration file.
+
+
+Continue on failure
+-------------------
+
 By default, pytest would report only the first failure for a given doctest. If
 you want to continue the test even when you have failures, do:
@@ -52,7 +52,7 @@ You can then restrict a test run to only run tests marked with ``webtest``:

 test_server.py::test_send_http PASSED [100%]

-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.01s ======================

 Or the inverse, running all tests except the webtest ones:

@@ -69,7 +69,7 @@ Or the inverse, running all tests except the webtest ones:
 test_server.py::test_another PASSED [ 66%]
 test_server.py::TestClass::test_method PASSED [100%]

-================== 3 passed, 1 deselected in 0.12 seconds ==================
+===================== 3 passed, 1 deselected in 0.02s ======================

 Selecting tests based on their node ID
 --------------------------------------

@@ -89,7 +89,7 @@ tests based on their module, class, method, or function name:

 test_server.py::TestClass::test_method PASSED [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.01s =============================

 You can also select on the class:

@@ -104,7 +104,7 @@ You can also select on the class:

 test_server.py::TestClass::test_method PASSED [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.01s =============================

 Or select multiple nodes:

@@ -120,7 +120,7 @@ Or select multiple nodes:
 test_server.py::TestClass::test_method PASSED [ 50%]
 test_server.py::test_send_http PASSED [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.02s =============================

 .. _node-id:

@@ -159,7 +159,7 @@ select tests based on their names:

 test_server.py::test_send_http PASSED [100%]

-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.01s ======================

 And you can also run all tests except the ones that match the keyword:

@@ -176,7 +176,7 @@ And you can also run all tests except the ones that match the keyword:
 test_server.py::test_another PASSED [ 66%]
 test_server.py::TestClass::test_method PASSED [100%]

-================== 3 passed, 1 deselected in 0.12 seconds ==================
+===================== 3 passed, 1 deselected in 0.02s ======================

 Or to select "http" and "quick" tests:

@@ -192,7 +192,7 @@ Or to select "http" and "quick" tests:
 test_server.py::test_send_http PASSED [ 50%]
 test_server.py::test_something_quick PASSED [100%]

-================== 2 passed, 2 deselected in 0.12 seconds ==================
+===================== 2 passed, 2 deselected in 0.02s ======================

 .. note::

@@ -413,7 +413,7 @@ the test needs:

 test_someenv.py s [100%]

-======================== 1 skipped in 0.12 seconds =========================
+============================ 1 skipped in 0.01s ============================

 and here is one that specifies exactly the environment needed:

@@ -428,7 +428,7 @@ and here is one that specifies exactly the environment needed:

 test_someenv.py . [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.01s =============================

 The ``--markers`` option always gives you a list of available markers:

@@ -499,7 +499,7 @@ The output is as follows:
 $ pytest -q -s
 Mark(name='my_marker', args=(<function hello_world at 0xdeadbeef>,), kwargs={})
 .
-1 passed in 0.12 seconds
+1 passed in 0.01s

 We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``.

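A side-by-side sketch of the two spellings discussed above (test names are illustrative):

.. code-block:: python

    import pytest

    def hello_world():
        pass

    # as a callable: the decorated function itself becomes the marked object
    @pytest.mark.my_marker
    def test_direct():
        pass

    # with_args: hello_world is appended to the marker's args instead
    @pytest.mark.my_marker.with_args(hello_world)
    def test_with_args():
        pass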
@@ -551,7 +551,7 @@ Let's run this without capturing output and see what we get:
 glob args=('class',) kwargs={'x': 2}
 glob args=('module',) kwargs={'x': 1}
 .
-1 passed in 0.12 seconds
+1 passed in 0.01s

 marking platform specific tests with pytest
 --------------------------------------------------------------

@@ -623,7 +623,7 @@ then you will see two tests skipped and two executed tests as expected:

 ========================= short test summary info ==========================
 SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux
-=================== 2 passed, 2 skipped in 0.12 seconds ====================
+======================= 2 passed, 2 skipped in 0.02s =======================

 Note that if you specify a platform via the marker-command line option like this:

@@ -638,7 +638,7 @@ Note that if you specify a platform via the marker-command line option like this

 test_plat.py . [100%]

-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.01s ======================

 then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests.

@@ -711,7 +711,7 @@ We can now use the ``-m option`` to select one set:
 test_module.py:8: in test_interface_complex
 assert 0
 E assert 0
-================== 2 failed, 2 deselected in 0.12 seconds ==================
+===================== 2 failed, 2 deselected in 0.07s ======================

 or to select both "event" and "interface" tests:

@@ -739,4 +739,4 @@ or to select both "event" and "interface" tests:
 test_module.py:12: in test_event_simple
 assert 0
 E assert 0
-================== 3 failed, 1 deselected in 0.12 seconds ==================
+===================== 3 failed, 1 deselected in 0.07s ======================
@@ -41,7 +41,7 @@ now execute the test specification:
 usecase execution failed
 spec failed: 'some': 'other'
 no further details known at this point.
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.06s ========================

 .. regendoc:wipe

@@ -77,7 +77,7 @@ consulted when reporting in ``verbose`` mode:
 usecase execution failed
 spec failed: 'some': 'other'
 no further details known at this point.
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.07s ========================

 .. regendoc:wipe

@@ -97,4 +97,4 @@ interesting to just look at the collection tree:
 <YamlItem hello>
 <YamlItem ok>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.05s ===========================
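The ``--all`` example below is driven by files along these lines (a sketch reconstructed from the surrounding docs; the actual files are not part of this diff):

.. code-block:: python

    # content of conftest.py (sketch)
    def pytest_addoption(parser):
        parser.addoption("--all", action="store_true", help="run all combinations")

    def pytest_generate_tests(metafunc):
        if "param1" in metafunc.fixturenames:
            end = 5 if metafunc.config.getoption("all") else 2
            metafunc.parametrize("param1", range(end))

    # content of test_compute.py (sketch)
    def test_compute(param1):
        assert param1 < 4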
@@ -54,7 +54,7 @@ This means that we only run 2 tests if we do not pass ``--all``:

 $ pytest -q test_compute.py
 .. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.01s

 We run only two computations, so we see two dots.
 let's run the full monty:

@@ -72,8 +72,8 @@ let's run the full monty:
 > assert param1 < 4
 E assert 4 < 4

-test_compute.py:3: AssertionError
-1 failed, 4 passed in 0.12 seconds
+test_compute.py:4: AssertionError
+1 failed, 4 passed in 0.06s

 As expected when running the full range of ``param1`` values
 we'll get an error on the last one.

@@ -172,7 +172,7 @@ objects, they are still using the default pytest representation:
 <Function test_timedistance_v3[forward]>
 <Function test_timedistance_v3[backward]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.02s ===========================

 In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
 together with the actual data, instead of listing them separately.

@@ -229,7 +229,7 @@ this is a fully self-contained example which you can run with:

 test_scenarios.py .... [100%]

-========================= 4 passed in 0.12 seconds =========================
+============================ 4 passed in 0.02s =============================

 If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:

@@ -248,7 +248,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
 <Function test_demo1[advanced]>
 <Function test_demo2[advanced]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.02s ===========================

 Note that we told ``metafunc.parametrize()`` that your scenario values
 should be considered class-scoped. With pytest-2.3 this leads to a

@@ -323,7 +323,7 @@ Let's first see how it looks like at collection time:
 <Function test_db_initialized[d1]>
 <Function test_db_initialized[d2]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================

 And then when we run the test:

@@ -342,8 +342,8 @@ And then when we run the test:
 > pytest.fail("deliberately failing for demo purposes")
 E Failed: deliberately failing for demo purposes

-test_backends.py:6: Failed
-1 failed, 1 passed in 0.12 seconds
+test_backends.py:8: Failed
+1 failed, 1 passed in 0.05s

 The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase.

@@ -394,7 +394,7 @@ The result of this test will be successful:
 <Module test_indirect_list.py>
 <Function test_indirect[a-b]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================

 .. regendoc:wipe

@@ -453,8 +453,8 @@ argument sets to use for each test function. Let's run it:
 > assert a == b
 E assert 1 == 2

-test_parametrize.py:18: AssertionError
-1 failed, 2 passed in 0.12 seconds
+test_parametrize.py:21: AssertionError
+1 failed, 2 passed in 0.07s

 Indirect parametrization with multiple fixtures
 --------------------------------------------------------------

@@ -479,7 +479,7 @@ Running it results in some skips if we don't have all the python interpreters in
 ========================= short test summary info ==========================
 SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found
 SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.7' not found
-3 passed, 24 skipped in 0.12 seconds
+3 passed, 24 skipped in 0.43s

 Indirect parametrization of optional implementations/imports
 --------------------------------------------------------------------

@@ -547,8 +547,8 @@ If you run this with reporting for skips enabled:
 test_module.py .s [100%]

 ========================= short test summary info ==========================
-SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2': No module named 'opt2'
-=================== 1 passed, 1 skipped in 0.12 seconds ====================
+SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:13: could not import 'opt2': No module named 'opt2'
+======================= 1 passed, 1 skipped in 0.02s =======================

 You'll see that we don't have an ``opt2`` module and thus the second test run
 of our ``test_func1`` was skipped. A few notes:
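The skip message above is what ``pytest.importorskip`` produces when a module is missing; the pattern behind this example is roughly (a sketch; the real ``conftest.py`` is not part of this diff):

.. code-block:: python

    # conftest.py (sketch)
    import pytest

    @pytest.fixture(params=["opt1", "opt2"])
    def basemod(request):
        return pytest.importorskip(request.param)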
@@ -610,7 +610,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker:
 test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%]
 test_pytest_param_example.py::test_eval[basic_6*9] XFAIL [100%]

-============ 2 passed, 15 deselected, 1 xfailed in 0.12 seconds ============
+=============== 2 passed, 15 deselected, 1 xfailed in 0.23s ================

 As the result:

@@ -158,7 +158,7 @@ The test collection would look like this:
 <Function simple_check>
 <Function complex_check>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================

 You can check for multiple glob patterns by adding a space between the patterns:

@@ -221,7 +221,7 @@ You can always peek at the collection tree without running tests like this:
 <Function test_method>
 <Function test_anothermethod>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================

 .. _customizing-test-collection:

@@ -297,7 +297,7 @@ file will be left out:
 rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
 collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.04s ===========================

 It's also possible to ignore files based on Unix shell-style wildcards by adding
 patterns to ``collect_ignore_glob``.
@@ -119,7 +119,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
 a = "1" * 100 + "a" + "2" * 100
 b = "1" * 100 + "b" + "2" * 100
 > assert a == b
-E AssertionError: assert '111111111111...2222222222222' == '1111111111111...2222222222222'
+E AssertionError: assert '111111111111...2222222222222' == '111111111111...2222222222222'
 E Skipping 90 identical leading characters in diff, use -v to show
 E Skipping 91 identical trailing characters in diff, use -v to show
 E - 1111111111a222222222

@@ -136,7 +136,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
 a = "1\n" * 100 + "a" + "2\n" * 100
 b = "1\n" * 100 + "b" + "2\n" * 100
 > assert a == b
-E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n'
+E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n...n2\n2\n2\n2\n'
 E Skipping 190 identical leading characters in diff, use -v to show
 E Skipping 191 identical trailing characters in diff, use -v to show
 E 1

@@ -235,7 +235,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
 def test_not_in_text_multiline(self):
 text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail"
 > assert "foo" not in text
-E AssertionError: assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail'
+E AssertionError: assert 'foo' not in 'some multil...nand a\ntail'
 E 'foo' is contained here:
 E some multiline
 E text

@@ -267,7 +267,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
 def test_not_in_text_single_long(self):
 text = "head " * 50 + "foo " + "tail " * 20
 > assert "foo" not in text
-E AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail '
+E AssertionError: assert 'foo' not in 'head head h...l tail tail '
 E 'foo' is contained here:
 E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
 E ? +++

@@ -280,7 +280,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
 def test_not_in_text_single_long_term(self):
 text = "head " * 50 + "f" * 70 + "tail " * 20
 > assert "f" * 70 not in text
-E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail '
+E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head h...l tail tail '
 E 'ffffffffffffffffff...fffffffffffffffffff' is contained here:
 E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
 E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

@@ -301,7 +301,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
 left = Foo(1, "b")
 right = Foo(1, "c")
 > assert left == right
-E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialise...oo(a=1, b='c')
+E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialis...oo(a=1, b='c')
 E Omitting 1 identical items, use -vv to show
 E Differing attributes:
 E b: 'b' != 'c'

@@ -650,4 +650,4 @@ Here is a nice run of several failures and how ``pytest`` presents things:
 E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a
 
 failure_demo.py:282: AssertionError
-======================== 44 failed in 0.12 seconds =========================
+============================ 44 failed in 0.82s ============================

@@ -65,7 +65,7 @@ Let's run this without supplying our new option:
 test_sample.py:6: AssertionError
 --------------------------- Captured stdout call ---------------------------
 first
-1 failed in 0.12 seconds
+1 failed in 0.06s
 
 And now with supplying a command line option:
 

@@ -89,7 +89,7 @@ And now with supplying a command line option:
 test_sample.py:6: AssertionError
 --------------------------- Captured stdout call ---------------------------
 second
-1 failed in 0.12 seconds
+1 failed in 0.06s
 
 You can see that the command line option arrived in our test. This
 completes the basic pattern. However, one often rather wants to process
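
For context, the pattern whose regenerated output is shown above looks roughly
like this (a minimal sketch: the ``--cmdopt`` name and the "first"/"second"
prints are inferred from the captured stdout in the diff, the defaults are
illustrative):

# content of conftest.py (sketch)
import pytest


def pytest_addoption(parser):
    parser.addoption("--cmdopt", action="store", default="type1")


@pytest.fixture
def cmdopt(request):
    return request.config.getoption("--cmdopt")


# content of test_sample.py (sketch)
def test_answer(cmdopt):
    if cmdopt == "type1":
        print("first")
    elif cmdopt == "type2":
        print("second")
    assert 0  # fail deliberately so the captured stdout is shown
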
@@ -132,7 +132,7 @@ directory with the above conftest.py:
 rootdir: $REGENDOC_TMPDIR
 collected 0 items
 
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================
 
 .. _`excontrolskip`:
 
@@ -201,7 +201,7 @@ and when running it will see a skipped "slow" test:
 
 ========================= short test summary info ==========================
 SKIPPED [1] test_module.py:8: need --runslow option to run
-=================== 1 passed, 1 skipped in 0.12 seconds ====================
+======================= 1 passed, 1 skipped in 0.01s =======================
 
 Or run it including the ``slow`` marked test:
 

@@ -216,7 +216,7 @@ Or run it including the ``slow`` marked test:
 
 test_module.py .. [100%]
 
-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.01s =============================
 
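
The "slow" skipping behaviour regenerated above comes from a conftest pattern
roughly like the following (a sketch; the option name and skip reason match
the SKIPPED line in the output, the rest is illustrative):

# content of conftest.py (sketch)
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--runslow", action="store_true", default=False, help="run slow tests"
    )


def pytest_collection_modifyitems(config, items):
    if config.getoption("--runslow"):
        # --runslow given on the command line: do not skip slow tests
        return
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)
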
 Writing well integrated assertion helpers
 --------------------------------------------------

@@ -261,7 +261,7 @@ Let's run our little function:
 E Failed: not configured: 42
 
 test_checkconfig.py:11: Failed
-1 failed in 0.12 seconds
+1 failed in 0.05s
 
 If you only want to hide certain exceptions, you can set ``__tracebackhide__``
 to a callable which gets the ``ExceptionInfo`` object. You can for example use
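
A minimal sketch of the assertion helper being exercised above (the
"not configured: 42" message matches the output; the callable form of
``__tracebackhide__`` from the sentence above is shown second, with
illustrative names):

import operator

import pytest


def checkconfig(x):
    __tracebackhide__ = True  # hide this helper frame from the traceback
    if not hasattr(x, "config"):
        pytest.fail("not configured: {}".format(x))


class ConfigException(Exception):
    pass


def checkconfig_selective(x):
    # only hide the frame for ConfigException, not for other errors
    __tracebackhide__ = operator.methodcaller("errisinstance", ConfigException)
    if not hasattr(x, "config"):
        raise ConfigException("not configured: {}".format(x))
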
@@ -358,7 +358,7 @@ which will add the string to the test header accordingly:
 rootdir: $REGENDOC_TMPDIR
 collected 0 items
 
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================
 
 .. regendoc:wipe
 
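
The extra header lines being regenerated here come from the
``pytest_report_header`` hook; a sketch covering both variants around these
hunks (the returned strings are illustrative):

# content of conftest.py (sketch)
def pytest_report_header(config):
    if config.getoption("verbose") > 0:
        return ["info1: did you know that ...", "did you?"]
    return "project deps: mylib-1.1"
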
@@ -388,7 +388,7 @@ which will add info only when run with "-v":
 rootdir: $REGENDOC_TMPDIR
 collecting ... collected 0 items
 
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================
 
 and nothing when run plainly:
 

@@ -401,7 +401,7 @@ and nothing when run plainly:
 rootdir: $REGENDOC_TMPDIR
 collected 0 items
 
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================
 
 profiling test duration
 --------------------------
@@ -445,9 +445,9 @@ Now we can profile which test functions execute the slowest:
 
 ========================= slowest 3 test durations =========================
 0.30s call test_some_are_slow.py::test_funcslow2
-0.20s call test_some_are_slow.py::test_funcslow1
+0.25s call test_some_are_slow.py::test_funcslow1
 0.10s call test_some_are_slow.py::test_funcfast
-========================= 3 passed in 0.12 seconds =========================
+============================ 3 passed in 0.68s =============================
 
 incremental testing - test steps
 ---------------------------------------------------

@@ -531,7 +531,7 @@ If we run this:
 ========================= short test summary info ==========================
 XFAIL test_step.py::TestUserHandling::test_deletion
 reason: previous test failed (test_modification)
-============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds ===============
+================== 1 failed, 2 passed, 1 xfailed in 0.07s ==================
 
 We'll see that ``test_deletion`` was not executed because ``test_modification``
 failed. It is reported as an "expected failure".
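
The xfail-on-previous-failure behaviour shown above is implemented with two
hooks in ``conftest.py``; roughly (a sketch: the "previous test failed" reason
matches the output, the attribute name is illustrative):

# content of conftest.py (sketch)
import pytest


def pytest_runtest_makereport(item, call):
    if "incremental" in item.keywords:
        if call.excinfo is not None:
            # remember the failing test on its parent (class/module)
            item.parent._previousfailed = item


def pytest_runtest_setup(item):
    if "incremental" in item.keywords:
        previousfailed = getattr(item.parent, "_previousfailed", None)
        if previousfailed is not None:
            pytest.xfail("previous test failed ({})".format(previousfailed.name))
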
@@ -644,7 +644,7 @@ We can run this:
 E assert 0
 
 a/test_db2.py:2: AssertionError
-========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ==========
+============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.10s ==============
 
 The two test modules in the ``a`` directory see the same ``db`` fixture instance
 while the one test in the sister-directory ``b`` doesn't see it. We could of course
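
The visibility rule described above follows from where the fixture is defined;
a sketch (the ``db`` name comes from the prose above, the class body is
illustrative):

# content of a/conftest.py (sketch) -- only tests under a/ can see ``db``
import pytest


class DB:
    pass


@pytest.fixture(scope="session")
def db():
    return DB()
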
@@ -733,7 +733,7 @@ and run them:
 E assert 0
 
 test_module.py:6: AssertionError
-========================= 2 failed in 0.12 seconds =========================
+============================ 2 failed in 0.07s =============================
 
 you will have a "failures" file which contains the failing test ids:
 

@@ -848,7 +848,7 @@ and run it:
 E assert 0
 
 test_module.py:19: AssertionError
-==================== 2 failed, 1 error in 0.12 seconds =====================
+======================== 2 failed, 1 error in 0.07s ========================
 
 You'll see that the fixture finalizers could use the precise reporting
 information.

@@ -81,4 +81,4 @@ If you run this without output capturing:
 .test other
 .test_unit1 method called
 .
-4 passed in 0.12 seconds
+4 passed in 0.02s

@@ -95,8 +95,8 @@ marked ``smtp_connection`` fixture function. Running the test looks like this:
 > assert 0 # for demo purposes
 E assert 0
 
-test_smtpsimple.py:11: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+test_smtpsimple.py:14: AssertionError
+============================ 1 failed in 0.57s =============================
 
 In the failure traceback we see that the test function was called with a
 ``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture
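
For reference, the fixture behind this traceback looks roughly like this (a
sketch; the server and timeout values are illustrative):

# content of test_smtpsimple.py (sketch)
import smtplib

import pytest


@pytest.fixture
def smtp_connection():
    return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)


def test_ehlo(smtp_connection):
    response, msg = smtp_connection.ehlo()
    assert response == 250
    assert 0  # for demo purposes
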
@@ -246,7 +246,7 @@ inspect what is going on and can now run the tests:
 > assert 0 # for demo purposes
 E assert 0
 
-test_module.py:6: AssertionError
+test_module.py:7: AssertionError
 ________________________________ test_noop _________________________________
 
 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -257,8 +257,8 @@ inspect what is going on and can now run the tests:
 > assert 0 # for demo purposes
 E assert 0
 
-test_module.py:11: AssertionError
-========================= 2 failed in 0.12 seconds =========================
+test_module.py:13: AssertionError
+============================ 2 failed in 0.76s =============================
 
 You see the two ``assert 0`` failing and more importantly you can also see
 that the same (module-scoped) ``smtp_connection`` object was passed into the

@@ -361,7 +361,7 @@ Let's execute it:
 $ pytest -s -q --tb=no
 FFteardown smtp
 
-2 failed in 0.12 seconds
+2 failed in 0.76s
 
 We see that the ``smtp_connection`` instance is finalized after the two
 tests finished execution. Note that if we decorated our fixture
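
The "teardown smtp" output above corresponds to a module-scoped fixture with
teardown code after ``yield``; roughly (a sketch, values illustrative):

import smtplib

import pytest


@pytest.fixture(scope="module")
def smtp_connection():
    smtp_connection = smtplib.SMTP("smtp.gmail.com", 587, timeout=5)
    yield smtp_connection  # provide the fixture value
    print("teardown smtp")
    smtp_connection.close()
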
@@ -515,7 +515,7 @@ again, nothing much has changed:
 $ pytest -s -q --tb=no
 FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)
 
-2 failed in 0.12 seconds
+2 failed in 0.76s
 
 Let's quickly create another test module that actually sets the
 server URL in its module namespace:
 

@@ -538,7 +538,7 @@ Running it:
 F [100%]
 ================================= FAILURES =================================
 ______________________________ test_showhelo _______________________________
-test_anothersmtp.py:5: in test_showhelo
+test_anothersmtp.py:6: in test_showhelo
 assert 0, smtp_connection.helo()
 E AssertionError: (250, b'mail.python.org')
 E assert 0
@@ -654,7 +654,7 @@ So let's just do another run:
 > assert 0 # for demo purposes
 E assert 0
 
-test_module.py:6: AssertionError
+test_module.py:7: AssertionError
 ________________________ test_noop[smtp.gmail.com] _________________________
 
 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -665,7 +665,7 @@ So let's just do another run:
 > assert 0 # for demo purposes
 E assert 0
 
-test_module.py:11: AssertionError
+test_module.py:13: AssertionError
 ________________________ test_ehlo[mail.python.org] ________________________
 
 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -676,7 +676,7 @@ So let's just do another run:
 > assert b"smtp.gmail.com" in msg
 E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING'
 
-test_module.py:5: AssertionError
+test_module.py:6: AssertionError
 -------------------------- Captured stdout setup ---------------------------
 finalizing <smtplib.SMTP object at 0xdeadbeef>
 ________________________ test_noop[mail.python.org] ________________________

@@ -689,10 +689,10 @@ So let's just do another run:
 > assert 0 # for demo purposes
 E assert 0
 
-test_module.py:11: AssertionError
+test_module.py:13: AssertionError
 ------------------------- Captured stdout teardown -------------------------
 finalizing <smtplib.SMTP object at 0xdeadbeef>
-4 failed in 0.12 seconds
+4 failed in 1.77s
 
 We see that our two test functions each ran twice, against the different
 ``smtp_connection`` instances. Note also, that with the ``mail.python.org``
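
The four failures above (two tests against two servers) come from
parametrizing the fixture itself; a sketch matching the "finalizing" prints
(the host names are taken from the test IDs in the output):

import smtplib

import pytest


@pytest.fixture(scope="module", params=["smtp.gmail.com", "mail.python.org"])
def smtp_connection(request):
    smtp_connection = smtplib.SMTP(request.param, 587, timeout=5)
    yield smtp_connection
    print("finalizing {}".format(smtp_connection))
    smtp_connection.close()
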
@@ -771,7 +771,7 @@ Running the above tests results in the following test IDs being used:
 <Function test_ehlo[mail.python.org]>
 <Function test_noop[mail.python.org]>
 
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.04s ===========================
 
 .. _`fixture-parametrize-marks`:
 

@@ -812,7 +812,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:
 test_fixture_marks.py::test_data[1] PASSED [ 66%]
 test_fixture_marks.py::test_data[2] SKIPPED [100%]
 
-=================== 2 passed, 1 skipped in 0.12 seconds ====================
+======================= 2 passed, 1 skipped in 0.01s =======================
 
 .. _`interdependent fixtures`:
 

@@ -861,7 +861,7 @@ Here we declare an ``app`` fixture which receives the previously defined
 test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%]
 test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%]
 
-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.79s =============================
 
 Due to the parametrization of ``smtp_connection``, the test will run twice with two
 different ``App`` instances and respective smtp servers. There is no

@@ -971,7 +971,7 @@ Let's run the tests in verbose mode while looking at the print output:
 TEARDOWN modarg mod2
 
 
-========================= 8 passed in 0.12 seconds =========================
+============================ 8 passed in 0.02s =============================
 
 You can see that the parametrized module-scoped ``modarg`` resource caused an
 ordering of test execution that led to the fewest possible "active" resources.

@@ -1043,7 +1043,7 @@ to verify our fixture is activated and the tests pass:
 
 $ pytest -q
 .. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.02s
 
 You can specify multiple fixtures like this:
 

@@ -1151,7 +1151,7 @@ If we run it, we get two passing tests:
 
 $ pytest -q
 .. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.02s
 
 Here is how autouse fixtures work in other scopes:
 

@@ -28,7 +28,7 @@ Install ``pytest``
 .. code-block:: bash
 
 $ pytest --version
-This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.x/site-packages/pytest.py
+This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py
 
 .. _`simpletest`:
 

@@ -68,8 +68,8 @@ That’s it. You can now execute the test function:
 E assert 4 == 5
 E + where 4 = func(3)
 
-test_sample.py:5: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+test_sample.py:6: AssertionError
+============================ 1 failed in 0.05s =============================
 
 This test returns a failure report because ``func(3)`` does not return ``5``.
 

@@ -108,7 +108,7 @@ Execute the test function with “quiet” reporting mode:
 
 $ pytest -q test_sysexit.py
 . [100%]
-1 passed in 0.12 seconds
+1 passed in 0.01s
 
 Group multiple tests in a class
 --------------------------------------------------------------

@@ -140,12 +140,12 @@ Once you develop multiple tests, you may want to group them into a class. pytest
 
 def test_two(self):
 x = "hello"
-> assert hasattr(x, 'check')
+> assert hasattr(x, "check")
 E AssertionError: assert False
 E + where False = hasattr('hello', 'check')
 
 test_class.py:8: AssertionError
-1 failed, 1 passed in 0.12 seconds
+1 failed, 1 passed in 0.05s
 
 The first test passed and the second failed. You can easily see the intermediate values in the assertion to help you understand the reason for the failure.
 

@@ -180,7 +180,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look
 test_tmpdir.py:3: AssertionError
 --------------------------- Captured stdout call ---------------------------
 PYTEST_TMPDIR/test_needsfiles0
-1 failed in 0.12 seconds
+1 failed in 0.05s
 
 More info on tmpdir handling is available at :ref:`Temporary directories and files <tmpdir handling>`.
 

@@ -44,7 +44,7 @@ To execute it:
 E + where 4 = inc(3)
 
 test_sample.py:6: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.06s =============================
 
 Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used.
 See :ref:`Getting Started <getstarted>` for more examples.

@@ -75,7 +75,7 @@ them in turn:
 E + where 54 = eval('6*9')
 
 test_expectation.py:6: AssertionError
-==================== 1 failed, 2 passed in 0.12 seconds ====================
+======================= 1 failed, 2 passed in 0.05s ========================
 
 .. note::
 

@@ -128,7 +128,7 @@ Let's run this:
 
 test_expectation.py ..x [100%]
 
-=================== 2 passed, 1 xfailed in 0.12 seconds ====================
+======================= 2 passed, 1 xfailed in 0.06s =======================
 
 The one parameter set which caused a failure previously now
 shows up as an "xfailed (expected to fail)" test.
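
The ``..x`` outcome above is produced by marking one parameter set as an
expected failure; a sketch (the 6*9/42 values match the eval example earlier
in this diff):

import pytest


@pytest.mark.parametrize(
    "test_input,expected",
    [("3+5", 8), ("2+4", 6), pytest.param("6*9", 42, marks=pytest.mark.xfail)],
)
def test_eval(test_input, expected):
    assert eval(test_input) == expected
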
@@ -205,7 +205,7 @@ If we now pass two stringinput values, our test will run twice:
 
 $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py
 .. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.01s
 
 Let's also run with a stringinput that will lead to a failing test:
 

@@ -225,7 +225,7 @@ Let's also run with a stringinput that will lead to a failing test:
 E + where <built-in method isalpha of str object at 0xdeadbeef> = '!'.isalpha
 
 test_strings.py:4: AssertionError
-1 failed in 0.12 seconds
+1 failed in 0.05s
 
 As expected our test function fails.
 

@@ -239,7 +239,7 @@ list:
 s [100%]
 ========================= short test summary info ==========================
 SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:2
-1 skipped in 0.12 seconds
+1 skipped in 0.01s
 
 Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across
 those sets cannot be duplicated, otherwise an error will be raised.
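
The ``--stringinput`` runs above rely on ``pytest_generate_tests``; roughly
(a sketch consistent with the "got empty parameter set ['stringinput']" skip
in the output):

# content of conftest.py (sketch)
def pytest_addoption(parser):
    parser.addoption(
        "--stringinput",
        action="append",
        default=[],
        help="list of stringinputs to pass to test functions",
    )


def pytest_generate_tests(metafunc):
    if "stringinput" in metafunc.fixturenames:
        metafunc.parametrize("stringinput", metafunc.config.getoption("stringinput"))
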
@@ -180,7 +180,7 @@ Skipping on a missing import dependency
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 You can skip tests on a missing import by using :ref:`pytest.importorskip ref`
-at module level or within a test or test setup function.
+at module level, within a test, or test setup function.
 
 .. code-block:: python
 

@@ -371,7 +371,7 @@ Running it with the report-on-xfail option gives this output:
 XFAIL xfail_demo.py::test_hello6
 reason: reason
 XFAIL xfail_demo.py::test_hello7
-======================== 7 xfailed in 0.12 seconds =========================
+============================ 7 xfailed in 0.17s ============================
 
 .. _`skip/xfail with parametrize`:
 
@@ -64,7 +64,7 @@ Running this would result in a passed test except for the last
 E assert 0
 
 test_tmp_path.py:13: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.06s =============================
 
 .. _`tmp_path_factory example`:
 

@@ -132,8 +132,8 @@ Running this would result in a passed test except for the last
 > assert 0
 E assert 0
 
-test_tmpdir.py:7: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+test_tmpdir.py:9: AssertionError
+============================ 1 failed in 0.05s =============================
 
 .. _`tmpdir factory example`:
 
@@ -155,7 +155,7 @@ the ``self.db`` values in the traceback:
 E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
 E assert 0
 
-test_unittest_db.py:9: AssertionError
+test_unittest_db.py:10: AssertionError
 ___________________________ MyTest.test_method2 ____________________________
 
 self = <test_unittest_db.MyTest testMethod=test_method2>

@@ -165,8 +165,8 @@ the ``self.db`` values in the traceback:
 E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
 E assert 0
 
-test_unittest_db.py:12: AssertionError
-========================= 2 failed in 0.12 seconds =========================
+test_unittest_db.py:13: AssertionError
+============================ 2 failed in 0.07s =============================
 
 This default pytest traceback shows that the two test methods
 share the same ``self.db`` instance which was our intention

@@ -219,7 +219,7 @@ Running this test module ...:
 
 $ pytest -q test_unittest_cleandir.py
 . [100%]
-1 passed in 0.12 seconds
+1 passed in 0.02s
 
 ... gives us one passed test because the ``initdir`` fixture function
 was executed ahead of the ``test_method``.
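
A sketch of the ``initdir`` pattern referenced above (an autouse pytest
fixture injected into a ``unittest.TestCase``; file contents illustrative):

# content of test_unittest_cleandir.py (sketch)
import unittest

import pytest


class MyTest(unittest.TestCase):
    @pytest.fixture(autouse=True)
    def initdir(self, tmpdir):
        tmpdir.chdir()  # change to the pytest-provided temporary directory
        tmpdir.join("samplefile.ini").write("# testdata")

    def test_method(self):
        with open("samplefile.ini") as f:
            s = f.read()
        assert "testdata" in s
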
@@ -247,7 +247,7 @@ Example:
 XPASS test_example.py::test_xpass always xfail
 ERROR test_example.py::test_error - assert 0
 FAILED test_example.py::test_fail - assert 0
-= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
+== 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.08s ===
 
 The ``-r`` option accepts a number of characters after it, with ``a`` used
 above meaning "all except passes".
 

@@ -297,7 +297,7 @@ More than one character can be used, so for example to only see failed and skipped tests:
 ========================= short test summary info ==========================
 FAILED test_example.py::test_fail - assert 0
 SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test
-= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
+== 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.08s ===
 
 Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had
 captured output:
 

@@ -336,7 +336,7 @@ captured output:
 ok
 ========================= short test summary info ==========================
 PASSED test_example.py::test_ok
-= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
+== 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.08s ===
 
 .. _pdb-option:
 

@@ -41,7 +41,7 @@ Running pytest now produces this output:
 warnings.warn(UserWarning("api v1, should use functions from v2"))
 
 -- Docs: https://docs.pytest.org/en/latest/warnings.html
-=================== 1 passed, 1 warnings in 0.12 seconds ===================
+====================== 1 passed, 1 warnings in 0.01s =======================
 
 The ``-W`` flag can be passed to control which warnings will be displayed or even turn
 them into errors:
 

@@ -64,7 +64,7 @@ them into errors:
 E UserWarning: api v1, should use functions from v2
 
 test_show_warnings.py:5: UserWarning
-1 failed in 0.12 seconds
+1 failed in 0.05s
 
 The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option.
 For example, the configuration below will ignore all user warnings, but will transform
 

@@ -407,7 +407,7 @@ defines an ``__init__`` constructor, as this prevents the class from being instantiated:
 class Test:
 
 -- Docs: https://docs.pytest.org/en/latest/warnings.html
-1 warnings in 0.12 seconds
+1 warnings in 0.01s
 
 These warnings might be filtered using the same builtin mechanisms used to filter other types of warnings.
 

@@ -433,5 +433,3 @@ The following warning types are used by pytest and are part of the public API:
 .. autoclass:: pytest.PytestUnhandledCoroutineWarning
 
 .. autoclass:: pytest.PytestUnknownMarkWarning
-
-.. autoclass:: pytest.RemovedInPytest4Warning

@@ -442,7 +442,7 @@ additionally it is possible to copy examples for an example folder before running:
 testdir.copy_example("test_example.py")
 
 -- Docs: https://docs.pytest.org/en/latest/warnings.html
-=================== 2 passed, 1 warnings in 0.12 seconds ===================
+====================== 2 passed, 1 warnings in 0.28s =======================
 
 For more information about the result object that ``runpytest()`` returns, and
 the methods that it provides please check out the :py:class:`RunResult
 

@@ -30,6 +30,11 @@ template = "changelog/_template.rst"
 name = "Features"
 showcontent = true
 
+[[tool.towncrier.type]]
+directory = "improvement"
+name = "Improvements"
+showcontent = true
+
 [[tool.towncrier.type]]
 directory = "bugfix"
 name = "Bug Fixes"

@@ -0,0 +1,95 @@
+"""
+Script used to publish GitHub release notes extracted from CHANGELOG.rst.
+
+This script is meant to be executed after a successful deployment in Travis.
+
+Uses the following environment variables:
+
+* GIT_TAG: the name of the tag of the current commit.
+* GH_RELEASE_NOTES_TOKEN: a personal access token with 'repo' permissions. It should be encrypted using:
+
+    $travis encrypt GH_RELEASE_NOTES_TOKEN=<token> -r pytest-dev/pytest
+
+And the contents pasted in the ``deploy.env.secure`` section in the ``travis.yml`` file.
+
+The script also requires ``pandoc`` to be previously installed in the system.
+
+Requires Python3.6+.
+"""
+import os
+import re
+import sys
+from pathlib import Path
+
+import github3
+import pypandoc
+
+
+def publish_github_release(slug, token, tag_name, body):
+    github = github3.login(token=token)
+    owner, repo = slug.split("/")
+    repo = github.repository(owner, repo)
+    return repo.create_release(tag_name=tag_name, body=body)
+
+
+def parse_changelog(tag_name):
+    p = Path(__file__).parent.parent / "CHANGELOG.rst"
+    changelog_lines = p.read_text(encoding="UTF-8").splitlines()
+
+    title_regex = re.compile(r"pytest (\d\.\d+\.\d+) \(\d{4}-\d{2}-\d{2}\)")
+    consuming_version = False
+    version_lines = []
+    for line in changelog_lines:
+        m = title_regex.match(line)
+        if m:
+            # found the version we want: start to consume lines until we find the next version title
+            if m.group(1) == tag_name:
+                consuming_version = True
+            # found a new version title while parsing the version we want: break out
+            elif consuming_version:
+                break
+        if consuming_version:
+            version_lines.append(line)
+
+    return "\n".join(version_lines)
+
+
+def convert_rst_to_md(text):
+    return pypandoc.convert_text(text, "md", format="rst")
+
+
+def main(argv):
+    if len(argv) > 1:
+        tag_name = argv[1]
+    else:
+        tag_name = os.environ.get("TRAVIS_TAG")
+        if not tag_name:
+            print("tag_name not given and $TRAVIS_TAG not set", file=sys.stderr)
+            return 1
+
+    token = os.environ.get("GH_RELEASE_NOTES_TOKEN")
+    if not token:
+        print("GH_RELEASE_NOTES_TOKEN not set", file=sys.stderr)
+        return 1
+
+    slug = os.environ.get("TRAVIS_REPO_SLUG")
+    if not slug:
+        print("TRAVIS_REPO_SLUG not set", file=sys.stderr)
+        return 1
+
+    rst_body = parse_changelog(tag_name)
+    md_body = convert_rst_to_md(rst_body)
+    if not publish_github_release(slug, token, tag_name, md_body):
+        print("Could not publish release notes:", file=sys.stderr)
+        print(md_body, file=sys.stderr)
+        return 5
+
+    print()
+    print(f"Release notes for {tag_name} published successfully:")
+    print(f"https://github.com/{slug}/releases/tag/{tag_name}")
+    print()
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
setup.py

@@ -11,7 +11,7 @@ INSTALL_REQUIRES = [
     'pathlib2>=2.2.0;python_version<"3.6"',
     'colorama;sys_platform=="win32"',
     "pluggy>=0.12,<1.0",
-    "importlib-metadata>=0.12",
+    'importlib-metadata>=0.12;python_version<"3.8"',
     "wcwidth",
 ]
 

@@ -21,7 +21,6 @@ def main():
         use_scm_version={"write_to": "src/_pytest/_version.py"},
         setup_requires=["setuptools-scm", "setuptools>=40.0"],
         package_dir={"": "src"},
-        # fmt: off
         extras_require={
             "testing": [
                 "argcomplete",

@@ -29,9 +28,9 @@ def main():
                 "mock",
                 "nose",
                 "requests",
-            ],
+                "xmlschema",
+            ]
         },
-        # fmt: on
         install_requires=INSTALL_REQUIRES,
     )
 
@@ -5,6 +5,13 @@ import traceback
 from inspect import CO_VARARGS
 from inspect import CO_VARKEYWORDS
 from traceback import format_exception_only
+from types import TracebackType
+from typing import Generic
+from typing import Optional
+from typing import Pattern
+from typing import Tuple
+from typing import TypeVar
+from typing import Union
 from weakref import ref
 
 import attr

@@ -15,6 +22,9 @@ import _pytest
 from _pytest._io.saferepr import safeformat
 from _pytest._io.saferepr import saferepr
 
+if False:  # TYPE_CHECKING
+    from typing import Type
+
 
 class Code:
     """ wrapper around Python code objects """

@@ -371,20 +381,52 @@ co_equal = compile(
 )
 
 
+_E = TypeVar("_E", bound=BaseException)
+
+
 @attr.s(repr=False)
-class ExceptionInfo:
+class ExceptionInfo(Generic[_E]):
     """ wraps sys.exc_info() objects and offers
     help for navigating the traceback.
     """
 
     _assert_start_repr = "AssertionError('assert "
 
-    _excinfo = attr.ib()
-    _striptext = attr.ib(default="")
-    _traceback = attr.ib(default=None)
+    _excinfo = attr.ib(type=Optional[Tuple["Type[_E]", "_E", TracebackType]])
+    _striptext = attr.ib(type=str, default="")
+    _traceback = attr.ib(type=Optional[Traceback], default=None)
 
     @classmethod
-    def from_current(cls, exprinfo=None):
+    def from_exc_info(
+        cls,
+        exc_info: Tuple["Type[_E]", "_E", TracebackType],
+        exprinfo: Optional[str] = None,
+    ) -> "ExceptionInfo[_E]":
+        """returns an ExceptionInfo for an existing exc_info tuple.
+
+        .. warning::
+
+            Experimental API
+
+        :param exprinfo: a text string helping to determine if we should
+                         strip ``AssertionError`` from the output, defaults
+                         to the exception message/``__str__()``
+        """
+        _striptext = ""
+        if exprinfo is None and isinstance(exc_info[1], AssertionError):
+            exprinfo = getattr(exc_info[1], "msg", None)
+            if exprinfo is None:
+                exprinfo = saferepr(exc_info[1])
+        if exprinfo and exprinfo.startswith(cls._assert_start_repr):
+            _striptext = "AssertionError: "
+
+        return cls(exc_info, _striptext)
+
+    @classmethod
+    def from_current(
+        cls, exprinfo: Optional[str] = None
+    ) -> "ExceptionInfo[BaseException]":
         """returns an ExceptionInfo matching the current traceback
 
         .. warning::

@@ -398,59 +440,71 @@ class ExceptionInfo:
         """
         tup = sys.exc_info()
         assert tup[0] is not None, "no current exception"
-        _striptext = ""
-        if exprinfo is None and isinstance(tup[1], AssertionError):
-            exprinfo = getattr(tup[1], "msg", None)
-            if exprinfo is None:
-                exprinfo = saferepr(tup[1])
-        if exprinfo and exprinfo.startswith(cls._assert_start_repr):
-            _striptext = "AssertionError: "
-
-        return cls(tup, _striptext)
+        assert tup[1] is not None, "no current exception"
+        assert tup[2] is not None, "no current exception"
+        exc_info = (tup[0], tup[1], tup[2])
+        return cls.from_exc_info(exc_info)
 
     @classmethod
-    def for_later(cls):
+    def for_later(cls) -> "ExceptionInfo[_E]":
         """return an unfilled ExceptionInfo
         """
         return cls(None)
 
+    def fill_unfilled(self, exc_info: Tuple["Type[_E]", _E, TracebackType]) -> None:
+        """fill an unfilled ExceptionInfo created with for_later()"""
+        assert self._excinfo is None, "ExceptionInfo was already filled"
+        self._excinfo = exc_info
+
     @property
-    def type(self):
+    def type(self) -> "Type[_E]":
         """the exception class"""
+        assert (
+            self._excinfo is not None
+        ), ".type can only be used after the context manager exits"
         return self._excinfo[0]
 
     @property
-    def value(self):
+    def value(self) -> _E:
         """the exception value"""
+        assert (
+            self._excinfo is not None
+        ), ".value can only be used after the context manager exits"
         return self._excinfo[1]
 
     @property
-    def tb(self):
+    def tb(self) -> TracebackType:
         """the exception raw traceback"""
+        assert (
+            self._excinfo is not None
+        ), ".tb can only be used after the context manager exits"
         return self._excinfo[2]
 
     @property
-    def typename(self):
+    def typename(self) -> str:
         """the type name of the exception"""
+        assert (
+            self._excinfo is not None
+        ), ".typename can only be used after the context manager exits"
         return self.type.__name__
 
     @property
-    def traceback(self):
+    def traceback(self) -> Traceback:
         """the traceback"""
         if self._traceback is None:
             self._traceback = Traceback(self.tb, excinfo=ref(self))
         return self._traceback
 
     @traceback.setter
-    def traceback(self, value):
+    def traceback(self, value: Traceback) -> None:
         self._traceback = value
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         if self._excinfo is None:
             return "<ExceptionInfo for raises contextmanager>"
         return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
 
-    def exconly(self, tryshort=False):
+    def exconly(self, tryshort: bool = False) -> str:
         """ return the exception as a string
 
         when 'tryshort' resolves to True, and the exception is a

@@ -466,11 +520,13 @@ class ExceptionInfo:
             text = text[len(self._striptext) :]
         return text
 
-    def errisinstance(self, exc):
+    def errisinstance(
+        self, exc: Union["Type[BaseException]", Tuple["Type[BaseException]", ...]]
+    ) -> bool:
         """ return True if the exception is an instance of exc """
         return isinstance(self.value, exc)
 
-    def _getreprcrash(self):
+    def _getreprcrash(self) -> "ReprFileLocation":
         exconly = self.exconly(tryshort=True)
         entry = self.traceback.getcrashentry()
         path, lineno = entry.frame.code.raw.co_filename, entry.lineno

@@ -478,13 +534,13 @@ class ExceptionInfo:
 
     def getrepr(
         self,
-        showlocals=False,
-        style="long",
-        abspath=False,
-        tbfilter=True,
-        funcargs=False,
-        truncate_locals=True,
-        chain=True,
+        showlocals: bool = False,
+        style: str = "long",
+        abspath: bool = False,
+        tbfilter: bool = True,
+        funcargs: bool = False,
+        truncate_locals: bool = True,
+        chain: bool = True,
     ):
         """
         Return str()able representation of this exception info.

@@ -535,7 +591,7 @@ class ExceptionInfo:
         )
         return fmt.repr_excinfo(self)
 
-    def match(self, regexp):
+    def match(self, regexp: Union[str, Pattern]) -> bool:
         """
         Check whether the regular expression 'regexp' is found in the string
         representation of the exception using ``re.search``. If it matches
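
A hedged sketch of how the typed API above can be exercised, assuming
``ExceptionInfo`` is importable as ``from _pytest._code import ExceptionInfo``
(the assertions are illustrative):

from _pytest._code import ExceptionInfo

try:
    assert 0, "demo"
except AssertionError:
    excinfo = ExceptionInfo.from_current()
    assert excinfo.typename == "AssertionError"
    assert excinfo.errisinstance(AssertionError)  # instance check helper
    assert excinfo.match(r"demo")  # re.search against str(excinfo.value)
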
|
|
|
@@ -2,57 +2,48 @@ import pprint
 import reprlib


-def _call_and_format_exception(call, x, *args):
-    try:
-        # Try the vanilla repr and make sure that the result is a string
-        return call(x, *args)
-    except Exception as exc:
-        exc_name = type(exc).__name__
-        try:
-            exc_info = str(exc)
-        except Exception:
-            exc_info = "unknown"
-        return '<[{}("{}") raised in repr()] {} object at 0x{:x}>'.format(
-            exc_name, exc_info, x.__class__.__name__, id(x)
-        )
+def _format_repr_exception(exc, obj):
+    exc_name = type(exc).__name__
+    try:
+        exc_info = str(exc)
+    except Exception:
+        exc_info = "unknown"
+    return '<[{}("{}") raised in repr()] {} object at 0x{:x}>'.format(
+        exc_name, exc_info, obj.__class__.__name__, id(obj)
+    )
+
+
+def _ellipsize(s, maxsize):
+    if len(s) > maxsize:
+        i = max(0, (maxsize - 3) // 2)
+        j = max(0, maxsize - 3 - i)
+        return s[:i] + "..." + s[len(s) - j :]
+    return s


 class SafeRepr(reprlib.Repr):
     """subclass of repr.Repr that limits the resulting size of repr()
     and includes information on exceptions raised during the call.
     """

+    def __init__(self, maxsize):
+        super().__init__()
+        self.maxstring = maxsize
+        self.maxsize = maxsize
+
     def repr(self, x):
-        return self._callhelper(reprlib.Repr.repr, self, x)
-
-    def repr_unicode(self, x, level):
-        # Strictly speaking wrong on narrow builds
-        def repr(u):
-            if "'" not in u:
-                return "'%s'" % u
-            elif '"' not in u:
-                return '"%s"' % u
-            else:
-                return "'%s'" % u.replace("'", r"\'")
-
-        s = repr(x[: self.maxstring])
-        if len(s) > self.maxstring:
-            i = max(0, (self.maxstring - 3) // 2)
-            j = max(0, self.maxstring - 3 - i)
-            s = repr(x[:i] + x[len(x) - j :])
-            s = s[:i] + "..." + s[len(s) - j :]
-        return s
+        try:
+            s = super().repr(x)
+        except Exception as exc:
+            s = _format_repr_exception(exc, x)
+        return _ellipsize(s, self.maxsize)

     def repr_instance(self, x, level):
-        return self._callhelper(repr, x)
-
-    def _callhelper(self, call, x, *args):
-        s = _call_and_format_exception(call, x, *args)
-        if len(s) > self.maxsize:
-            i = max(0, (self.maxsize - 3) // 2)
-            j = max(0, self.maxsize - 3 - i)
-            s = s[:i] + "..." + s[len(s) - j :]
-        return s
+        try:
+            s = repr(x)
+        except Exception as exc:
+            s = _format_repr_exception(exc, x)
+        return _ellipsize(s, self.maxsize)


 def safeformat(obj):
@@ -60,7 +51,10 @@ def safeformat(obj):
     Failing __repr__ functions of user instances will be represented
     with a short exception info.
     """
-    return _call_and_format_exception(pprint.pformat, obj)
+    try:
+        return pprint.pformat(obj)
+    except Exception as exc:
+        return _format_repr_exception(exc, obj)


 def saferepr(obj, maxsize=240):
@@ -70,9 +64,4 @@ def saferepr(obj, maxsize=240):
     care to never raise exceptions itself. This function is a wrapper
     around the Repr/reprlib functionality of the standard 2.6 lib.
     """
-    # review exception handling
-    srepr = SafeRepr()
-    srepr.maxstring = maxsize
-    srepr.maxsize = maxsize
-    srepr.maxother = 160
-    return srepr.repr(obj)
+    return SafeRepr(maxsize).repr(obj)
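A runnable sketch of the two behaviors the rewrite centralizes: head-and-tail truncation and the exception placeholder. `BrokenRepr` is a hypothetical class, and the helper is restated locally rather than imported from pytest internals:

```python
class BrokenRepr:
    """Hypothetical object whose __repr__ always raises."""

    def __repr__(self):
        raise RuntimeError("boom")


def ellipsize(s, maxsize):
    # Same head + "..." + tail logic as _ellipsize above.
    if len(s) > maxsize:
        i = max(0, (maxsize - 3) // 2)
        j = max(0, maxsize - 3 - i)
        return s[:i] + "..." + s[len(s) - j :]
    return s


print(ellipsize("x" * 100, maxsize=20))  # 8 chars + "..." + 9 chars
try:
    repr(BrokenRepr())
except RuntimeError as exc:
    # saferepr() would catch this and render something like:
    # <[RuntimeError("boom") raised in repr()] BrokenRepr object at 0x...>
    print('<[{}("{}") raised in repr()] ...>'.format(type(exc).__name__, exc))
```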
@@ -33,6 +33,9 @@ PYTEST_TAG = "{}-pytest-{}".format(sys.implementation.cache_tag, version)
 PYC_EXT = ".py" + (__debug__ and "c" or "o")
 PYC_TAIL = "." + PYTEST_TAG + PYC_EXT

+AST_IS = ast.Is()
+AST_NONE = ast.NameConstant(None)
+

 class AssertionRewritingHook:
     """PEP302/PEP451 import hook which rewrites asserts."""
@@ -854,10 +857,7 @@ class AssertionRewriter(ast.NodeVisitor):
         internally already.
         See issue #3191 for more details.
         """
-        # Using parse because it is different between py2 and py3.
-        AST_NONE = ast.parse("None").body[0].value
-        val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE])
+        val_is_none = ast.Compare(node, [AST_IS], [AST_NONE])
         send_warning = ast.parse(
             """\
 from _pytest.warning_types import PytestAssertRewriteWarning
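A self-contained sketch of what the hoisted constants build: the rewriter assembles the `<node> is None` comparison from prebuilt AST nodes instead of re-parsing the string "None" on every assert. The `val` name below is mine, for illustration only:

```python
import ast

AST_IS = ast.Is()
AST_NONE = ast.NameConstant(None)

# Build the expression "val is None" from the shared nodes.
node = ast.Name(id="val", ctx=ast.Load())
val_is_none = ast.Compare(node, [AST_IS], [AST_NONE])
tree = ast.Expression(val_is_none)
ast.fix_missing_locations(tree)
code = compile(tree, "<demo>", "eval")
print(eval(code, {"val": None}))  # True
print(eval(code, {"val": 1}))     # False
```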
@@ -119,9 +119,9 @@ def isiterable(obj):

 def assertrepr_compare(config, op, left, right):
     """Return specialised explanations for some operators/operands"""
-    width = 80 - 15 - len(op) - 2  # 15 chars indentation, 1 space around op
-    left_repr = saferepr(left, maxsize=int(width // 2))
-    right_repr = saferepr(right, maxsize=width - len(left_repr))
+    maxsize = (80 - 15 - len(op) - 2) // 2  # 15 chars indentation, 1 space around op
+    left_repr = saferepr(left, maxsize=maxsize)
+    right_repr = saferepr(right, maxsize=maxsize)

     summary = "{} {} {}".format(left_repr, op, right_repr)

@@ -26,6 +26,12 @@ MODULE_NOT_FOUND_ERROR = (
 )


+if sys.version_info >= (3, 8):
+    from importlib import metadata as importlib_metadata  # noqa
+else:
+    import importlib_metadata  # noqa
+
+
 def _format_args(func):
     return str(signature(func))

@@ -52,11 +58,11 @@ def iscoroutinefunction(func):
     return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)


-def getlocation(function, curdir):
+def getlocation(function, curdir=None):
     function = get_real_func(function)
     fn = py.path.local(inspect.getfile(function))
     lineno = function.__code__.co_firstlineno
-    if fn.relto(curdir):
+    if curdir is not None and fn.relto(curdir):
         fn = fn.relto(curdir)
     return "%s:%d" % (fn, lineno + 1)

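The same import gate, shown standalone. On Python 3.8+ the stdlib module is used; older interpreters need the third-party `importlib_metadata` backport installed, which is an assumption of this sketch:

```python
import sys

if sys.version_info >= (3, 8):
    from importlib import metadata as importlib_metadata
else:
    import importlib_metadata  # backport of the 3.8 stdlib module

# Either way the same API is available afterwards:
print(importlib_metadata.version("pytest"))
```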
@@ -8,8 +8,9 @@ import sys
 import types
 import warnings
 from functools import lru_cache
+from pathlib import Path

-import importlib_metadata
+import attr
 import py
 from packaging.version import Version
 from pluggy import HookimplMarker
@@ -18,6 +19,7 @@ from pluggy import PluginManager

 import _pytest._code
 import _pytest.assertion
+import _pytest.deprecated
 import _pytest.hookspec  # the extension point definitions
 from .exceptions import PrintHelp
 from .exceptions import UsageError
@@ -25,6 +27,7 @@ from .findpaths import determine_setup
 from .findpaths import exists
 from _pytest._code import ExceptionInfo
 from _pytest._code import filter_traceback
+from _pytest.compat import importlib_metadata
 from _pytest.outcomes import fail
 from _pytest.outcomes import Skipped
 from _pytest.warning_types import PytestConfigWarning
@@ -147,10 +150,15 @@ builtin_plugins = set(default_plugins)
 builtin_plugins.add("pytester")


-def get_config(args=None):
+def get_config(args=None, plugins=None):
     # subsequent calls to main will create a fresh instance
     pluginmanager = PytestPluginManager()
-    config = Config(pluginmanager)
+    config = Config(
+        pluginmanager,
+        invocation_params=Config.InvocationParams(
+            args=args, plugins=plugins, dir=Path().resolve()
+        ),
+    )

     if args is not None:
         # Handle any "-p no:plugin" args.
@@ -183,7 +191,7 @@ def _prepareconfig(args=None, plugins=None):
         msg = "`args` parameter expected to be a list or tuple of strings, got: {!r} (type: {})"
         raise TypeError(msg.format(args, type(args)))

-    config = get_config(args)
+    config = get_config(args, plugins)
     pluginmanager = config.pluginmanager
     try:
         if plugins:
@@ -204,6 +212,19 @@ def _prepareconfig(args=None, plugins=None):
         raise


+def _fail_on_non_top_pytest_plugins(conftestpath, confcutdir):
+    msg = (
+        "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n"
+        "It affects the entire test suite instead of just below the conftest as expected.\n"
+        "  {}\n"
+        "Please move it to a top level conftest file at the rootdir:\n"
+        "  {}\n"
+        "For more information, visit:\n"
+        "  https://docs.pytest.org/en/latest/deprecations.html#pytest-plugins-in-non-top-level-conftest-files"
+    )
+    fail(msg.format(conftestpath, confcutdir), pytrace=False)
+
+
 class PytestPluginManager(PluginManager):
     """
     Overwrites :py:class:`pluggy.PluginManager <pluggy.PluginManager>` to add pytest-specific
@@ -424,16 +445,7 @@ class PytestPluginManager(PluginManager):
                 and self._configured
                 and not self._using_pyargs
             ):
-                from _pytest.deprecated import (
-                    PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST,
-                )
-
-                fail(
-                    PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST.format(
-                        conftestpath, self._confcutdir
-                    ),
-                    pytrace=False,
-                )
+                _fail_on_non_top_pytest_plugins(conftestpath, self._confcutdir)
         except Exception:
             raise ConftestImportFailure(conftestpath, sys.exc_info())
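A hedged sketch of how a plugin or `conftest.py` might read the new invocation parameters introduced above; the hook body is illustrative, not from the patch:

```python
def pytest_configure(config):
    params = config.invocation_params
    print("args:", params.args)        # e.g. ('-x', 'tests')
    print("plugins:", params.plugins)  # extra plugin objects, or None
    print("dir:", params.dir)          # pathlib.Path of the invocation directory
```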
@@ -608,20 +620,57 @@ def _iter_rewritable_modules(package_files):


 class Config:
-    """ access to configuration values, pluginmanager and plugin hooks. """
+    """
+    Access to configuration values, pluginmanager and plugin hooks.
+
+    :ivar PytestPluginManager pluginmanager: the plugin manager handles plugin registration and hook invocation.
+
+    :ivar argparse.Namespace option: access to command line option as attributes.
+
+    :ivar InvocationParams invocation_params:
+
+        Object containing the parameters regarding the ``pytest.main``
+        invocation.
+
+        Contains the following read-only attributes:
+
+        * ``args``: list of command-line arguments as passed to ``pytest.main()``.
+        * ``plugins``: list of extra plugins, might be None.
+        * ``dir``: directory where ``pytest.main()`` was invoked from.
+    """
+
+    @attr.s(frozen=True)
+    class InvocationParams:
+        """Holds parameters passed during ``pytest.main()``
+
+        .. note::
+
+            Currently the environment variable PYTEST_ADDOPTS is also handled by
+            pytest implicitly, not being part of the invocation.
+
+            Plugins accessing ``InvocationParams`` must be aware of that.
+        """
+
+        args = attr.ib()
+        plugins = attr.ib()
+        dir = attr.ib()

-    def __init__(self, pluginmanager):
-        #: access to command line option as attributes.
-        #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
-        self.option = argparse.Namespace()
+    def __init__(self, pluginmanager, *, invocation_params=None):
         from .argparsing import Parser, FILE_OR_DIR

+        if invocation_params is None:
+            invocation_params = self.InvocationParams(
+                args=(), plugins=None, dir=Path().resolve()
+            )
+
+        self.option = argparse.Namespace()
+        self.invocation_params = invocation_params
+
         _a = FILE_OR_DIR
         self._parser = Parser(
             usage="%(prog)s [options] [{}] [{}] [...]".format(_a, _a),
             processopt=self._processopt,
         )
-        #: a pluginmanager instance
         self.pluginmanager = pluginmanager
         self.trace = self.pluginmanager.trace.root.get("config")
         self.hook = self.pluginmanager.hook
@@ -631,9 +680,13 @@ class Config:
         self._cleanup = []
         self.pluginmanager.register(self, "pytestconfig")
         self._configured = False
-        self.invocation_dir = py.path.local()
         self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))

+    @property
+    def invocation_dir(self):
+        """Backward compatibility"""
+        return py.path.local(str(self.invocation_params.dir))
+
     def add_cleanup(self, func):
         """ Add a function to be called when the config object gets out of
         use (usually coninciding with pytest_unconfigure)."""
@@ -864,7 +917,7 @@ class Config:
         assert not hasattr(
             self, "args"
         ), "can only parse cmdline args at most once per Config object"
-        self._origargs = args
+        assert self.invocation_params.args == args
         self.hook.pytest_addhooks.call_historic(
             kwargs=dict(pluginmanager=self.pluginmanager)
         )
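What `@attr.s(frozen=True)` buys here, shown with a stand-in class mirroring the one added above (names and values are illustrative):

```python
import attr


@attr.s(frozen=True)
class InvocationParams:
    # frozen=True makes instances immutable, so plugins cannot
    # mutate the recorded invocation after the fact.
    args = attr.ib()
    plugins = attr.ib()
    dir = attr.ib()


params = InvocationParams(args=("-x", "tests"), plugins=None, dir="/project")
try:
    params.args = ()
except attr.exceptions.FrozenInstanceError:
    print("read-only, as intended")
```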
@@ -20,8 +20,6 @@ def getcfg(args, config=None):

     note: config is optional and used only to issue warnings explicitly (#2891).
     """
-    from _pytest.deprecated import CFG_PYTEST_SECTION
-
     inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
     args = [x for x in args if not str(x).startswith("-")]
     if not args:
@@ -101,6 +99,9 @@ def get_dirs_from_args(args):
     return [get_dir_from_path(path) for path in possible_paths if path.exists()]


+CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead."
+
+
 def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None):
     dirs = get_dirs_from_args(args)
     if inifile:
@@ -111,8 +112,6 @@ def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None):
         try:
             inicfg = iniconfig[section]
             if is_cfg_file and section == "pytest" and config is not None:
-                from _pytest.deprecated import CFG_PYTEST_SECTION
-
                 fail(
                     CFG_PYTEST_SECTION.format(filename=str(inifile)), pytrace=False
                 )
@@ -9,10 +9,6 @@ All constants defined in this module should be either PytestWarning instances or
 in case of warnings which need to format their messages.
 """
 from _pytest.warning_types import PytestDeprecationWarning
-from _pytest.warning_types import RemovedInPytest4Warning
-from _pytest.warning_types import UnformattedWarning

-YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored"

 # set of plugins which have been integrated into the core; we use this list to ignore
 # them during registration to avoid conflicts
@@ -23,82 +19,13 @@ DEPRECATED_EXTERNAL_PLUGINS = {
 }


-FIXTURE_FUNCTION_CALL = (
-    'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
-    "but are created automatically when test functions request them as parameters.\n"
-    "See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n"
-    "https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code."
-)
-
-FIXTURE_NAMED_REQUEST = PytestDeprecationWarning(
-    "'request' is a reserved name for fixtures and will raise an error in future versions"
-)
-
-CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead."
-
-GETFUNCARGVALUE = RemovedInPytest4Warning(
-    "getfuncargvalue is deprecated, use getfixturevalue"
-)
-
 FUNCARGNAMES = PytestDeprecationWarning(
     "The `funcargnames` attribute was an alias for `fixturenames`, "
     "since pytest 2.3 - use the newer attribute instead."
 )

-RAISES_MESSAGE_PARAMETER = PytestDeprecationWarning(
-    "The 'message' parameter is deprecated.\n"
-    "(did you mean to use `match='some regex'` to check the exception message?)\n"
-    "Please see:\n"
-    "  https://docs.pytest.org/en/4.6-maintenance/deprecations.html#message-parameter-of-pytest-raises"
-)
-
 RESULT_LOG = PytestDeprecationWarning(
     "--result-log is deprecated and scheduled for removal in pytest 6.0.\n"
     "See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information."
 )

-RAISES_EXEC = PytestDeprecationWarning(
-    "raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\n\n"
-    "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec"
-)
-WARNS_EXEC = PytestDeprecationWarning(
-    "warns(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly.\n\n"
-    "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec"
-)
-
-PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = (
-    "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported "
-    "because it affects the entire directory tree in a non-explicit way.\n"
-    "  {}\n"
-    "Please move it to a top level conftest file at the rootdir:\n"
-    "  {}\n"
-    "For more information, visit:\n"
-    "  https://docs.pytest.org/en/latest/deprecations.html#pytest-plugins-in-non-top-level-conftest-files"
-)
-
-PYTEST_CONFIG_GLOBAL = PytestDeprecationWarning(
-    "the `pytest.config` global is deprecated. Please use `request.config` "
-    "or `pytest_configure` (if you're a pytest plugin) instead."
-)
-
-PYTEST_ENSURETEMP = RemovedInPytest4Warning(
-    "pytest/tmpdir_factory.ensuretemp is deprecated, \n"
-    "please use the tmp_path fixture or tmp_path_factory.mktemp"
-)
-
-PYTEST_LOGWARNING = PytestDeprecationWarning(
-    "pytest_logwarning is deprecated, no longer being called, and will be removed soon\n"
-    "please use pytest_warning_captured instead"
-)
-
-PYTEST_WARNS_UNKNOWN_KWARGS = UnformattedWarning(
-    PytestDeprecationWarning,
-    "pytest.warns() got unexpected keyword arguments: {args!r}.\n"
-    "This will be an error in future versions.",
-)
-
-PYTEST_PARAM_UNKNOWN_KWARGS = UnformattedWarning(
-    PytestDeprecationWarning,
-    "pytest.param() got unexpected keyword arguments: {args!r}.\n"
-    "This will be an error in future versions.",
-)
@@ -13,6 +13,7 @@ from _pytest._code.code import TerminalRepr
 from _pytest.compat import safe_getattr
 from _pytest.fixtures import FixtureRequest
 from _pytest.outcomes import Skipped
+from _pytest.python_api import approx
 from _pytest.warning_types import PytestWarning

 DOCTEST_REPORT_CHOICE_NONE = "none"
@@ -286,6 +287,7 @@ def _get_flag_lookup():
         COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
         ALLOW_UNICODE=_get_allow_unicode_flag(),
         ALLOW_BYTES=_get_allow_bytes_flag(),
+        NUMBER=_get_number_flag(),
     )


@@ -453,10 +455,15 @@ def _setup_fixtures(doctest_item):

 def _get_checker():
     """
-    Returns a doctest.OutputChecker subclass that takes in account the
-    ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
-    to strip b'' prefixes.
-    Useful when the same doctest should run in Python 2 and Python 3.
+    Returns a doctest.OutputChecker subclass that supports some
+    additional options:
+
+    * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
+      prefixes (respectively) in string literals. Useful when the same
+      doctest should run in Python 2 and Python 3.
+
+    * NUMBER to ignore floating-point differences smaller than the
+      precision of the literal number in the doctest.

     An inner class is used to avoid importing "doctest" at the module
     level.
@@ -469,26 +476,46 @@ def _get_checker():

     class LiteralsOutputChecker(doctest.OutputChecker):
         """
-        Copied from doctest_nose_plugin.py from the nltk project:
-            https://github.com/nltk/nltk
-        Further extended to also support byte literals.
+        Based on doctest_nose_plugin.py from the nltk project
+        (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+        by Sebastien Boisgerault (https://github.com/boisgera/numtest).
         """

         _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
         _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+        _number_re = re.compile(
+            r"""
+            (?P<number>
+              (?P<mantissa>
+                (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
+                |
+                (?P<integer2> [+-]?\d+)\.
+              )
+              (?:
+                [Ee]
+                (?P<exponent1> [+-]?\d+)
+              )?
+              |
+              (?P<integer3> [+-]?\d+)
+              (?:
+                [Ee]
+                (?P<exponent2> [+-]?\d+)
+              )
+            )
+            """,
+            re.VERBOSE,
+        )

         def check_output(self, want, got, optionflags):
-            res = doctest.OutputChecker.check_output(self, want, got, optionflags)
-            if res:
+            if doctest.OutputChecker.check_output(self, want, got, optionflags):
                 return True

             allow_unicode = optionflags & _get_allow_unicode_flag()
             allow_bytes = optionflags & _get_allow_bytes_flag()
-            if not allow_unicode and not allow_bytes:
-                return False
+            allow_number = optionflags & _get_number_flag()

-            else:  # pragma: no cover
+            if not allow_unicode and not allow_bytes and not allow_number:
+                return False

             def remove_prefixes(regex, txt):
                 return re.sub(regex, r"\1\2", txt)
@@ -496,11 +523,42 @@ def _get_checker():
             if allow_unicode:
                 want = remove_prefixes(self._unicode_literal_re, want)
                 got = remove_prefixes(self._unicode_literal_re, got)

             if allow_bytes:
                 want = remove_prefixes(self._bytes_literal_re, want)
                 got = remove_prefixes(self._bytes_literal_re, got)
-            res = doctest.OutputChecker.check_output(self, want, got, optionflags)
-            return res
+
+            if allow_number:
+                got = self._remove_unwanted_precision(want, got)
+
+            return doctest.OutputChecker.check_output(self, want, got, optionflags)
+
+        def _remove_unwanted_precision(self, want, got):
+            wants = list(self._number_re.finditer(want))
+            gots = list(self._number_re.finditer(got))
+            if len(wants) != len(gots):
+                return got
+            offset = 0
+            for w, g in zip(wants, gots):
+                fraction = w.group("fraction")
+                exponent = w.group("exponent1")
+                if exponent is None:
+                    exponent = w.group("exponent2")
+                if fraction is None:
+                    precision = 0
+                else:
+                    precision = len(fraction)
+                if exponent is not None:
+                    precision -= int(exponent)
+                if float(w.group()) == approx(float(g.group()), abs=10 ** -precision):
+                    # They're close enough. Replace the text we actually
+                    # got with the text we want, so that it will match when we
+                    # check the string literally.
+                    got = (
+                        got[: g.start() + offset] + w.group() + got[g.end() + offset :]
+                    )
+                    offset += w.end() - w.start() - (g.end() - g.start())
+            return got

     _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
     return _get_checker.LiteralsOutputChecker()
@@ -524,6 +582,15 @@ def _get_allow_bytes_flag():
     return doctest.register_optionflag("ALLOW_BYTES")


+def _get_number_flag():
+    """
+    Registers and returns the NUMBER flag.
+    """
+    import doctest
+
+    return doctest.register_optionflag("NUMBER")
+
+
 def _get_report_choice(key):
     """
     This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid
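A minimal doctest (the function name is illustrative) that only passes with the new NUMBER option enabled, since the computed value carries more precision than the expected literal. Run it under pytest with `--doctest-modules`:

```python
def sqrt2():
    """
    >>> 2 ** 0.5  # doctest: +NUMBER
    1.414
    """
    return 2 ** 0.5
```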
@@ -2,7 +2,6 @@ import functools
 import inspect
 import itertools
 import sys
-import warnings
 from collections import defaultdict
 from collections import deque
 from collections import OrderedDict
@@ -28,8 +27,6 @@ from _pytest.compat import getlocation
 from _pytest.compat import is_generator
 from _pytest.compat import NOTSET
 from _pytest.compat import safe_getattr
-from _pytest.deprecated import FIXTURE_FUNCTION_CALL
-from _pytest.deprecated import FIXTURE_NAMED_REQUEST
 from _pytest.outcomes import fail
 from _pytest.outcomes import TEST_OUTCOME
@@ -475,13 +472,6 @@ class FixtureRequest(FuncargnamesCompatAttr):
         """
         return self._get_active_fixturedef(argname).cached_result[0]

-    def getfuncargvalue(self, argname):
-        """ Deprecated, use getfixturevalue. """
-        from _pytest import deprecated
-
-        warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2)
-        return self.getfixturevalue(argname)
-
     def _get_active_fixturedef(self, argname):
         try:
             return self._fixture_defs[argname]
@@ -945,9 +935,12 @@ def wrap_function_to_error_out_if_called_directly(function, fixture_marker):
     """Wrap the given fixture function so we can raise an error about it being called directly,
     instead of used as an argument in a test function.
     """
-    message = FIXTURE_FUNCTION_CALL.format(
-        name=fixture_marker.name or function.__name__
-    )
+    message = (
+        'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
+        "but are created automatically when test functions request them as parameters.\n"
+        "See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n"
+        "https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code."
+    ).format(name=fixture_marker.name or function.__name__)

     @functools.wraps(function)
     def result(*args, **kwargs):
@@ -982,7 +975,13 @@ class FixtureFunctionMarker:

         name = self.name or function.__name__
         if name == "request":
-            warnings.warn(FIXTURE_NAMED_REQUEST)
+            location = getlocation(function)
+            fail(
+                "'request' is a reserved word for fixtures, use another name:\n  {}".format(
+                    location
+                ),
+                pytrace=False,
+            )
         function._pytestfixturefunction = self
         return function
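A sketch of the failure mode the inlined message guards, using only public API (the fixture and test names are mine):

```python
import pytest


@pytest.fixture
def answer():
    return 42


def test_calling_fixture_directly():
    # Fixtures must be requested as parameters; calling one directly
    # raises the "called directly" failure built in the hunk above.
    with pytest.raises(pytest.fail.Exception):
        answer()
```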
@@ -98,7 +98,7 @@ def pytest_cmdline_parse():
                 py.__version__,
                 ".".join(map(str, sys.version_info)),
                 os.getcwd(),
-                config._origargs,
+                config.invocation_params.args,
             )
         )
         config.trace.root.setwriter(debugfile.write)
|
||||||
""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
|
""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
|
||||||
from pluggy import HookspecMarker
|
from pluggy import HookspecMarker
|
||||||
|
|
||||||
from _pytest.deprecated import PYTEST_LOGWARNING
|
|
||||||
|
|
||||||
hookspec = HookspecMarker("pytest")
|
hookspec = HookspecMarker("pytest")
|
||||||
|
|
||||||
|
@ -575,27 +574,6 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
@hookspec(historic=True, warn_on_impl=PYTEST_LOGWARNING)
|
|
||||||
def pytest_logwarning(message, code, nodeid, fslocation):
|
|
||||||
"""
|
|
||||||
.. deprecated:: 3.8
|
|
||||||
|
|
||||||
This hook is will stop working in a future release.
|
|
||||||
|
|
||||||
pytest no longer triggers this hook, but the
|
|
||||||
terminal writer still implements it to display warnings issued by
|
|
||||||
:meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be
|
|
||||||
an error in future releases.
|
|
||||||
|
|
||||||
process a warning specified by a message, a code string,
|
|
||||||
a nodeid and fslocation (both of which may be None
|
|
||||||
if the warning is not tied to a particular node/location).
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
This hook is incompatible with ``hookwrapper=True``.
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
@hookspec(historic=True)
|
@hookspec(historic=True)
|
||||||
def pytest_warning_captured(warning_message, when, item):
|
def pytest_warning_captured(warning_message, when, item):
|
||||||
"""
|
"""
|
||||||
|
|
|
@@ -10,9 +10,11 @@ src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
 """
 import functools
 import os
+import platform
 import re
 import sys
 import time
+from datetime import datetime

 import py

@@ -657,8 +659,7 @@ class LogXML:
         )
         logfile.write('<?xml version="1.0" encoding="utf-8"?>')

-        logfile.write(
-            Junit.testsuite(
+        suite_node = Junit.testsuite(
             self._get_global_properties_node(),
             [x.to_xml() for x in self.node_reporters_ordered],
             name=self.suite_name,
@@ -667,8 +668,10 @@ class LogXML:
             skipped=self.stats["skipped"],
             tests=numtests,
             time="%.3f" % suite_time_delta,
-            ).unicode(indent=0)
+            timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(),
+            hostname=platform.node(),
         )
+        logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))
         logfile.close()

     def pytest_terminal_summary(self, terminalreporter):
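What the two new `<testsuite>` attributes contain, shown standalone with the same stdlib calls the hunk uses:

```python
import platform
import time
from datetime import datetime

suite_start_time = time.time()
print("timestamp:", datetime.fromtimestamp(suite_start_time).isoformat())
print("hostname:", platform.node())
```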
@@ -5,7 +5,6 @@ import functools
 import importlib
 import os
 import sys
-import warnings

 import attr
 import py
@@ -15,7 +14,6 @@ from _pytest import nodes
 from _pytest.config import directory_arg
 from _pytest.config import hookimpl
 from _pytest.config import UsageError
-from _pytest.deprecated import PYTEST_CONFIG_GLOBAL
 from _pytest.outcomes import exit
 from _pytest.runner import collect_one_node

@@ -179,26 +177,6 @@ def pytest_addoption(parser):
     )


-class _ConfigDeprecated:
-    def __init__(self, config):
-        self.__dict__["_config"] = config
-
-    def __getattr__(self, attr):
-        warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
-        return getattr(self._config, attr)
-
-    def __setattr__(self, attr, val):
-        warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
-        return setattr(self._config, attr, val)
-
-    def __repr__(self):
-        return "{}({!r})".format(type(self).__name__, self._config)
-
-
-def pytest_configure(config):
-    __import__("pytest").config = _ConfigDeprecated(config)  # compatibility
-
-
 def wrap_session(config, doit):
     """Skeleton command line program"""
     session = Session(config)
@@ -10,7 +10,6 @@ import attr
 from ..compat import ascii_escaped
 from ..compat import getfslineno
 from ..compat import NOTSET
-from _pytest.deprecated import PYTEST_PARAM_UNKNOWN_KWARGS
 from _pytest.outcomes import fail
 from _pytest.warning_types import PytestUnknownMarkWarning

@@ -62,26 +61,19 @@ def get_empty_parameterset_mark(config, argnames, func):

 class ParameterSet(namedtuple("ParameterSet", "values, marks, id")):
     @classmethod
-    def param(cls, *values, **kwargs):
-        marks = kwargs.pop("marks", ())
+    def param(cls, *values, marks=(), id=None):
         if isinstance(marks, MarkDecorator):
             marks = (marks,)
         else:
             assert isinstance(marks, (tuple, list, set))

-        id_ = kwargs.pop("id", None)
-        if id_ is not None:
-            if not isinstance(id_, str):
+        if id is not None:
+            if not isinstance(id, str):
                 raise TypeError(
-                    "Expected id to be a string, got {}: {!r}".format(type(id_), id_)
+                    "Expected id to be a string, got {}: {!r}".format(type(id), id)
                 )
-            id_ = ascii_escaped(id_)
-
-        if kwargs:
-            warnings.warn(
-                PYTEST_PARAM_UNKNOWN_KWARGS.format(args=sorted(kwargs)), stacklevel=3
-            )
-        return cls(values, marks, id_)
+            id = ascii_escaped(id)
+        return cls(values, marks, id)

     @classmethod
     def extract_from(cls, parameterset, force_tuple=False):
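With keyword-only `marks=` and `id=`, a mistyped keyword now raises `TypeError` at collection instead of a deprecation warning. The supported spellings, as a small usage sketch:

```python
import pytest


@pytest.mark.parametrize(
    "value",
    [
        pytest.param(1, id="one"),
        pytest.param(2, marks=pytest.mark.xfail(reason="demo"), id="two"),
    ],
)
def test_values(value):
    assert value in (1, 2)
```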
@@ -1,31 +1,9 @@
 """ run test suites written for nose. """
-import sys
-
-import pytest
 from _pytest import python
-from _pytest import runner
 from _pytest import unittest
 from _pytest.config import hookimpl


-def get_skip_exceptions():
-    skip_classes = set()
-    for module_name in ("unittest", "unittest2", "nose"):
-        mod = sys.modules.get(module_name)
-        if hasattr(mod, "SkipTest"):
-            skip_classes.add(mod.SkipTest)
-    return tuple(skip_classes)
-
-
-def pytest_runtest_makereport(item, call):
-    if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
-        # let's substitute the excinfo with a pytest.skip one
-        call2 = runner.CallInfo.from_call(
-            lambda: pytest.skip(str(call.excinfo.value)), call.when
-        )
-        call.excinfo = call2.excinfo
-
-
 @hookimpl(trylast=True)
 def pytest_runtest_setup(item):
     if is_potential_nosetest(item):
@@ -40,9 +18,6 @@ def teardown_nose(item):
     if is_potential_nosetest(item):
         if not call_optional(item.obj, "teardown"):
             call_optional(item.parent.obj, "teardown")
-    # if hasattr(item.parent, '_nosegensetup'):
-    #     #call_optional(item._nosegensetup, 'teardown')
-    #     del item.parent._nosegensetup


 def is_potential_nosetest(item):
@@ -3,16 +3,21 @@ exception classes and constants handling test outcomes
 as well as functions creating them
 """
 import sys
+from typing import Any
+from typing import Optional

 from packaging.version import Version

+if False:  # TYPE_CHECKING
+    from typing import NoReturn
+

 class OutcomeException(BaseException):
     """ OutcomeException and its subclass instances indicate and
     contain info about test and collection outcomes.
     """

-    def __init__(self, msg=None, pytrace=True):
+    def __init__(self, msg: Optional[str] = None, pytrace: bool = True) -> None:
         if msg is not None and not isinstance(msg, str):
             error_msg = (
                 "{} expected string as 'msg' parameter, got '{}' instead.\n"
@@ -23,7 +28,7 @@ class OutcomeException(BaseException):
         self.msg = msg
         self.pytrace = pytrace

-    def __repr__(self):
+    def __repr__(self) -> str:
         if self.msg:
             return self.msg
         return "<{} instance>".format(self.__class__.__name__)
@@ -39,7 +44,12 @@ class Skipped(OutcomeException):
     # in order to have Skipped exception printing shorter/nicer
     __module__ = "builtins"

-    def __init__(self, msg=None, pytrace=True, allow_module_level=False):
+    def __init__(
+        self,
+        msg: Optional[str] = None,
+        pytrace: bool = True,
+        allow_module_level: bool = False,
+    ) -> None:
         OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
         self.allow_module_level = allow_module_level

@@ -53,7 +63,9 @@ class Failed(OutcomeException):
 class Exit(Exception):
     """ raised for immediate program exits (no tracebacks/summaries)"""

-    def __init__(self, msg="unknown reason", returncode=None):
+    def __init__(
+        self, msg: str = "unknown reason", returncode: Optional[int] = None
+    ) -> None:
         self.msg = msg
         self.returncode = returncode
         super().__init__(msg)
@@ -62,7 +74,7 @@ class Exit(Exception):
 # exposed helper methods


-def exit(msg, returncode=None):
+def exit(msg: str, returncode: Optional[int] = None) -> "NoReturn":
     """
     Exit testing process.

@@ -77,7 +89,7 @@ def exit(msg, returncode=None):
 exit.Exception = Exit  # type: ignore


-def skip(msg="", *, allow_module_level=False):
+def skip(msg: str = "", *, allow_module_level: bool = False) -> "NoReturn":
     """
     Skip an executing test with the given message.

@@ -104,7 +116,7 @@ def skip(msg="", *, allow_module_level=False):
 skip.Exception = Skipped  # type: ignore


-def fail(msg="", pytrace=True):
+def fail(msg: str = "", pytrace: bool = True) -> "NoReturn":
     """
     Explicitly fail an executing test with the given message.

@@ -124,7 +136,7 @@ class XFailed(Failed):
     """ raised from an explicit call to pytest.xfail() """


-def xfail(reason=""):
+def xfail(reason: str = "") -> "NoReturn":
     """
     Imperatively xfail an executing test or setup functions with the given reason.

@@ -142,12 +154,14 @@ def xfail(reason=""):
 xfail.Exception = XFailed  # type: ignore


-def importorskip(modname, minversion=None, reason=None):
+def importorskip(
+    modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
+) -> Any:
     """Imports and returns the requested module ``modname``, or skip the
     current test if the module cannot be imported.

     :param str modname: the name of the module to import
-    :param str minversion: if given, the imported module ``__version__``
+    :param str minversion: if given, the imported module's ``__version__``
         attribute must be at least this minimal version, otherwise the test is
         still skipped.
     :param str reason: if given, this reason is shown as the message when the
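The `if False:  # TYPE_CHECKING` guard keeps `NoReturn` visible to type checkers without importing it at runtime (it is missing from `typing` on early 3.5/3.6 point releases), and the string form `"NoReturn"` defers evaluation of the annotation. A self-contained sketch of the pattern (the function is illustrative):

```python
if False:  # TYPE_CHECKING
    from typing import NoReturn


def bail_out(msg: str) -> "NoReturn":
    # Always raises, so the annotation is honest.
    raise SystemExit(msg)
```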
@@ -340,7 +340,10 @@ def _config_for_test():
     config._ensure_unconfigure()  # cleanup, e.g. capman closing tmpfiles.


-rex_outcome = re.compile(r"(\d+) ([\w-]+)")
+# regex to match the session duration string in the summary: "74.34s"
+rex_session_duration = re.compile(r"\d+\.\d\ds")
+# regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped"
+rex_outcome = re.compile(r"(\d+) (\w+)")


 class RunResult:
@@ -379,14 +382,11 @@ class RunResult:

         """
         for line in reversed(self.outlines):
-            if "seconds" in line:
+            if rex_session_duration.search(line):
                 outcomes = rex_outcome.findall(line)
-                if outcomes:
-                    d = {}
-                    for num, cat in outcomes:
-                        d[cat] = int(num)
-                    return d
-        raise ValueError("Pytest terminal report not found")
+                return {noun: int(count) for (count, noun) in outcomes}
+
+        raise ValueError("Pytest terminal summary report not found")

     def assert_outcomes(
         self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0
@@ -632,7 +632,7 @@ class Testdir:
     def copy_example(self, name=None):
         """Copy file from project's directory into the testdir.

-        :param str name: The name of the file for copy.
+        :param str name: The name of the file to copy.
         :return: path to the copied directory (inside ``self.tmpdir``).

         """
@@ -1194,6 +1194,8 @@ class Testdir:
             pytest.skip("pypy-64 bit not supported")
         if sys.platform.startswith("freebsd"):
             pytest.xfail("pexpect does not work reliably on freebsd")
+        if not hasattr(pexpect, "spawn"):
+            pytest.skip("pexpect.spawn not available")
         logfile = self.tmpdir.join("spawn.out").open("wb")

         # Do not load user config.
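How the two regexes cooperate on a summary line, shown standalone (the sample line is made up):

```python
import re

# Same expressions as above: the duration string anchors the summary line,
# then every "<count> <word>" pair is collected.
rex_session_duration = re.compile(r"\d+\.\d\ds")
rex_outcome = re.compile(r"(\d+) (\w+)")

line = "==== 2 passed, 1 skipped in 0.12s ===="
if rex_session_duration.search(line):
    outcomes = rex_outcome.findall(line)
    print({noun: int(count) for (count, noun) in outcomes})
    # {'passed': 2, 'skipped': 1}
```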
@ -1,18 +1,18 @@
|
||||||
""" Python test discovery, setup and run of test functions. """
|
""" Python test discovery, setup and run of test functions. """
|
||||||
import collections
|
|
||||||
import enum
|
import enum
|
||||||
import fnmatch
|
import fnmatch
|
||||||
import inspect
|
import inspect
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import warnings
|
import warnings
|
||||||
|
from collections import Counter
|
||||||
|
from collections.abc import Sequence
|
||||||
from functools import partial
|
from functools import partial
|
||||||
from textwrap import dedent
|
from textwrap import dedent
|
||||||
|
|
||||||
import py
|
import py
|
||||||
|
|
||||||
import _pytest
|
import _pytest
|
||||||
from _pytest import deprecated
|
|
||||||
from _pytest import fixtures
|
from _pytest import fixtures
|
||||||
from _pytest import nodes
|
from _pytest import nodes
|
||||||
from _pytest._code import filter_traceback
|
from _pytest._code import filter_traceback
|
||||||
|
@ -225,7 +225,9 @@ def pytest_pycollect_makeitem(collector, name, obj):
|
||||||
elif getattr(obj, "__test__", True):
|
elif getattr(obj, "__test__", True):
|
||||||
if is_generator(obj):
|
if is_generator(obj):
|
||||||
res = Function(name, parent=collector)
|
res = Function(name, parent=collector)
|
||||||
reason = deprecated.YIELD_TESTS.format(name=name)
|
reason = "yield tests were removed in pytest 4.0 - {name} will be ignored".format(
|
||||||
|
name=name
|
||||||
|
)
|
||||||
res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
|
res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
|
||||||
res.warn(PytestCollectionWarning(reason))
|
res.warn(PytestCollectionWarning(reason))
|
||||||
else:
|
else:
|
||||||
|
@ -246,9 +248,6 @@ class PyobjContext:
|
||||||
class PyobjMixin(PyobjContext):
|
class PyobjMixin(PyobjContext):
|
||||||
_ALLOW_MARKERS = True
|
_ALLOW_MARKERS = True
|
||||||
|
|
||||||
def __init__(self, *k, **kw):
|
|
||||||
super().__init__(*k, **kw)
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def obj(self):
|
def obj(self):
|
||||||
"""Underlying Python object."""
|
"""Underlying Python object."""
|
||||||
|
@ -400,12 +399,8 @@ class PyCollector(PyobjMixin, nodes.Collector):
|
||||||
methods.append(module.pytest_generate_tests)
|
methods.append(module.pytest_generate_tests)
|
||||||
if hasattr(cls, "pytest_generate_tests"):
|
if hasattr(cls, "pytest_generate_tests"):
|
||||||
methods.append(cls().pytest_generate_tests)
|
methods.append(cls().pytest_generate_tests)
|
||||||
if methods:
|
|
||||||
self.ihook.pytest_generate_tests.call_extra(
|
self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))
|
||||||
methods, dict(metafunc=metafunc)
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.ihook.pytest_generate_tests(metafunc=metafunc)
|
|
||||||
|
|
||||||
if not metafunc._calls:
|
if not metafunc._calls:
|
||||||
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
|
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
|
||||||
|
@@ -450,13 +445,12 @@ class Module(nodes.File, PyCollector):
         Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
         other fixtures (#517).
         """
-        setup_module = _get_non_fixture_func(self.obj, "setUpModule")
-        if setup_module is None:
-            setup_module = _get_non_fixture_func(self.obj, "setup_module")
-        teardown_module = _get_non_fixture_func(self.obj, "tearDownModule")
-        if teardown_module is None:
-            teardown_module = _get_non_fixture_func(self.obj, "teardown_module")
+        setup_module = _get_first_non_fixture_func(
+            self.obj, ("setUpModule", "setup_module")
+        )
+        teardown_module = _get_first_non_fixture_func(
+            self.obj, ("tearDownModule", "teardown_module")
+        )

         if setup_module is None and teardown_module is None:
             return
@@ -478,8 +472,10 @@ class Module(nodes.File, PyCollector):
         Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
         other fixtures (#517).
         """
-        setup_function = _get_non_fixture_func(self.obj, "setup_function")
-        teardown_function = _get_non_fixture_func(self.obj, "teardown_function")
+        setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
+        teardown_function = _get_first_non_fixture_func(
+            self.obj, ("teardown_function",)
+        )
         if setup_function is None and teardown_function is None:
             return

@@ -563,15 +559,15 @@ class Package(Module):
     def setup(self):
         # not using fixtures to call setup_module here because autouse fixtures
         # from packages are not called automatically (#4085)
-        setup_module = _get_non_fixture_func(self.obj, "setUpModule")
-        if setup_module is None:
-            setup_module = _get_non_fixture_func(self.obj, "setup_module")
+        setup_module = _get_first_non_fixture_func(
+            self.obj, ("setUpModule", "setup_module")
+        )
         if setup_module is not None:
             _call_with_optional_argument(setup_module, self.obj)

-        teardown_module = _get_non_fixture_func(self.obj, "tearDownModule")
-        if teardown_module is None:
-            teardown_module = _get_non_fixture_func(self.obj, "teardown_module")
+        teardown_module = _get_first_non_fixture_func(
+            self.obj, ("tearDownModule", "teardown_module")
+        )
         if teardown_module is not None:
             func = partial(_call_with_optional_argument, teardown_module, self.obj)
             self.addfinalizer(func)
@@ -662,27 +658,6 @@ class Package(Module):
             pkg_prefixes.add(path)


-def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):
-    """
-    Return a callable to perform xunit-style setup or teardown if
-    the function exists in the ``holder`` object.
-    The ``param_obj`` parameter is the parameter which will be passed to the function
-    when the callable is called without arguments, defaults to the ``holder`` object.
-    Return ``None`` if a suitable callable is not found.
-    """
-    # TODO: only needed because of Package!
-    param_obj = param_obj if param_obj is not None else holder
-    result = _get_non_fixture_func(holder, attr_name)
-    if result is not None:
-        arg_count = result.__code__.co_argcount
-        if inspect.ismethod(result):
-            arg_count -= 1
-        if arg_count:
-            return lambda: result(param_obj)
-        else:
-            return result
-
-
 def _call_with_optional_argument(func, arg):
     """Call the given function with the given argument if func accepts one argument, otherwise
     calls func without arguments"""
@@ -695,13 +670,14 @@ def _call_with_optional_argument(func, arg):
         func()


-def _get_non_fixture_func(obj, name):
+def _get_first_non_fixture_func(obj, names):
     """Return the attribute from the given object to be used as a setup/teardown
     xunit-style function, but only if not marked as a fixture to
     avoid calling it twice.
     """
-    meth = getattr(obj, name, None)
-    if fixtures.getfixturemarker(meth) is None:
-        return meth
+    for name in names:
+        meth = getattr(obj, name, None)
+        if meth is not None and fixtures.getfixturemarker(meth) is None:
+            return meth

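Editor's note: a minimal sketch of the lookup order the rewritten helper implements, ignoring the fixture-marker check (which needs pytest internals): the first candidate name that resolves to a non-None attribute wins, so `setUpModule` shadows `setup_module` when both exist. The names below are illustrative only:

def first_attr(obj, names):
    # simplified stand-in for _get_first_non_fixture_func
    for name in names:
        meth = getattr(obj, name, None)
        if meth is not None:
            return meth

class mod:
    @staticmethod
    def setup_module():
        pass

assert first_attr(mod, ("setUpModule", "setup_module")) is mod.setup_module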
@@ -742,7 +718,7 @@ class Class(PyCollector):
         Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
         other fixtures (#517).
         """
-        setup_class = _get_non_fixture_func(self.obj, "setup_class")
+        setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
         teardown_class = getattr(self.obj, "teardown_class", None)
         if setup_class is None and teardown_class is None:
             return

@@ -766,7 +742,7 @@ class Class(PyCollector):
         Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
         other fixtures (#517).
         """
-        setup_method = _get_non_fixture_func(self.obj, "setup_method")
+        setup_method = _get_first_non_fixture_func(self.obj, ("setup_method",))
         teardown_method = getattr(self.obj, "teardown_method", None)
         if setup_method is None and teardown_method is None:
             return
@@ -904,18 +880,6 @@ class CallSpec2:
             self._idlist.append(id)
         self.marks.extend(normalize_mark_list(marks))

-    def setall(self, funcargs, id, param):
-        for x in funcargs:
-            self._checkargnotcontained(x)
-        self.funcargs.update(funcargs)
-        if id is not NOTSET:
-            self._idlist.append(id)
-        if param is not NOTSET:
-            assert self._globalparam is NOTSET
-            self._globalparam = param
-        for arg in funcargs:
-            self._arg2scopenum[arg] = fixtures.scopenum_function
-

 class Metafunc(fixtures.FuncargnamesCompatAttr):
     """
@@ -1076,12 +1040,9 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
        * "params" if the argname should be the parameter of a fixture of the same name.
        * "funcargs" if the argname should be a parameter to the parametrized test function.
        """
-        valtypes = {}
-        if indirect is True:
-            valtypes = dict.fromkeys(argnames, "params")
-        elif indirect is False:
-            valtypes = dict.fromkeys(argnames, "funcargs")
-        elif isinstance(indirect, (tuple, list)):
+        if isinstance(indirect, bool):
+            valtypes = dict.fromkeys(argnames, "params" if indirect else "funcargs")
+        elif isinstance(indirect, Sequence):
             valtypes = dict.fromkeys(argnames, "funcargs")
             for arg in indirect:
                 if arg not in argnames:
@@ -1092,6 +1053,13 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
                         pytrace=False,
                     )
                 valtypes[arg] = "params"
+        else:
+            fail(
+                "In {func}: expected Sequence or boolean for indirect, got {type}".format(
+                    type=type(indirect).__name__, func=self.function.__name__
+                ),
+                pytrace=False,
+            )
         return valtypes

     def _validate_if_using_arg_names(self, argnames, indirect):
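Editor's note: for orientation, what the tightened `indirect` handling accepts at the call site -- `True`/`False` route all argnames through same-named fixtures or plain funcargs, a sequence routes only the listed names, and anything else now fails with the message added above. A short usage sketch:

import pytest

@pytest.fixture
def x(request):
    # indirect parametrization delivers the value via request.param
    return request.param * 2

@pytest.mark.parametrize("x", [1, 2], indirect=True)
def test_doubled(x):
    assert x in (2, 4)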
@@ -1191,7 +1159,7 @@ def _idval(val, argname, idx, idfn, item, config):
         return str(val)
     elif isinstance(val, REGEX_TYPE):
         return ascii_escaped(val.pattern)
-    elif enum is not None and isinstance(val, enum.Enum):
+    elif isinstance(val, enum.Enum):
         return str(val)
     elif (inspect.isclass(val) or inspect.isfunction(val)) and hasattr(val, "__name__"):
         return val.__name__
@@ -1219,7 +1187,7 @@ def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None
     if len(set(ids)) != len(ids):
         # The ids are not unique
         duplicates = [testid for testid in ids if ids.count(testid) > 1]
-        counters = collections.defaultdict(lambda: 0)
+        counters = Counter()
         for index, testid in enumerate(ids):
             if testid in duplicates:
                 ids[index] = testid + str(counters[testid])
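Editor's note: `Counter` behaves exactly like the old `defaultdict(lambda: 0)` here -- missing keys read as 0. A sketch of the numbering scheme; the `counters[testid] += 1` increment sits just outside the hunk shown above:

from collections import Counter

ids = ["a", "b", "a"]
duplicates = [testid for testid in ids if ids.count(testid) > 1]
counters = Counter()
for index, testid in enumerate(ids):
    if testid in duplicates:
        ids[index] = testid + str(counters[testid])
        counters[testid] += 1
assert ids == ["a0", "b", "a1"]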
@@ -1408,14 +1376,11 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):
         # https://github.com/pytest-dev/pytest/issues/4569

         self.keywords.update(
-            dict.fromkeys(
-                [
-                    mark.name
-                    for mark in self.iter_markers()
-                    if mark.name not in self.keywords
-                ],
-                True,
-            )
+            {
+                mark.name: True
+                for mark in self.iter_markers()
+                if mark.name not in self.keywords
+            }
         )

         if fixtureinfo is None:
===== file boundary (filename not preserved in this extract) =====
@@ -1,23 +1,33 @@
 import inspect
 import math
 import pprint
-import sys
-import warnings
 from collections.abc import Iterable
 from collections.abc import Mapping
 from collections.abc import Sized
 from decimal import Decimal
 from itertools import filterfalse
 from numbers import Number
+from types import TracebackType
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Generic
+from typing import Optional
+from typing import overload
+from typing import Pattern
+from typing import Tuple
+from typing import TypeVar
 from typing import Union

 from more_itertools.more import always_iterable

 import _pytest._code
-from _pytest import deprecated
 from _pytest.compat import STRING_TYPES
 from _pytest.outcomes import fail

+if False:  # TYPE_CHECKING
+    from typing import Type  # noqa: F401 (used in type string)
+
 BASE_TYPE = (type, STRING_TYPES)

@@ -530,8 +540,35 @@ def _is_numpy_array(obj):

 # builtin pytest.raises helper

+_E = TypeVar("_E", bound=BaseException)
+
+
+@overload
+def raises(
+    expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
+    *,
+    match: Optional[Union[str, Pattern]] = ...
+) -> "RaisesContext[_E]":
+    ...  # pragma: no cover
+
+
+@overload
+def raises(
+    expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
+    func: Callable,
+    *args: Any,
+    match: Optional[str] = ...,
+    **kwargs: Any
+) -> Optional[_pytest._code.ExceptionInfo[_E]]:
+    ...  # pragma: no cover
+
+
-def raises(expected_exception, *args, **kwargs):
+def raises(
+    expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
+    *args: Any,
+    match: Optional[Union[str, Pattern]] = None,
+    **kwargs: Any
+) -> Union["RaisesContext[_E]", Optional[_pytest._code.ExceptionInfo[_E]]]:
     r"""
     Assert that a code block/function call raises ``expected_exception``
     or raise a failure exception otherwise.
@@ -544,8 +581,6 @@ def raises(expected_exception, *args, **kwargs):

     __ https://docs.python.org/3/library/re.html#regular-expression-syntax

-    :kwparam message: **(deprecated since 4.1)** if specified, provides a custom failure message
-        if the exception is not raised. See :ref:`the deprecation docs <raises message deprecated>` for a workaround.

     .. currentmodule:: _pytest._code

@@ -652,70 +687,71 @@ def raises(expected_exception, *args, **kwargs):
     for exc in filterfalse(
         inspect.isclass, always_iterable(expected_exception, BASE_TYPE)
     ):
-        msg = (
-            "exceptions must be old-style classes or"
-            " derived from BaseException, not %s"
-        )
+        msg = "exceptions must be derived from BaseException, not %s"
         raise TypeError(msg % type(exc))

     message = "DID NOT RAISE {}".format(expected_exception)
-    match_expr = None

     if not args:
-        if "message" in kwargs:
-            message = kwargs.pop("message")
-            warnings.warn(deprecated.RAISES_MESSAGE_PARAMETER, stacklevel=2)
-        if "match" in kwargs:
-            match_expr = kwargs.pop("match")
         if kwargs:
             msg = "Unexpected keyword arguments passed to pytest.raises: "
             msg += ", ".join(sorted(kwargs))
+            msg += "\nUse context-manager form instead?"
             raise TypeError(msg)
-        return RaisesContext(expected_exception, message, match_expr)
-    elif isinstance(args[0], str):
-        warnings.warn(deprecated.RAISES_EXEC, stacklevel=2)
-        code, = args
-        assert isinstance(code, str)
-        frame = sys._getframe(1)
-        loc = frame.f_locals.copy()
-        loc.update(kwargs)
-        # print "raises frame scope: %r" % frame.f_locals
-        try:
-            code = _pytest._code.Source(code).compile(_genframe=frame)
-            exec(code, frame.f_globals, loc)
-            # XXX didn't mean f_globals == f_locals something special?
-            # this is destroyed here ...
-        except expected_exception:
-            return _pytest._code.ExceptionInfo.from_current()
+        return RaisesContext(expected_exception, message, match)
     else:
         func = args[0]
+        if not callable(func):
+            raise TypeError(
+                "{!r} object (type: {}) must be callable".format(func, type(func))
+            )
         try:
             func(*args[1:], **kwargs)
-        except expected_exception:
-            return _pytest._code.ExceptionInfo.from_current()
+        except expected_exception as e:
+            # We just caught the exception - there is a traceback.
+            assert e.__traceback__ is not None
+            return _pytest._code.ExceptionInfo.from_exc_info(
+                (type(e), e, e.__traceback__)
+            )
     fail(message)


 raises.Exception = fail.Exception  # type: ignore


-class RaisesContext:
-    def __init__(self, expected_exception, message, match_expr):
+class RaisesContext(Generic[_E]):
+    def __init__(
+        self,
+        expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
+        message: str,
+        match_expr: Optional[Union[str, Pattern]] = None,
+    ) -> None:
         self.expected_exception = expected_exception
         self.message = message
         self.match_expr = match_expr
-        self.excinfo = None
+        self.excinfo = None  # type: Optional[_pytest._code.ExceptionInfo[_E]]

-    def __enter__(self):
+    def __enter__(self) -> _pytest._code.ExceptionInfo[_E]:
         self.excinfo = _pytest._code.ExceptionInfo.for_later()
         return self.excinfo

-    def __exit__(self, *tp):
+    def __exit__(
+        self,
+        exc_type: Optional["Type[BaseException]"],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> bool:
         __tracebackhide__ = True
-        if tp[0] is None:
+        if exc_type is None:
             fail(self.message)
-        self.excinfo.__init__(tp)
-        suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
-        if self.match_expr is not None and suppress_exception:
+        assert self.excinfo is not None
+        if not issubclass(exc_type, self.expected_exception):
+            return False
+        # Cast to narrow the exception type now that it's verified.
+        exc_info = cast(
+            Tuple["Type[_E]", _E, TracebackType], (exc_type, exc_val, exc_tb)
+        )
+        self.excinfo.fill_unfilled(exc_info)
+        if self.match_expr is not None:
             self.excinfo.match(self.match_expr)
-        return suppress_exception
+        return True
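Editor's note: a brief usage sketch of the two call forms the new overloads describe -- the context-manager form (keyword-only `match`) yields the typed `ExceptionInfo`, while the callable form returns it directly:

import pytest

with pytest.raises(ValueError, match=r"bad .* value") as excinfo:
    raise ValueError("bad input value")
assert excinfo.type is ValueError

def divide(a, b):
    return a / b

excinfo = pytest.raises(ZeroDivisionError, divide, 1, 0)
assert excinfo.value.args  # the caught exception is exposed on .value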
===== file boundary (filename not preserved in this extract) =====
@@ -1,15 +1,23 @@
 """ recording warnings during test function execution. """
-import inspect
 import re
-import sys
 import warnings
+from types import TracebackType
+from typing import Any
+from typing import Callable
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import overload
+from typing import Pattern
+from typing import Tuple
+from typing import Union

-import _pytest._code
-from _pytest.deprecated import PYTEST_WARNS_UNKNOWN_KWARGS
-from _pytest.deprecated import WARNS_EXEC
 from _pytest.fixtures import yield_fixture
 from _pytest.outcomes import fail

+if False:  # TYPE_CHECKING
+    from typing import Type
+

 @yield_fixture
 def recwarn():
@@ -46,7 +54,32 @@ def deprecated_call(func=None, *args, **kwargs):
     return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs)


-def warns(expected_warning, *args, **kwargs):
+@overload
+def warns(
+    expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
+    *,
+    match: Optional[Union[str, Pattern]] = ...
+) -> "WarningsChecker":
+    ...  # pragma: no cover
+
+
+@overload
+def warns(
+    expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
+    func: Callable,
+    *args: Any,
+    match: Optional[Union[str, Pattern]] = ...,
+    **kwargs: Any
+) -> Union[Any]:
+    ...  # pragma: no cover
+
+
+def warns(
+    expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
+    *args: Any,
+    match: Optional[Union[str, Pattern]] = None,
+    **kwargs: Any
+) -> Union["WarningsChecker", Any]:
     r"""Assert that code raises a particular class of warning.

     Specifically, the parameter ``expected_warning`` can be a warning class or
@@ -80,25 +113,18 @@ def warns(expected_warning, *args, **kwargs):
     """
     __tracebackhide__ = True
     if not args:
-        match_expr = kwargs.pop("match", None)
         if kwargs:
-            warnings.warn(
-                PYTEST_WARNS_UNKNOWN_KWARGS.format(args=sorted(kwargs)), stacklevel=2
-            )
-        return WarningsChecker(expected_warning, match_expr=match_expr)
-    elif isinstance(args[0], str):
-        warnings.warn(WARNS_EXEC, stacklevel=2)
-        code, = args
-        assert isinstance(code, str)
-        frame = sys._getframe(1)
-        loc = frame.f_locals.copy()
-        loc.update(kwargs)
-
-        with WarningsChecker(expected_warning):
-            code = _pytest._code.Source(code).compile()
-            exec(code, frame.f_globals, loc)
+            msg = "Unexpected keyword arguments passed to pytest.warns: "
+            msg += ", ".join(sorted(kwargs))
+            msg += "\nUse context-manager form instead?"
+            raise TypeError(msg)
+        return WarningsChecker(expected_warning, match_expr=match)
     else:
         func = args[0]
+        if not callable(func):
+            raise TypeError(
+                "{!r} object (type: {}) must be callable".format(func, type(func))
+            )
         with WarningsChecker(expected_warning):
             return func(*args[1:], **kwargs)
@@ -112,26 +138,26 @@ class WarningsRecorder(warnings.catch_warnings):
     def __init__(self):
         super().__init__(record=True)
         self._entered = False
-        self._list = []
+        self._list = []  # type: List[warnings._Record]

     @property
-    def list(self):
+    def list(self) -> List["warnings._Record"]:
         """The list of recorded warnings."""
         return self._list

-    def __getitem__(self, i):
+    def __getitem__(self, i: int) -> "warnings._Record":
         """Get a recorded warning by index."""
         return self._list[i]

-    def __iter__(self):
+    def __iter__(self) -> Iterator["warnings._Record"]:
         """Iterate through the recorded warnings."""
         return iter(self._list)

-    def __len__(self):
+    def __len__(self) -> int:
         """The number of recorded warnings."""
         return len(self._list)

-    def pop(self, cls=Warning):
+    def pop(self, cls: "Type[Warning]" = Warning) -> "warnings._Record":
         """Pop the first recorded warning, raise exception if not exists."""
         for i, w in enumerate(self._list):
             if issubclass(w.category, cls):
@@ -139,54 +165,80 @@ class WarningsRecorder(warnings.catch_warnings):
         __tracebackhide__ = True
         raise AssertionError("%r not found in warning list" % cls)

-    def clear(self):
+    def clear(self) -> None:
         """Clear the list of recorded warnings."""
         self._list[:] = []

-    def __enter__(self):
+    # Type ignored because it doesn't exactly warnings.catch_warnings.__enter__
+    # -- it returns a List but we only emulate one.
+    def __enter__(self) -> "WarningsRecorder":  # type: ignore
         if self._entered:
             __tracebackhide__ = True
             raise RuntimeError("Cannot enter %r twice" % self)
-        self._list = super().__enter__()
+        _list = super().__enter__()
+        # record=True means it's None.
+        assert _list is not None
+        self._list = _list
         warnings.simplefilter("always")
         return self

-    def __exit__(self, *exc_info):
+    def __exit__(
+        self,
+        exc_type: Optional["Type[BaseException]"],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> bool:
         if not self._entered:
             __tracebackhide__ = True
             raise RuntimeError("Cannot exit %r without entering first" % self)

-        super().__exit__(*exc_info)
+        super().__exit__(exc_type, exc_val, exc_tb)

         # Built-in catch_warnings does not reset entered state so we do it
         # manually here for this context manager to become reusable.
         self._entered = False

+        return False
+

 class WarningsChecker(WarningsRecorder):
-    def __init__(self, expected_warning=None, match_expr=None):
+    def __init__(
+        self,
+        expected_warning: Optional[
+            Union["Type[Warning]", Tuple["Type[Warning]", ...]]
+        ] = None,
+        match_expr: Optional[Union[str, Pattern]] = None,
+    ) -> None:
         super().__init__()

-        msg = "exceptions must be old-style classes or derived from Warning, not %s"
-        if isinstance(expected_warning, tuple):
+        msg = "exceptions must be derived from Warning, not %s"
+        if expected_warning is None:
+            expected_warning_tup = None
+        elif isinstance(expected_warning, tuple):
             for exc in expected_warning:
-                if not inspect.isclass(exc):
+                if not issubclass(exc, Warning):
                     raise TypeError(msg % type(exc))
-        elif inspect.isclass(expected_warning):
-            expected_warning = (expected_warning,)
-        elif expected_warning is not None:
+            expected_warning_tup = expected_warning
+        elif issubclass(expected_warning, Warning):
+            expected_warning_tup = (expected_warning,)
+        else:
             raise TypeError(msg % type(expected_warning))

-        self.expected_warning = expected_warning
+        self.expected_warning = expected_warning_tup
         self.match_expr = match_expr

-    def __exit__(self, *exc_info):
-        super().__exit__(*exc_info)
+    def __exit__(
+        self,
+        exc_type: Optional["Type[BaseException]"],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> bool:
+        super().__exit__(exc_type, exc_val, exc_tb)

         __tracebackhide__ = True

         # only check if we're not currently handling an exception
-        if all(a is None for a in exc_info):
+        if exc_type is None and exc_val is None and exc_tb is None:
             if self.expected_warning is not None:
                 if not any(issubclass(r.category, self.expected_warning) for r in self):
                     __tracebackhide__ = True
@@ -211,3 +263,4 @@ class WarningsChecker(WarningsRecorder):
                         [each.message for each in self],
                     )
                 )
+        return False
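Editor's note: with the string/exec form removed, `pytest.warns` keeps the same two entry points as `raises`; note that an unknown keyword now raises `TypeError` instead of emitting a deprecation warning. A short sketch:

import warnings
import pytest

with pytest.warns(UserWarning, match="deprecated"):
    warnings.warn("this API is deprecated", UserWarning)

try:
    pytest.warns(UserWarning, foo="hello")
except TypeError as exc:
    assert "Unexpected keyword arguments" in str(exc)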
===== file boundary (filename not preserved in this extract) =====
@@ -249,10 +249,11 @@ def pytest_make_collect_report(collector):
     if not call.excinfo:
         outcome = "passed"
     else:
-        from _pytest import nose
-        skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
-        if call.excinfo.errisinstance(skip_exceptions):
+        skip_exceptions = [Skipped]
+        unittest = sys.modules.get("unittest")
+        if unittest is not None:
+            skip_exceptions.append(unittest.SkipTest)
+        if call.excinfo.errisinstance(tuple(skip_exceptions)):
             outcome = "skipped"
             r = collector._repr_failure_py(call.excinfo, "line").reprcrash
             longrepr = (str(r.path), r.lineno, r.message)
@@ -277,10 +278,7 @@ class SetupState:
         self._finalizers = {}

     def addfinalizer(self, finalizer, colitem):
-        """ attach a finalizer to the given colitem.
-        if colitem is None, this will add a finalizer that
-        is called at the end of teardown_all().
-        """
+        """ attach a finalizer to the given colitem. """
         assert colitem and not isinstance(colitem, tuple)
         assert callable(finalizer)
         # assert colitem in self.stack  # some unit tests don't setup stack :/
@@ -308,12 +306,9 @@ class SetupState:

     def _teardown_with_finalization(self, colitem):
         self._callfinalizers(colitem)
-        if hasattr(colitem, "teardown"):
-            colitem.teardown()
+        colitem.teardown()
         for colitem in self._finalizers:
-            assert (
-                colitem is None or colitem in self.stack or isinstance(colitem, tuple)
-            )
+            assert colitem in self.stack

     def teardown_all(self):
         while self.stack:
===== file boundary (filename not preserved in this extract) =====
@@ -4,6 +4,7 @@ This is a good source for looking at the various reporting hooks.
 """
 import argparse
 import collections
+import datetime
 import platform
 import sys
 import time
@@ -861,7 +862,7 @@ class TerminalReporter:
     def summary_stats(self):
         session_duration = time.time() - self._sessionstarttime
         (line, color) = build_summary_stats_line(self.stats)
-        msg = "{} in {:.2f} seconds".format(line, session_duration)
+        msg = "{} in {}".format(line, format_session_duration(session_duration))
         markup = {color: True, "bold": True}

         if self.verbosity >= 0:
@@ -1055,3 +1056,12 @@ def _plugin_nameversions(plugininfo):
         if name not in values:
             values.append(name)
     return values
+
+
+def format_session_duration(seconds):
+    """Format the given seconds in a human readable manner to show in the final summary"""
+    if seconds < 60:
+        return "{:.2f}s".format(seconds)
+    else:
+        dt = datetime.timedelta(seconds=int(seconds))
+        return "{:.2f}s ({})".format(seconds, dt)
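Editor's note: sample output of the helper added above, assuming it lands in `_pytest.terminal` as shown (the extract does not preserve the filename):

from _pytest.terminal import format_session_duration

assert format_session_duration(4.2) == "4.20s"
# over a minute, the rounded timedelta is appended
assert format_session_duration(125.7) == "125.70s (0:02:05)"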
===== file boundary (filename not preserved in this extract) =====
@@ -2,7 +2,6 @@
 import os
 import re
 import tempfile
-import warnings

 import attr
 import py
@@ -88,19 +87,6 @@ class TempdirFactory:
     _tmppath_factory = attr.ib()

-    def ensuretemp(self, string, dir=1):
-        """ (deprecated) return temporary directory path with
-            the given string as the trailing part.  It is usually
-            better to use the 'tmpdir' function argument which
-            provides an empty unique-per-test-invocation directory
-            and is guaranteed to be empty.
-        """
-        # py.log._apiwarn(">1.1", "use tmpdir function argument")
-        from .deprecated import PYTEST_ENSURETEMP
-
-        warnings.warn(PYTEST_ENSURETEMP, stacklevel=2)
-        return self.getbasetemp().ensure(string, dir=dir)
-
     def mktemp(self, basename, numbered=True):
         """Create a subdirectory of the base temporary directory and return it.
         If ``numbered``, ensure the directory is unique by adding a number
@@ -138,7 +124,6 @@ def pytest_configure(config):
     config._cleanup.append(mp.undo)
     mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False)
     mp.setattr(config, "_tmpdirhandler", t, raising=False)
-    mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False)


 @pytest.fixture(scope="session")
===== file boundary (filename not preserved in this extract) =====
@@ -12,6 +12,7 @@ from _pytest.outcomes import skip
 from _pytest.outcomes import xfail
 from _pytest.python import Class
 from _pytest.python import Function
+from _pytest.runner import CallInfo


 def pytest_pycollect_makeitem(collector, name, obj):

@@ -229,6 +230,14 @@ def pytest_runtest_makereport(item, call):
     except AttributeError:
         pass

+    unittest = sys.modules.get("unittest")
+    if unittest and call.excinfo and call.excinfo.errisinstance(unittest.SkipTest):
+        # let's substitute the excinfo with a pytest.skip one
+        call2 = CallInfo.from_call(
+            lambda: pytest.skip(str(call.excinfo.value)), call.when
+        )
+        call.excinfo = call2.excinfo
+

 # twisted trial support
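Editor's note: the effect of the substitution above, seen from a test's point of view -- a bare `unittest.SkipTest` raised inside a pytest-run test is reported as a regular pytest skip rather than a failure. A minimal sketch:

import unittest

def test_skiptest_is_reported_as_skip():
    # pytest reports this test as skipped with the given reason
    raise unittest.SkipTest("not supported on this platform")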
===== file boundary (filename not preserved in this extract) =====
@@ -103,16 +103,6 @@ class PytestUnknownMarkWarning(PytestWarning):
     __module__ = "pytest"


-class RemovedInPytest4Warning(PytestDeprecationWarning):
-    """
-    Bases: :class:`pytest.PytestDeprecationWarning`.
-
-    Warning class for features scheduled to be removed in pytest 4.0.
-    """
-
-    __module__ = "pytest"
-
-
 @attr.s
 class UnformattedWarning:
     """Used to hold warnings that need to format their message at runtime, as opposed to a direct message.
===== file boundary (filename not preserved in this extract) =====
@@ -4,8 +4,6 @@ from contextlib import contextmanager

 import pytest

-SHOW_PYTEST_WARNINGS_ARG = "-Walways::pytest.RemovedInPytest4Warning"
-

 def _setoption(wmod, arg):
     """

@@ -74,9 +72,6 @@ def catch_warnings_for_item(config, ihook, when, item):
         warnings.filterwarnings("always", category=DeprecationWarning)
         warnings.filterwarnings("always", category=PendingDeprecationWarning)

-        warnings.filterwarnings("error", category=pytest.RemovedInPytest4Warning)
-        warnings.filterwarnings("error", category=pytest.PytestDeprecationWarning)
-
         # filters should have this precedence: mark, cmdline options, ini
         # filters should be applied in the inverse order of precedence
         for arg in inifilters:
===== file boundary (filename not preserved in this extract) =====
@@ -44,7 +44,7 @@ from _pytest.warning_types import PytestExperimentalApiWarning
 from _pytest.warning_types import PytestUnhandledCoroutineWarning
 from _pytest.warning_types import PytestUnknownMarkWarning
 from _pytest.warning_types import PytestWarning
-from _pytest.warning_types import RemovedInPytest4Warning

 set_trace = __pytestPDB.set_trace

@@ -84,7 +84,6 @@ __all__ = [
     "PytestWarning",
     "raises",
     "register_assert_rewrite",
-    "RemovedInPytest4Warning",
     "Session",
     "set_trace",
     "skip",
===== file boundary (filename not preserved in this extract) =====
@@ -4,12 +4,11 @@ import textwrap
 import types

 import attr
-import importlib_metadata
 import py

 import pytest
+from _pytest.compat import importlib_metadata
 from _pytest.main import ExitCode
-from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG


 def prepend_pythonpath(*dirs):

@@ -343,7 +342,7 @@ class TestGeneralUsage:
         """
         )
         p = testdir.makepyfile("""def test_func(x): pass""")
-        res = testdir.runpytest(p, SHOW_PYTEST_WARNINGS_ARG)
+        res = testdir.runpytest(p)
         assert res.ret == 0
         res.stdout.fnmatch_lines(["*1 skipped*"])

@@ -356,9 +355,7 @@ class TestGeneralUsage:
                 pass
         """
         )
-        res = testdir.runpytest(
-            p.basename + "::" + "test_func[1]", SHOW_PYTEST_WARNINGS_ARG
-        )
+        res = testdir.runpytest(p.basename + "::" + "test_func[1]")
         assert res.ret == 0
         res.stdout.fnmatch_lines(["*1 passed*"])
===== file boundary (filename not preserved in this extract) =====
@@ -58,7 +58,7 @@ class TWMock:
     fullwidth = 80


-def test_excinfo_simple():
+def test_excinfo_simple() -> None:
     try:
         raise ValueError
     except ValueError:

@@ -66,6 +66,14 @@ def test_excinfo_simple():
     assert info.type == ValueError


+def test_excinfo_from_exc_info_simple():
+    try:
+        raise ValueError
+    except ValueError as e:
+        info = _pytest._code.ExceptionInfo.from_exc_info((type(e), e, e.__traceback__))
+    assert info.type == ValueError
+
+
 def test_excinfo_getstatement():
     def g():
         raise ValueError
===== file boundary (filename not preserved in this extract) =====
@@ -1,39 +1,5 @@
-import os
-
 import pytest
 from _pytest import deprecated
-from _pytest.warning_types import PytestDeprecationWarning
-from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
-
-pytestmark = pytest.mark.pytester_example_path("deprecated")
-
-
-def test_pytest_setup_cfg_unsupported(testdir):
-    testdir.makefile(
-        ".cfg",
-        setup="""
-        [pytest]
-        addopts = --verbose
-    """,
-    )
-    with pytest.raises(pytest.fail.Exception):
-        testdir.runpytest()
-
-
-def test_pytest_custom_cfg_unsupported(testdir):
-    testdir.makefile(
-        ".cfg",
-        custom="""
-        [pytest]
-        addopts = --verbose
-    """,
-    )
-    with pytest.raises(pytest.fail.Exception):
-        testdir.runpytest("-c", "custom.cfg")
-
-
-def test_getfuncargvalue_is_deprecated(request):
-    pytest.deprecated_call(request.getfuncargvalue, "tmpdir")


 @pytest.mark.filterwarnings("default")
@@ -78,142 +44,3 @@ def test_external_plugins_integrated(testdir, plugin):

     with pytest.warns(pytest.PytestConfigWarning):
         testdir.parseconfig("-p", plugin)
-
-
-def test_raises_message_argument_deprecated():
-    with pytest.warns(pytest.PytestDeprecationWarning):
-        with pytest.raises(RuntimeError, message="foobar"):
-            raise RuntimeError
-
-
-def test_pytest_plugins_in_non_top_level_conftest_deprecated(testdir):
-    from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
-
-    testdir.makepyfile(
-        **{
-            "subdirectory/conftest.py": """
-            pytest_plugins=['capture']
-        """
-        }
-    )
-    testdir.makepyfile(
-        """
-        def test_func():
-            pass
-        """
-    )
-    res = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)
-    assert res.ret == 2
-    msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
-    res.stdout.fnmatch_lines(
-        ["*{msg}*".format(msg=msg), "*subdirectory{sep}conftest.py*".format(sep=os.sep)]
-    )
-
-
-@pytest.mark.parametrize("use_pyargs", [True, False])
-def test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs(
-    testdir, use_pyargs
-):
-    """When using --pyargs, do not emit the warning about non-top-level conftest warnings (#4039, #4044)"""
-    from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
-
-    files = {
-        "src/pkg/__init__.py": "",
-        "src/pkg/conftest.py": "",
-        "src/pkg/test_root.py": "def test(): pass",
-        "src/pkg/sub/__init__.py": "",
-        "src/pkg/sub/conftest.py": "pytest_plugins=['capture']",
-        "src/pkg/sub/test_bar.py": "def test(): pass",
-    }
-    testdir.makepyfile(**files)
-    testdir.syspathinsert(testdir.tmpdir.join("src"))
-
-    args = ("--pyargs", "pkg") if use_pyargs else ()
-    args += (SHOW_PYTEST_WARNINGS_ARG,)
-    res = testdir.runpytest(*args)
-    assert res.ret == (0 if use_pyargs else 2)
-    msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
-    if use_pyargs:
-        assert msg not in res.stdout.str()
-    else:
-        res.stdout.fnmatch_lines(["*{msg}*".format(msg=msg)])
-
-
-def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest(
-    testdir
-):
-    from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
-
-    subdirectory = testdir.tmpdir.join("subdirectory")
-    subdirectory.mkdir()
-    testdir.makeconftest(
-        """
-        pytest_plugins=['capture']
-    """
-    )
-    testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
-
-    testdir.makepyfile(
-        """
-        def test_func():
-            pass
-        """
-    )
-
-    res = testdir.runpytest_subprocess()
-    assert res.ret == 2
-    msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
-    res.stdout.fnmatch_lines(
-        ["*{msg}*".format(msg=msg), "*subdirectory{sep}conftest.py*".format(sep=os.sep)]
-    )
-
-
-def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives(
-    testdir
-):
-    from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
-
-    subdirectory = testdir.tmpdir.join("subdirectory")
-    subdirectory.mkdir()
-    testdir.makeconftest(
-        """
-        pass
-    """
-    )
-    testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
-
-    testdir.makeconftest(
-        """
-        import warnings
-        warnings.filterwarnings('always', category=DeprecationWarning)
-        pytest_plugins=['capture']
-    """
-    )
-    testdir.makepyfile(
-        """
-        def test_func():
-            pass
-        """
-    )
-    res = testdir.runpytest_subprocess()
-    assert res.ret == 0
-    msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
-    assert msg not in res.stdout.str()
-
-
-def test_fixture_named_request(testdir):
-    testdir.copy_example()
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines(
-        [
-            "*'request' is a reserved name for fixtures and will raise an error in future versions"
-        ]
-    )
-
-
-def test_pytest_warns_unknown_kwargs():
-    with pytest.warns(
-        PytestDeprecationWarning,
-        match=r"pytest.warns\(\) got unexpected keyword arguments: \['foo'\]",
-    ):
-        pytest.warns(UserWarning, foo="hello")
@ -0,0 +1,147 @@
|
||||||
|
<?xml version="1.0" encoding="UTF-8" ?>
|
||||||
|
<!--
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014, Gregory Boissinot
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
-->
|
||||||
|
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
|
||||||
|
<xs:simpleType name="SUREFIRE_TIME">
|
||||||
|
<xs:restriction base="xs:string">
|
||||||
|
<xs:pattern value="(([0-9]{0,3},)*[0-9]{3}|[0-9]{0,3})*(\.[0-9]{0,3})?"/>
|
||||||
|
</xs:restriction>
|
||||||
|
</xs:simpleType>
|
||||||
|
|
||||||
|
<xs:complexType name="rerunType" mixed="true"> <!-- mixed (XML contains text) to be compatible with version previous than 2.22.1 -->
|
||||||
|
<xs:sequence>
|
||||||
|
<xs:element name="stackTrace" type="xs:string" minOccurs="0" /> <!-- optional to be compatible with version previous than 2.22.1 -->
|
||||||
|
<xs:element name="system-out" type="xs:string" minOccurs="0" />
|
||||||
|
<xs:element name="system-err" type="xs:string" minOccurs="0" />
|
||||||
|
</xs:sequence>
|
||||||
|
<xs:attribute name="message" type="xs:string" />
|
||||||
|
<xs:attribute name="type" type="xs:string" use="required" />
|
||||||
|
</xs:complexType>
|
||||||
|
|
||||||
|
<xs:element name="failure">
|
||||||
|
<xs:complexType mixed="true">
|
||||||
|
<xs:attribute name="type" type="xs:string"/>
|
||||||
|
<xs:attribute name="message" type="xs:string"/>
|
||||||
|
</xs:complexType>
|
||||||
|
</xs:element>
|
||||||
|
|
||||||
|
<xs:element name="error">
|
||||||
|
<xs:complexType mixed="true">
|
||||||
|
<xs:attribute name="type" type="xs:string"/>
|
||||||
|
<xs:attribute name="message" type="xs:string"/>
|
||||||
|
</xs:complexType>
|
||||||
|
</xs:element>
|
||||||
|
|
||||||
|
<xs:element name="skipped">
|
||||||
|
<xs:complexType mixed="true">
|
||||||
|
<xs:attribute name="type" type="xs:string"/>
|
||||||
|
<xs:attribute name="message" type="xs:string"/>
|
||||||
|
</xs:complexType>
|
||||||
|
</xs:element>
|
||||||
|
|
||||||
|
<xs:element name="properties">
|
||||||
|
<xs:complexType>
|
||||||
|
<xs:sequence>
|
||||||
|
<xs:element ref="property" minOccurs="0" maxOccurs="unbounded"/>
|
||||||
|
</xs:sequence>
|
||||||
|
</xs:complexType>
|
||||||
|
</xs:element>
|
||||||
|
|
    <xs:element name="property">
        <xs:complexType>
            <xs:attribute name="name" type="xs:string" use="required"/>
            <xs:attribute name="value" type="xs:string" use="required"/>
        </xs:complexType>
    </xs:element>

    <xs:element name="system-err" type="xs:string"/>
    <xs:element name="system-out" type="xs:string"/>
    <xs:element name="rerunFailure" type="rerunType"/>
    <xs:element name="rerunError" type="rerunType"/>
    <xs:element name="flakyFailure" type="rerunType"/>
    <xs:element name="flakyError" type="rerunType"/>

    <xs:element name="testcase">
        <xs:complexType>
            <xs:sequence>
                <xs:choice minOccurs="0" maxOccurs="unbounded">
                    <xs:element ref="skipped"/>
                    <xs:element ref="error"/>
                    <xs:element ref="failure"/>
                    <xs:element ref="rerunFailure" minOccurs="0" maxOccurs="unbounded"/>
                    <xs:element ref="rerunError" minOccurs="0" maxOccurs="unbounded"/>
                    <xs:element ref="flakyFailure" minOccurs="0" maxOccurs="unbounded"/>
                    <xs:element ref="flakyError" minOccurs="0" maxOccurs="unbounded"/>
                    <xs:element ref="system-out"/>
                    <xs:element ref="system-err"/>
                </xs:choice>
            </xs:sequence>
            <xs:attribute name="name" type="xs:string" use="required"/>
            <xs:attribute name="time" type="xs:string"/>
            <xs:attribute name="classname" type="xs:string"/>
            <xs:attribute name="group" type="xs:string"/>
        </xs:complexType>
    </xs:element>

    <xs:element name="testsuite">
        <xs:complexType>
            <xs:choice minOccurs="0" maxOccurs="unbounded">
                <xs:element ref="testsuite"/>
                <xs:element ref="properties"/>
                <xs:element ref="testcase"/>
                <xs:element ref="system-out"/>
                <xs:element ref="system-err"/>
            </xs:choice>
            <xs:attribute name="name" type="xs:string" use="required"/>
            <xs:attribute name="tests" type="xs:string" use="required"/>
            <xs:attribute name="failures" type="xs:string" use="required"/>
            <xs:attribute name="errors" type="xs:string" use="required"/>
            <xs:attribute name="group" type="xs:string"/>
            <xs:attribute name="time" type="SUREFIRE_TIME"/>
            <xs:attribute name="skipped" type="xs:string"/>
            <xs:attribute name="timestamp" type="xs:string"/>
            <xs:attribute name="hostname" type="xs:string"/>
            <xs:attribute name="id" type="xs:string"/>
            <xs:attribute name="package" type="xs:string"/>
            <xs:attribute name="file" type="xs:string"/>
            <xs:attribute name="log" type="xs:string"/>
            <xs:attribute name="url" type="xs:string"/>
            <xs:attribute name="version" type="xs:string"/>
        </xs:complexType>
    </xs:element>

    <xs:element name="testsuites">
        <xs:complexType>
            <xs:sequence>
                <xs:element ref="testsuite" minOccurs="0" maxOccurs="unbounded"/>
            </xs:sequence>
            <xs:attribute name="name" type="xs:string"/>
            <xs:attribute name="time" type="SUREFIRE_TIME"/>
            <xs:attribute name="tests" type="xs:string"/>
            <xs:attribute name="failures" type="xs:string"/>
            <xs:attribute name="errors" type="xs:string"/>
        </xs:complexType>
    </xs:element>

</xs:schema>
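The schema above is what the new xunit2 reports get validated against. A minimal
sketch of that validation step, assuming xmlschema is installed and both files
exist at the given paths:

    import xmlschema

    # Parse the schema once, then validate a generated report against it;
    # xmlschema raises on an invalid document.
    schema = xmlschema.XMLSchema("junit-10.xsd")
    schema.validate("junit.xml")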
@@ -45,10 +45,21 @@ def test_exceptions():
     assert "unknown" in s2


+def test_buggy_builtin_repr():
+    # Simulate a case where a repr for a builtin raises.
+    # reprlib dispatches by type name, so use "int".
+
+    class int:
+        def __repr__(self):
+            raise ValueError("Buggy repr!")
+
+    assert "Buggy" in saferepr(int())
+
+
 def test_big_repr():
     from _pytest._io.saferepr import SafeRepr

-    assert len(saferepr(range(1000))) <= len("[" + SafeRepr().maxlist * "1000" + "]")
+    assert len(saferepr(range(1000))) <= len("[" + SafeRepr(0).maxlist * "1000" + "]")


 def test_repr_on_newstyle():
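For context, saferepr never lets a broken __repr__ escape; a minimal sketch of
the behaviour exercised above (the exact placeholder text is an assumption):

    from _pytest._io.saferepr import saferepr

    class Boom:
        def __repr__(self):
            raise ValueError("Buggy repr!")

    # Instead of raising, saferepr returns a placeholder naming the exception,
    # e.g. "<[ValueError('Buggy repr!') raised in repr()] Boom object at 0x...>"
    print(saferepr(Boom()))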
@@ -946,7 +946,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
         expected_lines.extend(
             [
                 "*test_collection_collect_only_live_logging.py::test_simple*",
-                "no tests ran in * seconds",
+                "no tests ran in 0.[0-9][0-9]s",
             ]
         )
     elif verbose == "-qq":
@@ -7,7 +7,6 @@ from _pytest.fixtures import FixtureLookupError
 from _pytest.fixtures import FixtureRequest
 from _pytest.pathlib import Path
 from _pytest.pytester import get_public_names
-from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG


 def test_getfuncargnames_functions():
@@ -639,8 +638,7 @@ class TestRequestBasic:
         result = testdir.runpytest()
         result.stdout.fnmatch_lines(["* 2 passed in *"])

-    @pytest.mark.parametrize("getfixmethod", ("getfixturevalue", "getfuncargvalue"))
-    def test_getfixturevalue(self, testdir, getfixmethod):
+    def test_getfixturevalue(self, testdir):
         item = testdir.getitem(
             """
             import pytest

@@ -653,30 +651,17 @@ class TestRequestBasic:
             def test_func(something): pass
         """
         )
-        import contextlib
-
-        if getfixmethod == "getfuncargvalue":
-            warning_expectation = pytest.warns(DeprecationWarning)
-        else:
-            # see #1830 for a cleaner way to accomplish this
-            @contextlib.contextmanager
-            def expecting_no_warning():
-                yield
-
-            warning_expectation = expecting_no_warning()
-
         req = item._request
-        with warning_expectation:
-            fixture_fetcher = getattr(req, getfixmethod)
-            with pytest.raises(FixtureLookupError):
-                fixture_fetcher("notexists")
-            val = fixture_fetcher("something")
-            assert val == 1
-            val = fixture_fetcher("something")
-            assert val == 1
-            val2 = fixture_fetcher("other")
-            assert val2 == 2
-            val2 = fixture_fetcher("other")  # see about caching
-            assert val2 == 2
+        with pytest.raises(FixtureLookupError):
+            req.getfixturevalue("notexists")
+        val = req.getfixturevalue("something")
+        assert val == 1
+        val = req.getfixturevalue("something")
+        assert val == 1
+        val2 = req.getfixturevalue("other")
+        assert val2 == 2
+        val2 = req.getfixturevalue("other")  # see about caching
+        assert val2 == 2
         pytest._fillfuncargs(item)
         assert item.funcargs["something"] == 1
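The removal above leaves request.getfixturevalue() as the only spelling. A short
usage sketch (the fixture name is illustrative):

    import pytest

    @pytest.fixture
    def something():
        return 1

    def test_dynamic(request):
        # Resolves a fixture by name at runtime; repeated calls within the
        # same test return the cached value.
        assert request.getfixturevalue("something") == 1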
@@ -1181,21 +1166,6 @@ class TestFixtureUsages:
         values = reprec.getfailedcollections()
         assert len(values) == 1

-    def test_request_can_be_overridden(self, testdir):
-        testdir.makepyfile(
-            """
-            import pytest
-            @pytest.fixture()
-            def request(request):
-                request.a = 1
-                return request
-            def test_request(request):
-                assert request.a == 1
-            """
-        )
-        reprec = testdir.inline_run("-Wignore::pytest.PytestDeprecationWarning")
-        reprec.assertoutcome(passed=1)
-
     def test_usefixtures_marker(self, testdir):
         testdir.makepyfile(
             """
@@ -2240,7 +2210,7 @@ class TestFixtureMarker:
                 pass
         """
         )
-        result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)
+        result = testdir.runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines(
             ["*ScopeMismatch*You tried*function*session*request*"]
@@ -4028,3 +3998,14 @@ def test_fixture_param_shadowing(testdir):
     result.stdout.fnmatch_lines(["*::test_normal_fixture[[]a[]]*"])
     result.stdout.fnmatch_lines(["*::test_normal_fixture[[]b[]]*"])
     result.stdout.fnmatch_lines(["*::test_indirect[[]1[]]*"])
+
+
+def test_fixture_named_request(testdir):
+    testdir.copy_example("fixtures/test_fixture_named_request.py")
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines(
+        [
+            "*'request' is a reserved word for fixtures, use another name:",
+            "  *test_fixture_named_request.py:5",
+        ]
+    )
@@ -9,7 +9,6 @@ from hypothesis import strategies
 import pytest
 from _pytest import fixtures
 from _pytest import python
-from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG


 class TestMetafunc:
@@ -600,6 +599,17 @@ class TestMetafunc:
         assert metafunc._calls[0].funcargs == dict(x="a", y="b")
         assert metafunc._calls[0].params == {}

+    def test_parametrize_indirect_wrong_type(self):
+        def func(x, y):
+            pass
+
+        metafunc = self.Metafunc(func)
+        with pytest.raises(
+            pytest.fail.Exception,
+            match="In func: expected Sequence or boolean for indirect, got dict",
+        ):
+            metafunc.parametrize("x, y", [("a", "b")], indirect={})
+
     def test_parametrize_indirect_list_functional(self, testdir):
         """
         #714
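For reference, indirect accepts a sequence of argument names (or a boolean); a
minimal sketch of valid usage with a hypothetical fixture:

    import pytest

    @pytest.fixture
    def x(request):
        # each parametrized value arrives via request.param
        return request.param * 2

    @pytest.mark.parametrize("x", [1, 2], indirect=["x"])
    def test_indirect(x):
        assert x in (2, 4)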
@@ -915,7 +925,7 @@ class TestMetafuncFunctional:
                 assert metafunc.cls == TestClass
         """
         )
-        result = testdir.runpytest(p, "-v", SHOW_PYTEST_WARNINGS_ARG)
+        result = testdir.runpytest(p, "-v")
         result.assert_outcomes(passed=2)

     def test_two_functions(self, testdir):

@@ -931,7 +941,7 @@ class TestMetafuncFunctional:
                 assert arg1 in (10, 20)
         """
         )
-        result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines(
             [
                 "*test_func1*0*PASS*",

@@ -967,7 +977,7 @@ class TestMetafuncFunctional:
                 assert hello == "world"
         """
         )
-        result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines(["*test_myfunc*hello*PASS*", "*1 passed*"])

     def test_two_functions_not_same_instance(self, testdir):

@@ -982,7 +992,7 @@ class TestMetafuncFunctional:
                 self.x = 1
         """
         )
-        result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines(
             ["*test_func*0*PASS*", "*test_func*1*PASS*", "*2 pass*"]
         )

@@ -1000,7 +1010,7 @@ class TestMetafuncFunctional:
                 self.val = 1
         """
         )
-        result = testdir.runpytest(p, SHOW_PYTEST_WARNINGS_ARG)
+        result = testdir.runpytest(p)
         result.assert_outcomes(passed=1)

     def test_parametrize_functional2(self, testdir):

@@ -1522,7 +1532,7 @@ class TestMarkersWithParametrization:
                 assert n + 1 == expected
         """
         testdir.makepyfile(s)
-        rec = testdir.inline_run("-m", "foo", SHOW_PYTEST_WARNINGS_ARG)
+        rec = testdir.inline_run("-m", "foo")
         passed, skipped, fail = rec.listoutcomes()
         assert len(passed) == 1
         assert len(skipped) == 0

@@ -1562,7 +1572,7 @@ class TestMarkersWithParametrization:
                 assert n + 1 == expected
         """
         testdir.makepyfile(s)
-        reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run()
         # xfail is skip??
         reprec.assertoutcome(passed=2, skipped=1)

@@ -1579,7 +1589,7 @@ class TestMarkersWithParametrization:
                 assert n % 2 == 0
         """
         testdir.makepyfile(s)
-        reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run()
         reprec.assertoutcome(passed=2, skipped=1)

     def test_xfail_with_arg(self, testdir):

@@ -1595,7 +1605,7 @@ class TestMarkersWithParametrization:
                 assert n + 1 == expected
         """
         testdir.makepyfile(s)
-        reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run()
         reprec.assertoutcome(passed=2, skipped=1)

     def test_xfail_with_kwarg(self, testdir):

@@ -1611,7 +1621,7 @@ class TestMarkersWithParametrization:
                 assert n + 1 == expected
         """
         testdir.makepyfile(s)
-        reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run()
         reprec.assertoutcome(passed=2, skipped=1)

     def test_xfail_with_arg_and_kwarg(self, testdir):

@@ -1627,7 +1637,7 @@ class TestMarkersWithParametrization:
                 assert n + 1 == expected
         """
         testdir.makepyfile(s)
-        reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run()
         reprec.assertoutcome(passed=2, skipped=1)

     @pytest.mark.parametrize("strict", [True, False])

@@ -1648,7 +1658,7 @@ class TestMarkersWithParametrization:
                 strict=strict
         )
         testdir.makepyfile(s)
-        reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run()
         passed, failed = (2, 1) if strict else (3, 0)
         reprec.assertoutcome(passed=passed, failed=failed)

@@ -1672,7 +1682,7 @@ class TestMarkersWithParametrization:
                 assert n + 1 == expected
         """
         testdir.makepyfile(s)
-        reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run()
         reprec.assertoutcome(passed=2, skipped=2)

     def test_parametrize_ID_generation_string_int_works(self, testdir):
@@ -2,35 +2,20 @@ import sys
 import pytest
 from _pytest.outcomes import Failed
-from _pytest.warning_types import PytestDeprecationWarning


 class TestRaises:
+    def test_check_callable(self):
+        with pytest.raises(TypeError, match=r".* must be callable"):
+            pytest.raises(RuntimeError, "int('qwe')")
+
     def test_raises(self):
-        source = "int('qwe')"
-        with pytest.warns(PytestDeprecationWarning):
-            excinfo = pytest.raises(ValueError, source)
-        code = excinfo.traceback[-1].frame.code
-        s = str(code.fullsource)
-        assert s == source
-
-    def test_raises_exec(self):
-        with pytest.warns(PytestDeprecationWarning) as warninfo:
-            pytest.raises(ValueError, "a,x = []")
-        assert warninfo[0].filename == __file__
-
-    def test_raises_exec_correct_filename(self):
-        with pytest.warns(PytestDeprecationWarning):
-            excinfo = pytest.raises(ValueError, 'int("s")')
-        assert __file__ in excinfo.traceback[-1].path
-
-    def test_raises_syntax_error(self):
-        with pytest.warns(PytestDeprecationWarning) as warninfo:
-            pytest.raises(SyntaxError, "qwe qwe qwe")
-        assert warninfo[0].filename == __file__
+        excinfo = pytest.raises(ValueError, int, "qwe")
+        assert "invalid literal" in str(excinfo.value)

     def test_raises_function(self):
-        pytest.raises(ValueError, int, "hello")
+        excinfo = pytest.raises(ValueError, int, "hello")
+        assert "invalid literal" in str(excinfo.value)

     def test_raises_callable_no_exception(self):
         class A:

@@ -169,17 +154,6 @@ class TestRaises:
         else:
             assert False, "Expected pytest.raises.Exception"

-    def test_custom_raise_message(self):
-        message = "TEST_MESSAGE"
-        try:
-            with pytest.warns(PytestDeprecationWarning):
-                with pytest.raises(ValueError, message=message):
-                    pass
-        except pytest.raises.Exception as e:
-            assert e.msg == message
-        else:
-            assert False, "Expected pytest.raises.Exception"
-
     @pytest.mark.parametrize("method", ["function", "with"])
     def test_raises_cyclic_reference(self, method):
         """

@@ -274,3 +248,9 @@ class TestRaises:
         with pytest.raises(CrappyClass()):
             pass
         assert "via __class__" in excinfo.value.args[0]
+
+    def test_raises_context_manager_with_kwargs(self):
+        with pytest.raises(TypeError) as excinfo:
+            with pytest.raises(Exception, foo="bar"):
+                pass
+        assert "Unexpected keyword arguments" in str(excinfo.value)
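With the string/exec form removed, pytest.raises keeps two spellings; a quick
sketch of both:

    import pytest

    def test_both_forms():
        # Callable form: returns an ExceptionInfo for further inspection.
        excinfo = pytest.raises(ValueError, int, "hello")
        assert "invalid literal" in str(excinfo.value)

        # Context-manager form:
        with pytest.raises(ValueError) as excinfo:
            int("hello")
        assert excinfo.type is ValueError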
@@ -172,7 +172,8 @@ class TestImportHookInstallation:
                 return check
             """,
             "mainwrapper.py": """\
-            import pytest, importlib_metadata
+            import pytest
+            from _pytest.compat import importlib_metadata

             class DummyEntryPoint(object):
                 name = 'spam'
@@ -200,6 +200,16 @@ class TestAssertionRewrite:
         else:
             assert msg == ["assert cls == 42"]

+    def test_assertrepr_compare_same_width(self, request):
+        """Should use same width/truncation with same initial width."""
+
+        def f():
+            assert "1234567890" * 5 + "A" == "1234567890" * 5 + "B"
+
+        assert getmsg(f).splitlines()[0] == (
+            "assert '123456789012...901234567890A' == '123456789012...901234567890B'"
+        )
+
     def test_dont_rewrite_if_hasattr_fails(self, request):
         class Y:
             """ A class whos getattr fails, but not with `AttributeError` """
@@ -1,10 +1,11 @@
 import os
 import sys
 import textwrap
+from pathlib import Path

-import importlib_metadata

 import _pytest._code
 import pytest
+from _pytest.compat import importlib_metadata
 from _pytest.config import _iter_rewritable_modules
 from _pytest.config.exceptions import UsageError
 from _pytest.config.findpaths import determine_setup

@@ -446,7 +447,7 @@ class TestConfigFromdictargs:
         assert config.option.capture == "no"
         assert config.args == args

-    def test_origargs(self, _sys_snapshot):
+    def test_invocation_params_args(self, _sys_snapshot):
         """Show that fromdictargs can handle args in their "orig" format"""
         from _pytest.config import Config

@@ -455,7 +456,7 @@ class TestConfigFromdictargs:
         config = Config.fromdictargs(option_dict, args)
         assert config.args == ["a", "b"]
-        assert config._origargs == args
+        assert config.invocation_params.args == args
         assert config.option.verbose == 4
         assert config.option.capture == "no"
@@ -1205,6 +1206,29 @@ def test_config_does_not_load_blocked_plugin_from_args(testdir):
     assert result.ret == ExitCode.USAGE_ERROR


+def test_invocation_args(testdir):
+    """Ensure that Config.invocation_* arguments are correctly defined"""
+
+    class DummyPlugin:
+        pass
+
+    p = testdir.makepyfile("def test(): pass")
+    plugin = DummyPlugin()
+    rec = testdir.inline_run(p, "-v", plugins=[plugin])
+    calls = rec.getcalls("pytest_runtest_protocol")
+    assert len(calls) == 1
+    call = calls[0]
+    config = call.item.config
+
+    assert config.invocation_params.args == [p, "-v"]
+    assert config.invocation_params.dir == Path(str(testdir.tmpdir))
+
+    plugins = config.invocation_params.plugins
+    assert len(plugins) == 2
+    assert plugins[0] is plugin
+    assert type(plugins[1]).__name__ == "Collect"  # installed by testdir.inline_run()
+
+
 @pytest.mark.parametrize(
     "plugin",
     [
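A short sketch of consuming this from a plugin (the hook name is real; the
printed lines are illustrative):

    # conftest.py
    def pytest_configure(config):
        # invocation_params records how pytest was started: the command-line
        # args, the plugin objects passed to pytest.main(), and the directory.
        print("args:", config.invocation_params.args)
        print("dir:", config.invocation_params.dir)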
@@ -1248,3 +1272,140 @@ def test_config_blocked_default_plugins(testdir, plugin):
         result.stdout.fnmatch_lines(["* 1 failed in *"])
     else:
         assert result.stdout.lines == [""]
+
+
+class TestSetupCfg:
+    def test_pytest_setup_cfg_unsupported(self, testdir):
+        testdir.makefile(
+            ".cfg",
+            setup="""
+                [pytest]
+                addopts = --verbose
+            """,
+        )
+        with pytest.raises(pytest.fail.Exception):
+            testdir.runpytest()
+
+    def test_pytest_custom_cfg_unsupported(self, testdir):
+        testdir.makefile(
+            ".cfg",
+            custom="""
+                [pytest]
+                addopts = --verbose
+            """,
+        )
+        with pytest.raises(pytest.fail.Exception):
+            testdir.runpytest("-c", "custom.cfg")
+
+
+class TestPytestPluginsVariable:
+    def test_pytest_plugins_in_non_top_level_conftest_unsupported(self, testdir):
+        testdir.makepyfile(
+            **{
+                "subdirectory/conftest.py": """
+                    pytest_plugins=['capture']
+                """
+            }
+        )
+        testdir.makepyfile(
+            """
+            def test_func():
+                pass
+            """
+        )
+        res = testdir.runpytest()
+        assert res.ret == 2
+        msg = "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
+        res.stdout.fnmatch_lines(
+            [
+                "*{msg}*".format(msg=msg),
+                "*subdirectory{sep}conftest.py*".format(sep=os.sep),
+            ]
+        )
+
+    @pytest.mark.parametrize("use_pyargs", [True, False])
+    def test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs(
+        self, testdir, use_pyargs
+    ):
+        """When using --pyargs, do not emit the warning about non-top-level conftest warnings (#4039, #4044)"""
+
+        files = {
+            "src/pkg/__init__.py": "",
+            "src/pkg/conftest.py": "",
+            "src/pkg/test_root.py": "def test(): pass",
+            "src/pkg/sub/__init__.py": "",
+            "src/pkg/sub/conftest.py": "pytest_plugins=['capture']",
+            "src/pkg/sub/test_bar.py": "def test(): pass",
+        }
+        testdir.makepyfile(**files)
+        testdir.syspathinsert(testdir.tmpdir.join("src"))
+
+        args = ("--pyargs", "pkg") if use_pyargs else ()
+        res = testdir.runpytest(*args)
+        assert res.ret == (0 if use_pyargs else 2)
+        msg = (
+            "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
+        )
+        if use_pyargs:
+            assert msg not in res.stdout.str()
+        else:
+            res.stdout.fnmatch_lines(["*{msg}*".format(msg=msg)])
+
+    def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest(
+        self, testdir
+    ):
+        subdirectory = testdir.tmpdir.join("subdirectory")
+        subdirectory.mkdir()
+        testdir.makeconftest(
+            """
+            pytest_plugins=['capture']
+            """
+        )
+        testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
+
+        testdir.makepyfile(
+            """
+            def test_func():
+                pass
+            """
+        )
+
+        res = testdir.runpytest_subprocess()
+        assert res.ret == 2
+        msg = "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
+        res.stdout.fnmatch_lines(
+            [
+                "*{msg}*".format(msg=msg),
+                "*subdirectory{sep}conftest.py*".format(sep=os.sep),
+            ]
+        )
+
+    def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives(
+        self, testdir
+    ):
+        subdirectory = testdir.tmpdir.join("subdirectory")
+        subdirectory.mkdir()
+        testdir.makeconftest(
+            """
+            pass
+            """
+        )
+        testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
+
+        testdir.makeconftest(
+            """
+            import warnings
+            warnings.filterwarnings('always', category=DeprecationWarning)
+            pytest_plugins=['capture']
+            """
+        )
+        testdir.makepyfile(
+            """
+            def test_func():
+                pass
+            """
+        )
+        res = testdir.runpytest_subprocess()
+        assert res.ret == 0
+        msg = "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
+        assert msg not in res.stdout.str()
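For contrast, the still-supported layout — pytest_plugins declared only in the
rootdir conftest (the plugin name is illustrative):

    # conftest.py at the rootdir
    pytest_plugins = ["myplugin"]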
@@ -3,6 +3,7 @@ import textwrap
 import pytest
 from _pytest.compat import MODULE_NOT_FOUND_ERROR
+from _pytest.doctest import _get_checker
 from _pytest.doctest import _is_mocked
 from _pytest.doctest import _patch_unwrap_mock_aware
 from _pytest.doctest import DoctestItem
@@ -838,6 +839,154 @@ class TestLiterals:
         reprec = testdir.inline_run()
         reprec.assertoutcome(failed=1)

+    def test_number_re(self):
+        for s in [
+            "1.",
+            "+1.",
+            "-1.",
+            ".1",
+            "+.1",
+            "-.1",
+            "0.1",
+            "+0.1",
+            "-0.1",
+            "1e5",
+            "+1e5",
+            "1e+5",
+            "+1e+5",
+            "1e-5",
+            "+1e-5",
+            "-1e-5",
+            "1.2e3",
+            "-1.2e-3",
+        ]:
+            print(s)
+            m = _get_checker()._number_re.match(s)
+            assert m is not None
+            assert float(m.group()) == pytest.approx(float(s))
+        for s in ["1", "abc"]:
+            print(s)
+            assert _get_checker()._number_re.match(s) is None
+
+    @pytest.mark.parametrize("config_mode", ["ini", "comment"])
+    def test_number_precision(self, testdir, config_mode):
+        """Test the NUMBER option."""
+        if config_mode == "ini":
+            testdir.makeini(
+                """
+                [pytest]
+                doctest_optionflags = NUMBER
+                """
+            )
+            comment = ""
+        else:
+            comment = "#doctest: +NUMBER"
+
+        testdir.maketxtfile(
+            test_doc="""
+
+            Scalars:
+
+            >>> import math
+            >>> math.pi {comment}
+            3.141592653589793
+            >>> math.pi {comment}
+            3.1416
+            >>> math.pi {comment}
+            3.14
+            >>> -math.pi {comment}
+            -3.14
+            >>> math.pi {comment}
+            3.
+            >>> 3. {comment}
+            3.0
+            >>> 3. {comment}
+            3.
+            >>> 3. {comment}
+            3.01
+            >>> 3. {comment}
+            2.99
+            >>> .299 {comment}
+            .3
+            >>> .301 {comment}
+            .3
+            >>> 951. {comment}
+            1e3
+            >>> 1049. {comment}
+            1e3
+            >>> -1049. {comment}
+            -1e3
+            >>> 1e3 {comment}
+            1e3
+            >>> 1e3 {comment}
+            1000.
+
+            Lists:
+
+            >>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}
+            [3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]
+            >>> [[0.333, 0.667], [0.999, 1.333]] {comment}
+            [[0.33, 0.667], [0.999, 1.333]]
+            >>> [[[0.101]]] {comment}
+            [[[0.1]]]
+
+            Doesn't barf on non-numbers:
+
+            >>> 'abc' {comment}
+            'abc'
+            >>> None {comment}
+            """.format(
+                comment=comment
+            )
+        )
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=1)
+
+    @pytest.mark.parametrize(
+        "expression,output",
+        [
+            # ints shouldn't match floats:
+            ("3.0", "3"),
+            ("3e0", "3"),
+            ("1e3", "1000"),
+            ("3", "3.0"),
+            # Rounding:
+            ("3.1", "3.0"),
+            ("3.1", "3.2"),
+            ("3.1", "4.0"),
+            ("8.22e5", "810000.0"),
+            # Only the actual output is rounded up, not the expected output:
+            ("3.0", "2.98"),
+            ("1e3", "999"),
+            # The current implementation doesn't understand that numbers inside
+            # strings shouldn't be treated as numbers:
+            pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail),
+        ],
+    )
+    def test_number_non_matches(self, testdir, expression, output):
+        testdir.maketxtfile(
+            test_doc="""
+            >>> {expression} #doctest: +NUMBER
+            {output}
+            """.format(
+                expression=expression, output=output
+            )
+        )
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=0, failed=1)
+
+    def test_number_and_allow_unicode(self, testdir):
+        testdir.maketxtfile(
+            test_doc="""
+            >>> from collections import namedtuple
+            >>> T = namedtuple('T', 'a b c')
+            >>> T(a=0.2330000001, b=u'str', c=b'bytes') # doctest: +ALLOW_UNICODE, +ALLOW_BYTES, +NUMBER
+            T(a=0.233, b=u'str', c='bytes')
+            """
+        )
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=1)
+
+
 class TestDoctestSkips:
     """
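A minimal standalone sketch of the NUMBER flag the tests above exercise: the
expected value only needs to match to the precision written in the doctest.

    def third():
        """
        >>> 1 / 3  # doctest: +NUMBER
        0.333
        """
        return 1 / 3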
@@ -1,4 +1,4 @@
-import importlib_metadata
+from _pytest.compat import importlib_metadata


 def test_pytest_entry_points_are_identical():
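The compat shim keeps call sites identical; a sketch, assuming a pytest version
that ships _pytest.compat.importlib_metadata as in this change:

    from _pytest.compat import importlib_metadata

    # Same API regardless of whether the stdlib module or the backport is used.
    print(importlib_metadata.version("pytest"))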
@@ -1,19 +1,48 @@
 import os
+import platform
+from datetime import datetime
+from pathlib import Path
 from xml.dom import minidom

 import py
+import xmlschema

 import pytest
 from _pytest.junitxml import LogXML
 from _pytest.reports import BaseReport


-def runandparse(testdir, *args):
-    resultpath = testdir.tmpdir.join("junit.xml")
-    result = testdir.runpytest("--junitxml=%s" % resultpath, *args)
-    xmldoc = minidom.parse(str(resultpath))
-    return result, DomNode(xmldoc)
+@pytest.fixture(scope="session")
+def schema():
+    """Returns a xmlschema.XMLSchema object for the junit-10.xsd file"""
+    fn = Path(__file__).parent / "example_scripts/junit-10.xsd"
+    with fn.open() as f:
+        return xmlschema.XMLSchema(f)
+
+
+@pytest.fixture
+def run_and_parse(testdir, schema):
+    """
+    Fixture that returns a function that can be used to execute pytest and return
+    the parsed ``DomNode`` of the root xml node.
+
+    The ``family`` parameter is used to configure the ``junit_family`` of the written report.
+    "xunit2" is also automatically validated against the schema.
+    """
+
+    def run(*args, family="xunit1"):
+        if family:
+            args = ("-o", "junit_family=" + family) + args
+        xml_path = testdir.tmpdir.join("junit.xml")
+        result = testdir.runpytest("--junitxml=%s" % xml_path, *args)
+        if family == "xunit2":
+            with xml_path.open() as f:
+                schema.validate(f)
+        xmldoc = minidom.parse(str(xml_path))
+        return result, DomNode(xmldoc)
+
+    return run
+
+
 def assert_attr(node, **kwargs):
     __tracebackhide__ = True
@@ -41,6 +70,16 @@ class DomNode:
     def _by_tag(self, tag):
         return self.__node.getElementsByTagName(tag)

+    @property
+    def children(self):
+        return [type(self)(x) for x in self.__node.childNodes]
+
+    @property
+    def get_unique_child(self):
+        children = self.children
+        assert len(children) == 1
+        return children[0]
+
     def find_nth_by_tag(self, tag, n):
         items = self._by_tag(tag)
         try:
@@ -75,12 +114,16 @@ class DomNode:
         return self.__node.tagName

     @property
-    def next_siebling(self):
+    def next_sibling(self):
         return type(self)(self.__node.nextSibling)


+parametrize_families = pytest.mark.parametrize("xunit_family", ["xunit1", "xunit2"])
+
+
 class TestPython:
-    def test_summing_simple(self, testdir):
+    @parametrize_families
+    def test_summing_simple(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -98,12 +141,13 @@ class TestPython:
                 assert 1
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(name="pytest", errors=0, failures=1, skipped=2, tests=5)

-    def test_summing_simple_with_errors(self, testdir):
+    @parametrize_families
+    def test_summing_simple_with_errors(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -124,12 +168,38 @@ class TestPython:
                 assert True
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(name="pytest", errors=1, failures=2, skipped=1, tests=5)

-    def test_timing_function(self, testdir):
+    @parametrize_families
+    def test_hostname_in_xml(self, testdir, run_and_parse, xunit_family):
+        testdir.makepyfile(
+            """
+            def test_pass():
+                pass
+            """
+        )
+        result, dom = run_and_parse(family=xunit_family)
+        node = dom.find_first_by_tag("testsuite")
+        node.assert_attr(hostname=platform.node())
+
+    @parametrize_families
+    def test_timestamp_in_xml(self, testdir, run_and_parse, xunit_family):
+        testdir.makepyfile(
+            """
+            def test_pass():
+                pass
+            """
+        )
+        start_time = datetime.now()
+        result, dom = run_and_parse(family=xunit_family)
+        node = dom.find_first_by_tag("testsuite")
+        timestamp = datetime.strptime(node["timestamp"], "%Y-%m-%dT%H:%M:%S.%f")
+        assert start_time <= timestamp < datetime.now()
+
+    def test_timing_function(self, testdir, run_and_parse):
         testdir.makepyfile(
             """
             import time, pytest
@@ -141,14 +211,16 @@ class TestPython:
                 time.sleep(0.01)
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse()
         node = dom.find_first_by_tag("testsuite")
         tnode = node.find_first_by_tag("testcase")
         val = tnode["time"]
         assert round(float(val), 2) >= 0.03

     @pytest.mark.parametrize("duration_report", ["call", "total"])
-    def test_junit_duration_report(self, testdir, monkeypatch, duration_report):
+    def test_junit_duration_report(
+        self, testdir, monkeypatch, duration_report, run_and_parse
+    ):
         # mock LogXML.node_reporter so it always sets a known duration to each test report object
         original_node_reporter = LogXML.node_reporter

@@ -166,8 +238,8 @@ class TestPython:
                 pass
         """
         )
-        result, dom = runandparse(
-            testdir, "-o", "junit_duration_report={}".format(duration_report)
+        result, dom = run_and_parse(
+            "-o", "junit_duration_report={}".format(duration_report)
         )
         node = dom.find_first_by_tag("testsuite")
         tnode = node.find_first_by_tag("testcase")

@@ -178,7 +250,8 @@ class TestPython:
             assert duration_report == "call"
             assert val == 1.0

-    def test_setup_error(self, testdir):
+    @parametrize_families
+    def test_setup_error(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -190,7 +263,7 @@ class TestPython:
                 pass
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(errors=1, tests=1)

@@ -200,7 +273,8 @@ class TestPython:
         fnode.assert_attr(message="test setup failure")
         assert "ValueError" in fnode.toxml()

-    def test_teardown_error(self, testdir):
+    @parametrize_families
+    def test_teardown_error(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -213,7 +287,7 @@ class TestPython:
                 pass
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         tnode = node.find_first_by_tag("testcase")

@@ -222,7 +296,8 @@ class TestPython:
         fnode.assert_attr(message="test teardown failure")
         assert "ValueError" in fnode.toxml()

-    def test_call_failure_teardown_error(self, testdir):
+    @parametrize_families
+    def test_call_failure_teardown_error(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -235,7 +310,7 @@ class TestPython:
                 raise Exception("Call Exception")
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(errors=1, failures=1, tests=1)
@@ -247,7 +322,8 @@ class TestPython:
         snode = second.find_first_by_tag("error")
         snode.assert_attr(message="test teardown failure")

-    def test_skip_contains_name_reason(self, testdir):
+    @parametrize_families
+    def test_skip_contains_name_reason(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -255,7 +331,7 @@ class TestPython:
                 pytest.skip("hello23")
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret == 0
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(skipped=1)

@@ -264,7 +340,8 @@ class TestPython:
         snode = tnode.find_first_by_tag("skipped")
         snode.assert_attr(type="pytest.skip", message="hello23")

-    def test_mark_skip_contains_name_reason(self, testdir):
+    @parametrize_families
+    def test_mark_skip_contains_name_reason(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -273,7 +350,7 @@ class TestPython:
                 assert True
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret == 0
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(skipped=1)

@@ -284,7 +361,10 @@ class TestPython:
         snode = tnode.find_first_by_tag("skipped")
         snode.assert_attr(type="pytest.skip", message="hello24")

-    def test_mark_skipif_contains_name_reason(self, testdir):
+    @parametrize_families
+    def test_mark_skipif_contains_name_reason(
+        self, testdir, run_and_parse, xunit_family
+    ):
         testdir.makepyfile(
             """
             import pytest

@@ -294,7 +374,7 @@ class TestPython:
                 assert True
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret == 0
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(skipped=1)

@@ -305,7 +385,10 @@ class TestPython:
         snode = tnode.find_first_by_tag("skipped")
         snode.assert_attr(type="pytest.skip", message="hello25")

-    def test_mark_skip_doesnt_capture_output(self, testdir):
+    @parametrize_families
+    def test_mark_skip_doesnt_capture_output(
+        self, testdir, run_and_parse, xunit_family
+    ):
         testdir.makepyfile(
             """
             import pytest

@@ -314,12 +397,13 @@ class TestPython:
                 print("bar!")
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret == 0
         node_xml = dom.find_first_by_tag("testsuite").toxml()
         assert "bar!" not in node_xml

-    def test_classname_instance(self, testdir):
+    @parametrize_families
+    def test_classname_instance(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             class TestClass(object):

@@ -327,7 +411,7 @@ class TestPython:
                 assert 0
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(failures=1)
@@ -336,20 +420,22 @@ class TestPython:
             classname="test_classname_instance.TestClass", name="test_method"
         )

-    def test_classname_nested_dir(self, testdir):
+    @parametrize_families
+    def test_classname_nested_dir(self, testdir, run_and_parse, xunit_family):
         p = testdir.tmpdir.ensure("sub", "test_hello.py")
         p.write("def test_func(): 0/0")
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(failures=1)
         tnode = node.find_first_by_tag("testcase")
         tnode.assert_attr(classname="sub.test_hello", name="test_func")

-    def test_internal_error(self, testdir):
+    @parametrize_families
+    def test_internal_error(self, testdir, run_and_parse, xunit_family):
         testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0")
         testdir.makepyfile("def test_function(): pass")
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(errors=1, tests=1)

@@ -360,7 +446,10 @@ class TestPython:
         assert "Division" in fnode.toxml()

     @pytest.mark.parametrize("junit_logging", ["no", "system-out", "system-err"])
-    def test_failure_function(self, testdir, junit_logging):
+    @parametrize_families
+    def test_failure_function(
+        self, testdir, junit_logging, run_and_parse, xunit_family
+    ):
         testdir.makepyfile(
             """
             import logging

@@ -375,7 +464,9 @@ class TestPython:
         """
         )

-        result, dom = runandparse(testdir, "-o", "junit_logging=%s" % junit_logging)
+        result, dom = run_and_parse(
+            "-o", "junit_logging=%s" % junit_logging, family=xunit_family
+        )
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(failures=1, tests=1)

@@ -384,11 +475,11 @@ class TestPython:
         fnode = tnode.find_first_by_tag("failure")
         fnode.assert_attr(message="ValueError: 42")
         assert "ValueError" in fnode.toxml()
-        systemout = fnode.next_siebling
+        systemout = fnode.next_sibling
         assert systemout.tag == "system-out"
         assert "hello-stdout" in systemout.toxml()
         assert "info msg" not in systemout.toxml()
-        systemerr = systemout.next_siebling
+        systemerr = systemout.next_sibling
         assert systemerr.tag == "system-err"
         assert "hello-stderr" in systemerr.toxml()
         assert "info msg" not in systemerr.toxml()

@@ -403,7 +494,8 @@ class TestPython:
         assert "warning msg" not in systemout.toxml()
         assert "warning msg" not in systemerr.toxml()

-    def test_failure_verbose_message(self, testdir):
+    @parametrize_families
+    def test_failure_verbose_message(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import sys

@@ -411,14 +503,14 @@ class TestPython:
                 assert 0, "An error"
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         node = dom.find_first_by_tag("testsuite")
         tnode = node.find_first_by_tag("testcase")
         fnode = tnode.find_first_by_tag("failure")
         fnode.assert_attr(message="AssertionError: An error assert 0")

-    def test_failure_escape(self, testdir):
+    @parametrize_families
+    def test_failure_escape(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -428,7 +520,7 @@ class TestPython:
                 assert 0
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(failures=3, tests=3)

@@ -443,7 +535,8 @@ class TestPython:
             text = sysout.text
             assert text == "%s\n" % char

-    def test_junit_prefixing(self, testdir):
+    @parametrize_families
+    def test_junit_prefixing(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             def test_func():

@@ -453,7 +546,7 @@ class TestPython:
                 pass
         """
         )
-        result, dom = runandparse(testdir, "--junitprefix=xyz")
+        result, dom = run_and_parse("--junitprefix=xyz", family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(failures=1, tests=2)

@@ -464,7 +557,8 @@ class TestPython:
             classname="xyz.test_junit_prefixing.TestHello", name="test_hello"
         )

-    def test_xfailure_function(self, testdir):
+    @parametrize_families
+    def test_xfailure_function(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -472,7 +566,7 @@ class TestPython:
                 pytest.xfail("42")
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert not result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(skipped=1, tests=1)

@@ -480,9 +574,9 @@ class TestPython:
         tnode.assert_attr(classname="test_xfailure_function", name="test_xfail")
         fnode = tnode.find_first_by_tag("skipped")
         fnode.assert_attr(type="pytest.xfail", message="42")
-        # assert "ValueError" in fnode.toxml()

-    def test_xfailure_marker(self, testdir):
+    @parametrize_families
+    def test_xfailure_marker(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -491,7 +585,7 @@ class TestPython:
                 assert False
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert not result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(skipped=1, tests=1)

@@ -500,7 +594,7 @@ class TestPython:
         fnode = tnode.find_first_by_tag("skipped")
         fnode.assert_attr(type="pytest.xfail", message="42")

-    def test_xfail_captures_output_once(self, testdir):
+    def test_xfail_captures_output_once(self, testdir, run_and_parse):
         testdir.makepyfile(
             """
             import sys

@@ -513,13 +607,14 @@ class TestPython:
                 assert 0
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse()
         node = dom.find_first_by_tag("testsuite")
         tnode = node.find_first_by_tag("testcase")
         assert len(tnode.find_by_tag("system-err")) == 1
         assert len(tnode.find_by_tag("system-out")) == 1

-    def test_xfailure_xpass(self, testdir):
+    @parametrize_families
+    def test_xfailure_xpass(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -528,14 +623,15 @@ class TestPython:
                 pass
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         # assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(skipped=0, tests=1)
         tnode = node.find_first_by_tag("testcase")
         tnode.assert_attr(classname="test_xfailure_xpass", name="test_xpass")

-    def test_xfailure_xpass_strict(self, testdir):
+    @parametrize_families
+    def test_xfailure_xpass_strict(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile(
             """
             import pytest

@@ -544,7 +640,7 @@ class TestPython:
                 pass
         """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         # assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(skipped=0, tests=1)

@@ -553,9 +649,10 @@ class TestPython:
         fnode = tnode.find_first_by_tag("failure")
         fnode.assert_attr(message="[XPASS(strict)] This needs to fail!")

-    def test_collect_error(self, testdir):
+    @parametrize_families
+    def test_collect_error(self, testdir, run_and_parse, xunit_family):
         testdir.makepyfile("syntax error")
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(errors=1, tests=1)

@@ -564,7 +661,7 @@ class TestPython:
         fnode.assert_attr(message="collection failure")
         assert "SyntaxError" in fnode.toxml()

-    def test_unicode(self, testdir):
+    def test_unicode(self, testdir, run_and_parse):
         value = "hx\xc4\x85\xc4\x87\n"
         testdir.makepyfile(
             """\

@@ -575,14 +672,14 @@ class TestPython:
         """
             % value
)
|
)
|
||||||
result, dom = runandparse(testdir)
|
result, dom = run_and_parse()
|
||||||
assert result.ret == 1
|
assert result.ret == 1
|
||||||
tnode = dom.find_first_by_tag("testcase")
|
tnode = dom.find_first_by_tag("testcase")
|
||||||
fnode = tnode.find_first_by_tag("failure")
|
fnode = tnode.find_first_by_tag("failure")
|
||||||
assert "hx" in fnode.toxml()
|
assert "hx" in fnode.toxml()
|
||||||
|
|
||||||

-    def test_assertion_binchars(self, testdir):
+    def test_assertion_binchars(self, testdir, run_and_parse):
         """this test did fail when the escaping wasn't strict"""
         testdir.makepyfile(
             """

@@ -593,23 +690,23 @@ class TestPython:
                 assert M1 == M2
             """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse()
         print(dom.toxml())

-    def test_pass_captures_stdout(self, testdir):
+    def test_pass_captures_stdout(self, testdir, run_and_parse):
         testdir.makepyfile(
             """
             def test_pass():
                 print('hello-stdout')
             """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse()
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         systemout = pnode.find_first_by_tag("system-out")
         assert "hello-stdout" in systemout.toxml()

-    def test_pass_captures_stderr(self, testdir):
+    def test_pass_captures_stderr(self, testdir, run_and_parse):
         testdir.makepyfile(
             """
             import sys
@@ -617,13 +714,13 @@ class TestPython:
                 sys.stderr.write('hello-stderr')
             """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse()
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         systemout = pnode.find_first_by_tag("system-err")
         assert "hello-stderr" in systemout.toxml()

-    def test_setup_error_captures_stdout(self, testdir):
+    def test_setup_error_captures_stdout(self, testdir, run_and_parse):
         testdir.makepyfile(
             """
             import pytest
@@ -636,13 +733,13 @@ class TestPython:
                 pass
             """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse()
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         systemout = pnode.find_first_by_tag("system-out")
         assert "hello-stdout" in systemout.toxml()

-    def test_setup_error_captures_stderr(self, testdir):
+    def test_setup_error_captures_stderr(self, testdir, run_and_parse):
         testdir.makepyfile(
             """
             import sys
@@ -656,13 +753,13 @@ class TestPython:
                 pass
             """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse()
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         systemout = pnode.find_first_by_tag("system-err")
         assert "hello-stderr" in systemout.toxml()

-    def test_avoid_double_stdout(self, testdir):
+    def test_avoid_double_stdout(self, testdir, run_and_parse):
         testdir.makepyfile(
             """
             import sys
@@ -677,7 +774,7 @@ class TestPython:
                 sys.stdout.write('hello-stdout call')
             """
         )
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse()
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         systemout = pnode.find_first_by_tag("system-out")
@@ -720,7 +817,8 @@ def test_dont_configure_on_slaves(tmpdir):


 class TestNonPython:
-    def test_summing_simple(self, testdir):
+    @parametrize_families
+    def test_summing_simple(self, testdir, run_and_parse, xunit_family):
         testdir.makeconftest(
             """
             import pytest
@@ -738,7 +836,7 @@ class TestNonPython:
             """
         )
         testdir.tmpdir.join("myfile.xyz").write("hello")
-        result, dom = runandparse(testdir)
+        result, dom = run_and_parse(family=xunit_family)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
         node.assert_attr(errors=0, failures=1, skipped=0, tests=1)
@@ -786,8 +884,8 @@ def test_nullbyte_replace(testdir):

 def test_invalid_xml_escape():
     # Test some more invalid xml chars, the full range should be
-    # tested really but let's just thest the edges of the ranges
-    # intead.
+    # tested really but let's just test the edges of the ranges
+    # instead.
     # XXX This only tests low unicode character points for now as
     # there are some issues with the testing infrastructure for
     # the higher ones.
@@ -871,7 +969,7 @@ def test_logxml_check_isdir(testdir):
     result.stderr.fnmatch_lines(["*--junitxml must be a filename*"])


-def test_escaped_parametrized_names_xml(testdir):
+def test_escaped_parametrized_names_xml(testdir, run_and_parse):
     testdir.makepyfile(
         """\
         import pytest
@@ -880,13 +978,13 @@ def test_escaped_parametrized_names_xml(testdir):
             assert char
         """
     )
-    result, dom = runandparse(testdir)
+    result, dom = run_and_parse()
     assert result.ret == 0
     node = dom.find_first_by_tag("testcase")
     node.assert_attr(name="test_func[\\x00]")


-def test_double_colon_split_function_issue469(testdir):
+def test_double_colon_split_function_issue469(testdir, run_and_parse):
     testdir.makepyfile(
         """
         import pytest
@@ -895,14 +993,14 @@ def test_double_colon_split_function_issue469(testdir):
             pass
         """
     )
-    result, dom = runandparse(testdir)
+    result, dom = run_and_parse()
     assert result.ret == 0
     node = dom.find_first_by_tag("testcase")
     node.assert_attr(classname="test_double_colon_split_function_issue469")
     node.assert_attr(name="test_func[double::colon]")


-def test_double_colon_split_method_issue469(testdir):
+def test_double_colon_split_method_issue469(testdir, run_and_parse):
     testdir.makepyfile(
         """
         import pytest
@@ -912,7 +1010,7 @@ def test_double_colon_split_method_issue469(testdir):
             pass
         """
     )
-    result, dom = runandparse(testdir)
+    result, dom = run_and_parse()
     assert result.ret == 0
     node = dom.find_first_by_tag("testcase")
     node.assert_attr(classname="test_double_colon_split_method_issue469.TestClass")
@@ -948,7 +1046,7 @@ def test_unicode_issue368(testdir):
     log.pytest_sessionfinish()


-def test_record_property(testdir):
+def test_record_property(testdir, run_and_parse):
     testdir.makepyfile(
         """
         import pytest
@@ -960,7 +1058,7 @@ def test_record_property(testdir):
             record_property("foo", "<1");
         """
     )
-    result, dom = runandparse(testdir, "-rwv")
+    result, dom = run_and_parse("-rwv")
     node = dom.find_first_by_tag("testsuite")
     tnode = node.find_first_by_tag("testcase")
     psnode = tnode.find_first_by_tag("properties")
@@ -969,7 +1067,7 @@ def test_record_property(testdir):
     pnodes[1].assert_attr(name="foo", value="<1")


-def test_record_property_same_name(testdir):
+def test_record_property_same_name(testdir, run_and_parse):
     testdir.makepyfile(
         """
         def test_record_with_same_name(record_property):
@@ -977,7 +1075,7 @@ def test_record_property_same_name(testdir):
             record_property("foo", "baz")
         """
     )
-    result, dom = runandparse(testdir, "-rw")
+    result, dom = run_and_parse("-rw")
     node = dom.find_first_by_tag("testsuite")
     tnode = node.find_first_by_tag("testcase")
     psnode = tnode.find_first_by_tag("properties")
@@ -1001,7 +1099,7 @@ def test_record_fixtures_without_junitxml(testdir, fixture_name):


 @pytest.mark.filterwarnings("default")
-def test_record_attribute(testdir):
+def test_record_attribute(testdir, run_and_parse):
     testdir.makeini(
         """
         [pytest]
@@ -1019,7 +1117,7 @@ def test_record_attribute(testdir):
             record_xml_attribute("foo", "<1");
         """
     )
-    result, dom = runandparse(testdir, "-rw")
+    result, dom = run_and_parse("-rw")
     node = dom.find_first_by_tag("testsuite")
     tnode = node.find_first_by_tag("testcase")
     tnode.assert_attr(bar="1")
@@ -1031,7 +1129,7 @@ def test_record_attribute(testdir):

 @pytest.mark.filterwarnings("default")
 @pytest.mark.parametrize("fixture_name", ["record_xml_attribute", "record_property"])
-def test_record_fixtures_xunit2(testdir, fixture_name):
+def test_record_fixtures_xunit2(testdir, fixture_name, run_and_parse):
     """Ensure record_xml_attribute and record_property drop values when outside of legacy family
     """
     testdir.makeini(
@@ -1054,7 +1152,7 @@ def test_record_fixtures_xunit2(testdir, fixture_name):
         )
     )

-    result, dom = runandparse(testdir, "-rw")
+    result, dom = run_and_parse("-rw", family=None)
     expected_lines = []
     if fixture_name == "record_xml_attribute":
         expected_lines.append(
@@ -1069,7 +1167,7 @@ def test_record_fixtures_xunit2(testdir, fixture_name):
     result.stdout.fnmatch_lines(expected_lines)


-def test_random_report_log_xdist(testdir, monkeypatch):
+def test_random_report_log_xdist(testdir, monkeypatch, run_and_parse):
     """xdist calls pytest_runtest_logreport as they are executed by the slaves,
     with nodes from several nodes overlapping, so junitxml must cope with that
     to produce correct reports. #1064
@@ -1084,7 +1182,7 @@ def test_random_report_log_xdist(testdir, monkeypatch):
             assert i != 22
         """
     )
-    _, dom = runandparse(testdir, "-n2")
+    _, dom = run_and_parse("-n2")
     suite_node = dom.find_first_by_tag("testsuite")
     failed = []
     for case_node in suite_node.find_by_tag("testcase"):
@@ -1094,7 +1192,22 @@ def test_random_report_log_xdist(testdir, monkeypatch):
     assert failed == ["test_x[22]"]


-def test_runs_twice(testdir):
+@parametrize_families
+def test_root_testsuites_tag(testdir, run_and_parse, xunit_family):
+    testdir.makepyfile(
+        """
+        def test_x():
+            pass
+        """
+    )
+    _, dom = run_and_parse(family=xunit_family)
+    root = dom.get_unique_child
+    assert root.tag == "testsuites"
+    suite_node = root.get_unique_child
+    assert suite_node.tag == "testsuite"
+
+
+def test_runs_twice(testdir, run_and_parse):
     f = testdir.makepyfile(
         """
         def test_pass():
@@ -1102,14 +1215,13 @@ def test_runs_twice(testdir):
         """
     )

-    result, dom = runandparse(testdir, f, f)
+    result, dom = run_and_parse(f, f)
     assert "INTERNALERROR" not in result.stdout.str()
     first, second = [x["classname"] for x in dom.find_by_tag("testcase")]
     assert first == second


 @pytest.mark.xfail(reason="hangs", run=False)
-def test_runs_twice_xdist(testdir):
+def test_runs_twice_xdist(testdir, run_and_parse):
pytest.importorskip("xdist")
|
pytest.importorskip("xdist")
|
||||||
f = testdir.makepyfile(
|
f = testdir.makepyfile(
|
||||||
"""
|
"""
|
||||||
|
@ -1118,13 +1230,13 @@ def test_runs_twice_xdist(testdir):
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
|
||||||
result, dom = runandparse(testdir, f, "--dist", "each", "--tx", "2*popen")
|
result, dom = run_and_parse(f, "--dist", "each", "--tx", "2*popen")
|
||||||
assert "INTERNALERROR" not in result.stdout.str()
|
assert "INTERNALERROR" not in result.stdout.str()
|
||||||
first, second = [x["classname"] for x in dom.find_by_tag("testcase")]
|
first, second = [x["classname"] for x in dom.find_by_tag("testcase")]
|
||||||
assert first == second
|
assert first == second
|
||||||
|
|
||||||
|
|
||||||
def test_fancy_items_regression(testdir):
|
def test_fancy_items_regression(testdir, run_and_parse):
|
||||||
# issue 1259
|
# issue 1259
|
||||||
testdir.makeconftest(
|
testdir.makeconftest(
|
||||||
"""
|
"""
|
||||||
|
@ -1157,7 +1269,7 @@ def test_fancy_items_regression(testdir):
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
|
||||||
result, dom = runandparse(testdir)
|
result, dom = run_and_parse()
|
||||||
|
|
||||||
assert "INTERNALERROR" not in result.stdout.str()
|
assert "INTERNALERROR" not in result.stdout.str()
|
||||||
|
|
||||||
|
@ -1176,9 +1288,10 @@ def test_fancy_items_regression(testdir):
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
def test_global_properties(testdir):
|
@parametrize_families
|
||||||
|
def test_global_properties(testdir, xunit_family):
|
||||||
path = testdir.tmpdir.join("test_global_properties.xml")
|
path = testdir.tmpdir.join("test_global_properties.xml")
|
||||||
log = LogXML(str(path), None)
|
log = LogXML(str(path), None, family=xunit_family)
|
||||||
|
|
||||||
class Report(BaseReport):
|
class Report(BaseReport):
|
||||||
sections = []
|
sections = []
|
||||||
|
@ -1236,7 +1349,8 @@ def test_url_property(testdir):
|
||||||
), "The URL did not get written to the xml"
|
), "The URL did not get written to the xml"
|
||||||
|
|
||||||
|
|
||||||
def test_record_testsuite_property(testdir):
|
@parametrize_families
|
||||||
|
def test_record_testsuite_property(testdir, run_and_parse, xunit_family):
|
||||||
testdir.makepyfile(
|
testdir.makepyfile(
|
||||||
"""
|
"""
|
||||||
def test_func1(record_testsuite_property):
|
def test_func1(record_testsuite_property):
|
||||||
|
@ -1246,7 +1360,7 @@ def test_record_testsuite_property(testdir):
|
||||||
record_testsuite_property("stats", 10)
|
record_testsuite_property("stats", 10)
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
result, dom = runandparse(testdir)
|
result, dom = run_and_parse(family=xunit_family)
|
||||||
assert result.ret == 0
|
assert result.ret == 0
|
||||||
node = dom.find_first_by_tag("testsuite")
|
node = dom.find_first_by_tag("testsuite")
|
||||||
properties_node = node.find_first_by_tag("properties")
|
properties_node = node.find_first_by_tag("properties")
|
||||||
|
@ -1284,14 +1398,16 @@ def test_record_testsuite_property_type_checking(testdir, junit):
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("suite_name", ["my_suite", ""])
|
@pytest.mark.parametrize("suite_name", ["my_suite", ""])
|
||||||
def test_set_suite_name(testdir, suite_name):
|
@parametrize_families
|
||||||
|
def test_set_suite_name(testdir, suite_name, run_and_parse, xunit_family):
|
||||||
if suite_name:
|
if suite_name:
|
||||||
testdir.makeini(
|
testdir.makeini(
|
||||||
"""
|
"""
|
||||||
[pytest]
|
[pytest]
|
||||||
junit_suite_name={}
|
junit_suite_name={suite_name}
|
||||||
|
junit_family={family}
|
||||||
""".format(
|
""".format(
|
||||||
suite_name
|
suite_name=suite_name, family=xunit_family
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
expected = suite_name
|
expected = suite_name
|
||||||
|
@ -1305,13 +1421,13 @@ def test_set_suite_name(testdir, suite_name):
|
||||||
pass
|
pass
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
result, dom = runandparse(testdir)
|
result, dom = run_and_parse(family=xunit_family)
|
||||||
assert result.ret == 0
|
assert result.ret == 0
|
||||||
node = dom.find_first_by_tag("testsuite")
|
node = dom.find_first_by_tag("testsuite")
|
||||||
node.assert_attr(name=expected)
|
node.assert_attr(name=expected)
|
||||||
|
|
||||||
|
|
||||||
def test_escaped_skipreason_issue3533(testdir):
|
def test_escaped_skipreason_issue3533(testdir, run_and_parse):
|
||||||
testdir.makepyfile(
|
testdir.makepyfile(
|
||||||
"""
|
"""
|
||||||
import pytest
|
import pytest
|
||||||
|
@ -1320,20 +1436,26 @@ def test_escaped_skipreason_issue3533(testdir):
|
||||||
pass
|
pass
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
_, dom = runandparse(testdir)
|
_, dom = run_and_parse()
|
||||||
node = dom.find_first_by_tag("testcase")
|
node = dom.find_first_by_tag("testcase")
|
||||||
snode = node.find_first_by_tag("skipped")
|
snode = node.find_first_by_tag("skipped")
|
||||||
assert "1 <> 2" in snode.text
|
assert "1 <> 2" in snode.text
|
||||||
snode.assert_attr(message="1 <> 2")
|
snode.assert_attr(message="1 <> 2")
|
||||||
|
|
||||||
|
|
||||||
def test_logging_passing_tests_disabled_does_not_log_test_output(testdir):
|
@parametrize_families
|
||||||
|
def test_logging_passing_tests_disabled_does_not_log_test_output(
|
||||||
|
testdir, run_and_parse, xunit_family
|
||||||
|
):
|
||||||
testdir.makeini(
|
testdir.makeini(
|
||||||
"""
|
"""
|
||||||
[pytest]
|
[pytest]
|
||||||
junit_log_passing_tests=False
|
junit_log_passing_tests=False
|
||||||
junit_logging=system-out
|
junit_logging=system-out
|
||||||
"""
|
junit_family={family}
|
||||||
|
""".format(
|
||||||
|
family=xunit_family
|
||||||
|
)
|
||||||
)
|
)
|
||||||
testdir.makepyfile(
|
testdir.makepyfile(
|
||||||
"""
|
"""
|
||||||
|
@ -1347,7 +1469,7 @@ def test_logging_passing_tests_disabled_does_not_log_test_output(testdir):
|
||||||
logging.warning('hello')
|
logging.warning('hello')
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
result, dom = runandparse(testdir)
|
result, dom = run_and_parse(family=xunit_family)
|
||||||
assert result.ret == 0
|
assert result.ret == 0
|
||||||
node = dom.find_first_by_tag("testcase")
|
node = dom.find_first_by_tag("testcase")
|
||||||
assert len(node.find_by_tag("system-err")) == 0
|
assert len(node.find_by_tag("system-err")) == 0
|
||||||
|
|
|

@@ -8,12 +8,6 @@ from _pytest.mark import EMPTY_PARAMETERSET_OPTION
 from _pytest.mark import MarkGenerator as Mark
 from _pytest.nodes import Collector
 from _pytest.nodes import Node
-from _pytest.warning_types import PytestDeprecationWarning
-from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
-
-ignore_markinfo = pytest.mark.filterwarnings(
-    "ignore:MarkInfo objects:pytest.RemovedInPytest4Warning"
-)


 class TestMark:
@@ -25,7 +19,8 @@ class TestMark:

     def test_pytest_mark_notcallable(self):
         mark = Mark()
-        pytest.raises((AttributeError, TypeError), mark)
+        with pytest.raises(TypeError):
+            mark()

     def test_mark_with_param(self):
         def some_function(abc):
@@ -625,7 +620,6 @@ class TestFunctional:
         reprec = testdir.inline_run()
         reprec.assertoutcome(passed=1)

-    @ignore_markinfo
     def test_keyword_added_for_session(self, testdir):
         testdir.makeconftest(
             """
@@ -651,7 +645,7 @@ class TestFunctional:
                 assert marker.kwargs == {}
             """
         )
-        reprec = testdir.inline_run("-m", "mark1", SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run("-m", "mark1")
         reprec.assertoutcome(passed=1)

     def assert_markers(self, items, **expected):
@@ -689,7 +683,7 @@ class TestFunctional:
                 assert True
             """
         )
-        reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)
+        reprec = testdir.inline_run()
         reprec.assertoutcome(skipped=1)


@@ -989,7 +983,7 @@ def test_markers_from_parametrize(testdir):
         """
     )

-    result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)
+    result = testdir.runpytest()
     result.assert_outcomes(passed=4)


@@ -1003,15 +997,3 @@ def test_pytest_param_id_requires_string():
 @pytest.mark.parametrize("s", (None, "hello world"))
 def test_pytest_param_id_allows_none_or_string(s):
     assert pytest.param(id=s)
-
-
-def test_pytest_param_warning_on_unknown_kwargs():
-    with pytest.warns(PytestDeprecationWarning) as warninfo:
-        # typo, should be marks=
-        pytest.param(1, 2, mark=pytest.mark.xfail())
-    assert warninfo[0].filename == __file__
-    msg, = warninfo[0].message.args
-    assert msg == (
-        "pytest.param() got unexpected keyword arguments: ['mark'].\n"
-        "This will be an error in future versions."
-    )

@@ -72,8 +72,7 @@ def test_make_hook_recorder(testdir):
 def test_parseconfig(testdir):
     config1 = testdir.parseconfig()
     config2 = testdir.parseconfig()
-    assert config2 != config1
-    assert config1 != pytest.config
+    assert config2 is not config1


 def test_testdir_runs_with_plugin(testdir):
@@ -279,7 +278,7 @@ def test_assert_outcomes_after_pytest_error(testdir):
     testdir.makepyfile("def test_foo(): assert True")

     result = testdir.runpytest("--unexpected-argument")
-    with pytest.raises(ValueError, match="Pytest terminal report not found"):
+    with pytest.raises(ValueError, match="Pytest terminal summary report not found"):
         result.assert_outcomes(passed=0)

@@ -3,7 +3,6 @@ import warnings

 import pytest
 from _pytest.recwarn import WarningsRecorder
-from _pytest.warning_types import PytestDeprecationWarning


 def test_recwarn_stacklevel(recwarn):
@@ -206,22 +205,17 @@ class TestDeprecatedCall:


 class TestWarns:
-    def test_strings(self):
+    def test_check_callable(self):
+        source = "warnings.warn('w1', RuntimeWarning)"
+        with pytest.raises(TypeError, match=r".* must be callable"):
+            pytest.warns(RuntimeWarning, source)
+
+    def test_several_messages(self):
         # different messages, b/c Python suppresses multiple identical warnings
-        source1 = "warnings.warn('w1', RuntimeWarning)"
-        source2 = "warnings.warn('w2', RuntimeWarning)"
-        source3 = "warnings.warn('w3', RuntimeWarning)"
-        with pytest.warns(PytestDeprecationWarning) as warninfo:  # yo dawg
-            pytest.warns(RuntimeWarning, source1)
-            pytest.raises(
-                pytest.fail.Exception, lambda: pytest.warns(UserWarning, source2)
-            )
-            pytest.warns(RuntimeWarning, source3)
-        assert len(warninfo) == 3
-        for w in warninfo:
-            assert w.filename == __file__
-            msg, = w.message.args
-            assert msg.startswith("warns(..., 'code(as_a_string)') is deprecated")
+        pytest.warns(RuntimeWarning, lambda: warnings.warn("w1", RuntimeWarning))
+        with pytest.raises(pytest.fail.Exception):
+            pytest.warns(UserWarning, lambda: warnings.warn("w2", RuntimeWarning))
+        pytest.warns(RuntimeWarning, lambda: warnings.warn("w3", RuntimeWarning))

     def test_function(self):
         pytest.warns(
@@ -380,3 +374,9 @@ class TestWarns:
         assert f() == 10
         assert pytest.warns(UserWarning, f) == 10
         assert pytest.warns(UserWarning, f) == 10
+
+    def test_warns_context_manager_with_kwargs(self):
+        with pytest.raises(TypeError) as excinfo:
+            with pytest.warns(UserWarning, foo="bar"):
+                pass
+        assert "Unexpected keyword arguments" in str(excinfo.value)

@@ -617,7 +617,7 @@ class TestTerminalFunctional:
                     pluggy.__version__,
                 ),
                 "*test_header_trailer_info.py .*",
-                "=* 1 passed*in *.[0-9][0-9] seconds *=",
+                "=* 1 passed*in *.[0-9][0-9]s *=",
             ]
         )
         if request.config.pluginmanager.list_plugin_distinfo():
@@ -1678,3 +1678,20 @@ def test_line_with_reprcrash(monkeypatch):
     check("😄😄😄😄😄\n2nd line", 41, "FAILED nodeid::😄::withunicode - 😄😄...")
     check("😄😄😄😄😄\n2nd line", 42, "FAILED nodeid::😄::withunicode - 😄😄😄...")
     check("😄😄😄😄😄\n2nd line", 80, "FAILED nodeid::😄::withunicode - 😄😄😄😄😄")
+
+
+@pytest.mark.parametrize(
+    "seconds, expected",
+    [
+        (10.0, "10.00s"),
+        (10.34, "10.34s"),
+        (59.99, "59.99s"),
+        (60.55, "60.55s (0:01:00)"),
+        (123.55, "123.55s (0:02:03)"),
+        (60 * 60 + 0.5, "3600.50s (1:00:00)"),
+    ],
+)
+def test_format_session_duration(seconds, expected):
+    from _pytest.terminal import format_session_duration
+
+    assert format_session_duration(seconds) == expected

@@ -7,7 +7,6 @@ import attr
 import pytest
 from _pytest import pathlib
 from _pytest.pathlib import Path
-from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG


 def test_tmpdir_fixture(testdir):
@@ -16,13 +15,6 @@ def test_tmpdir_fixture(testdir):
     results.stdout.fnmatch_lines(["*1 passed*"])


-def test_ensuretemp(recwarn):
-    d1 = pytest.ensuretemp("hello")
-    d2 = pytest.ensuretemp("hello")
-    assert d1 == d2
-    assert d1.check(dir=1)
-
-
 @attr.s
 class FakeConfig:
     basetemp = attr.ib()
@@ -87,12 +79,13 @@ def test_basetemp(testdir):
     p = testdir.makepyfile(
         """
         import pytest
-        def test_1():
-            pytest.ensuretemp("hello")
+        def test_1(tmpdir_factory):
+            tmpdir_factory.mktemp('hello', numbered=False)
         """
     )
-    result = testdir.runpytest(p, "--basetemp=%s" % mytemp, SHOW_PYTEST_WARNINGS_ARG)
+    result = testdir.runpytest(p, "--basetemp=%s" % mytemp)
     assert result.ret == 0
+    print(mytemp)
     assert mytemp.join("hello").check()

@@ -939,9 +939,7 @@ def test_class_method_containing_test_issue1558(testdir):
     reprec.assertoutcome(passed=1)


-@pytest.mark.parametrize(
-    "base", ["builtins.object", "unittest.TestCase", "unittest2.TestCase"]
-)
+@pytest.mark.parametrize("base", ["builtins.object", "unittest.TestCase"])
 def test_usefixtures_marker_on_unittest(base, testdir):
     """#3498"""
     module = base.rsplit(".", 1)[0]

@@ -498,38 +498,15 @@ class TestDeprecationWarningsByDefault:


 @pytest.mark.parametrize("change_default", [None, "ini", "cmdline"])
-def test_removed_in_pytest4_warning_as_error(testdir, change_default):
-    testdir.makepyfile(
-        """
-        import warnings, pytest
-        def test():
-            warnings.warn(pytest.RemovedInPytest4Warning("some warning"))
-        """
-    )
-    if change_default == "ini":
-        testdir.makeini(
-            """
-            [pytest]
-            filterwarnings =
-                ignore::pytest.RemovedInPytest4Warning
-            """
-        )
-
-    args = (
-        ("-Wignore::pytest.RemovedInPytest4Warning",)
-        if change_default == "cmdline"
-        else ()
-    )
-    result = testdir.runpytest(*args)
-    if change_default is None:
-        result.stdout.fnmatch_lines(["* 1 failed in *"])
-    else:
-        assert change_default in ("ini", "cmdline")
-        result.stdout.fnmatch_lines(["* 1 passed in *"])
-
-
-@pytest.mark.parametrize("change_default", [None, "ini", "cmdline"])
+@pytest.mark.skip(
+    reason="This test should be enabled again before pytest 6.0 is released"
+)
 def test_deprecation_warning_as_error(testdir, change_default):
+    """This ensures that PytestDeprecationWarnings raised by pytest are turned into errors.
+
+    This test should be enabled as part of each major release, and skipped again afterwards
+    to ensure our deprecations are turning into warnings as expected.
+    """
     testdir.makepyfile(
         """
         import warnings, pytest

tox.ini (17 changes)
@@ -45,7 +45,6 @@ deps =
     pexpect: pexpect
     pluggymaster: git+https://github.com/pytest-dev/pluggy.git@master
     twisted: twisted
-    twisted: unittest2
     xdist: pytest-xdist>=1.13
     {env:_PYTEST_TOX_EXTRA_DEP:}
 platform = {env:_PYTEST_TOX_PLATFORM:.*}
@@ -115,6 +114,17 @@ deps =
     wheel
 commands = python scripts/release.py {posargs}

+[testenv:publish_gh_release_notes]
+description = create GitHub release after deployment
+basepython = python3.6
+usedevelop = True
+passenv = GH_RELEASE_NOTES_TOKEN TRAVIS_TAG
+deps =
+    github3.py
+    pypandoc
+commands = python scripts/publish_gh_release_notes.py
+
+
 [pytest]
 minversion = 2.0
 addopts = -ra -p pytester --strict-markers
@@ -128,9 +138,6 @@ norecursedirs = testing/example_scripts
 xfail_strict=true
 filterwarnings =
     error
-    ignore:yield tests are deprecated, and scheduled to be removed in pytest 4.0:pytest.RemovedInPytest4Warning
-    ignore:Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0:pytest.RemovedInPytest4Warning
-    ignore::pytest.RemovedInPytest4Warning
     default:Using or importing the ABCs:DeprecationWarning:unittest2.*
     ignore:Module already imported so cannot be rewritten:pytest.PytestWarning
     # produced by python3.6/site.py itself (3.6.7 on Travis, could not trigger it with 3.6.8).
@@ -160,7 +167,7 @@ markers =

 [flake8]
 max-line-length = 120
-ignore = E203,W503
+extend-ignore = E203

 [isort]
 ; This config mimics what reorder-python-imports does.