Merge branch 'main' into deprecate-nose-plugin
commit c3c48ff19c

@@ -1,6 +1,6 @@
 repos:
 -   repo: https://github.com/psf/black
-    rev: 22.3.0
+    rev: 22.6.0
     hooks:
     -   id: black
         args: [--safe, --quiet]
@@ -10,7 +10,7 @@ repos:
     -   id: blacken-docs
         additional_dependencies: [black==20.8b1]
 -   repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
+    rev: v4.3.0
     hooks:
     -   id: trailing-whitespace
     -   id: end-of-file-fixer
@@ -37,17 +37,17 @@ repos:
         - flake8-typing-imports==1.12.0
         - flake8-docstrings==1.5.0
 -   repo: https://github.com/asottile/reorder_python_imports
-    rev: v3.0.1
+    rev: v3.8.2
     hooks:
     -   id: reorder-python-imports
         args: ['--application-directories=.:src', --py37-plus]
 -   repo: https://github.com/asottile/pyupgrade
-    rev: v2.32.0
+    rev: v2.37.2
     hooks:
     -   id: pyupgrade
         args: [--py37-plus]
 -   repo: https://github.com/asottile/setup-cfg-fmt
-    rev: v1.20.1
+    rev: v1.20.2
     hooks:
     -   id: setup-cfg-fmt
         args: [--max-py-version=3.10]
@@ -56,7 +56,7 @@ repos:
     hooks:
     -   id: python-use-type-annotations
 -   repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.942
+    rev: v0.971
     hooks:
     -   id: mypy
         files: ^(src/|testing/)
@@ -67,7 +67,6 @@ repos:
         - attrs>=19.2.0
         - packaging
         - tomli
-        - types-atomicwrites
         - types-pkg_resources
 -   repo: local
     hooks:
@@ -101,7 +100,7 @@ repos:
         types: [python]
     -   id: py-path-deprecated
         name: py.path usage is deprecated
-        exclude: docs|src/_pytest/deprecated.py|testing/deprecated_test.py
+        exclude: docs|src/_pytest/deprecated.py|testing/deprecated_test.py|src/_pytest/legacypath.py
         language: pygrep
         entry: \bpy\.path\.local
         types: [python]

AUTHORS | 15
@@ -15,6 +15,7 @@ Alan Velasco
 Alexander Johnson
 Alexander King
 Alexei Kozlenok
+Alice Purcell
 Allan Feldman
 Aly Sivji
 Amir Elkess
@@ -44,6 +45,7 @@ Aron Coyle
 Aron Curzon
 Aviral Verma
 Aviv Palivoda
+Babak Keyvani
 Barney Gale
 Ben Gartner
 Ben Webb
@@ -62,9 +64,11 @@ Ceridwen
 Charles Cloud
 Charles Machalow
 Charnjit SiNGH (CCSJ)
+Cheuk Ting Ho
 Chris Lamb
 Chris NeJame
 Chris Rose
+Chris Wheeler
 Christian Boelsen
 Christian Fetzer
 Christian Neumüller
@@ -83,6 +87,7 @@ Damian Skrzypczak
 Daniel Grana
 Daniel Hahler
 Daniel Nuri
+Daniel Sánchez Castelló
 Daniel Wandschneider
 Daniele Procida
 Danielle Jenkins
@@ -164,6 +169,7 @@ Jeff Widman
 Jenni Rinker
 John Eddie Ayson
 John Towler
+Jon Parise
 Jon Sonesen
 Jonas Obrist
 Jordan Guymon
@@ -183,7 +189,9 @@ Katarzyna Jachim
 Katarzyna Król
 Katerina Koukiou
 Keri Volans
+Kevin C
 Kevin Cox
+Kevin Hierro Carrasco
 Kevin J. Foley
 Kian Eliasi
 Kian-Meng Ang
@@ -246,6 +254,7 @@ Nicholas Murphy
 Niclas Olofsson
 Nicolas Delaby
 Nikolay Kondratyev
+Nipunn Koorapati
 Olga Matoula
 Oleg Pidsadnyi
 Oleg Sushchenko
@@ -257,6 +266,7 @@ Oscar Benjamin
 Parth Patel
 Patrick Hayes
 Paul Müller
+Paul Reece
 Pauli Virtanen
 Pavel Karateev
 Paweł Adamczak
@@ -320,6 +330,7 @@ Taneli Hukkinen
 Tanvi Mehta
 Tarcisio Fischer
 Tareq Alayan
+Tatiana Ovary
 Ted Xiao
 Terje Runde
 Thomas Grainger
@@ -331,12 +342,14 @@ Tom Dalton
 Tom Viner
 Tomáš Gavenčiak
 Tomer Keren
+Tony Narlock
 Tor Colvin
 Trevor Bekolay
 Tyler Goodlet
 Tzu-ping Chung
 Vasily Kuznetsov
 Victor Maryama
+Victor Rodriguez
 Victor Uriarte
 Vidar T. Fauske
 Virgil Dupras
@@ -357,5 +370,7 @@ Yoav Caspi
 Yuval Shimon
 Zac Hatfield-Dodds
 Zachary Kneupper
+Zachary OBrien
+Zhouxin Qiu
 Zoltán Máté
 Zsolt Cserna

@@ -0,0 +1 @@
+When running with ``--pdb``, ``TestCase.tearDown`` is no longer called for tests when the *class* has been skipped via ``unittest.skip`` or ``pytest.mark.skip``.

@@ -0,0 +1 @@
+Replace `atomicwrites <https://github.com/untitaker/python-atomicwrites>`__ dependency on windows with `os.replace`.

@@ -0,0 +1 @@
+:data:`sys.stdin` now contains all expected methods of a file-like object when capture is enabled.

@@ -0,0 +1 @@
+Doctests now respect the ``--import-mode`` flag.

@@ -0,0 +1 @@
+A warning is now emitted if a test function returns something other than `None`. This prevents a common mistake among beginners that expect that returning a `bool` (for example `return foo(a, b) == result`) would cause a test to pass or fail, instead of using `assert`.

@@ -0,0 +1,2 @@
+Improve :py:func:`pytest.raises`. Previously passing an empty tuple would give a confusing
+error. We now raise immediately with a more helpful message.

@@ -0,0 +1 @@
+Type-annotate ``FixtureRequest.param`` as ``Any`` as a stop gap measure until :issue:`8073` is fixed.

@@ -0,0 +1 @@
+Fixed a path handling code in ``rewrite.py`` that seems to work fine, but was incorrect and fails in some systems.

@@ -0,0 +1 @@
+Some coloring has been added to the short test summary.

@@ -0,0 +1 @@
+Ensure ``caplog.get_records(when)`` returns current/correct data after invoking ``caplog.clear()``.

@@ -0,0 +1 @@
+Normalize the help description of all command-line options.

@@ -0,0 +1 @@
+Added shell-style wildcard support to ``testpaths``.

@@ -0,0 +1 @@
+Fix default encoding warning (``EncodingWarning``) in ``cacheprovider``

@@ -0,0 +1 @@
+Fixed string representation for :func:`pytest.approx` when used to compare tuples.

@@ -0,0 +1 @@
+Display full crash messages in ``short test summary info``, when runng in a CI environment.

@@ -0,0 +1 @@
+Explicit note that :fixture:`tmpdir` fixture is discouraged in favour of :fixture:`tmp_path`.

@@ -0,0 +1,4 @@
+Improve the error message when we attempt to access a fixture that has been
+torn down.
+Add an additional sentence to the docstring explaining when it's not a good
+idea to call getfixturevalue.

@@ -0,0 +1 @@
+Added support for hidden configuration file by allowing ``.pytest.ini`` as an alternative to ``pytest.ini``.

@@ -17,7 +17,6 @@
 <li><a href="{{ pathto('changelog') }}">Changelog</a></li>
 <li><a href="{{ pathto('contributing') }}">Contributing</a></li>
 <li><a href="{{ pathto('backwards-compatibility') }}">Backwards Compatibility</a></li>
-<li><a href="{{ pathto('py27-py34-deprecation') }}">Python 2.7 and 3.4 Support</a></li>
 <li><a href="{{ pathto('sponsor') }}">Sponsor</a></li>
 <li><a href="{{ pathto('tidelift') }}">pytest for Enterprise</a></li>
 <li><a href="{{ pathto('license') }}">License</a></li>
@@ -30,5 +29,3 @@
 {%- endif %}

 <hr>
-<a href="{{ pathto('genindex') }}">Index</a>
-<hr>

@@ -77,3 +77,18 @@ Deprecation Roadmap
 Features currently deprecated and removed in previous releases can be found in :ref:`deprecations`.

 We track future deprecation and removal of features using milestones and the `deprecation <https://github.com/pytest-dev/pytest/issues?q=label%3A%22type%3A+deprecation%22>`_ and `removal <https://github.com/pytest-dev/pytest/labels/type%3A%20removal>`_ labels on GitHub.
+
+
+Python version support
+======================
+
+Released pytest versions support all Python versions that are actively maintained at the time of the release:
+
+============== ===================
+pytest version min. Python version
+============== ===================
+7.1+           3.7+
+6.2 - 7.0      3.6+
+5.0 - 6.1      3.5+
+3.3 - 4.6      2.7, 3.4+
+============== ===================

@@ -2618,7 +2618,8 @@ Important

 This release is a Python3.5+ only release.

-For more details, see our :std:doc:`Python 2.7 and 3.4 support plan <py27-py34-deprecation>`.
+For more details, see our `Python 2.7 and 3.4 support plan
+<https://docs.pytest.org/en/7.0.x/py27-py34-deprecation.html>`_.

 Removals
 --------
@@ -2842,7 +2843,11 @@ Features

 - :issue:`6870`: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.

-  Remark: while this is technically a new feature and according to our :ref:`policy <what goes into 4.6.x releases>` it should not have been backported, we have opened an exception in this particular case because it fixes a serious interaction with ``pytest-xdist``, so it can also be considered a bugfix.
+  Remark: while this is technically a new feature and according to our
+  `policy <https://docs.pytest.org/en/7.0.x/py27-py34-deprecation.html#what-goes-into-4-6-x-releases>`_
+  it should not have been backported, we have opened an exception in this
+  particular case because it fixes a serious interaction with ``pytest-xdist``,
+  so it can also be considered a bugfix.

 Trivial/Internal Changes
 ------------------------
@@ -3014,7 +3019,8 @@ Important

 The ``4.6.X`` series will be the last series to support **Python 2 and Python 3.4**.

-For more details, see our :std:doc:`Python 2.7 and 3.4 support plan <py27-py34-deprecation>`.
+For more details, see our `Python 2.7 and 3.4 support plan
+<https://docs.pytest.org/en/7.0.x/py27-py34-deprecation.html>`_.


 Features

@@ -247,7 +247,7 @@ html_sidebars = {
 html_domain_indices = True

 # If false, no index is generated.
-html_use_index = True
+html_use_index = False

 # If true, the index is split into individual pages for each letter.
 # html_split_index = False
@@ -320,7 +320,9 @@ latex_domain_indices = False

 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [("usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)]
+man_pages = [
+    ("how-to/usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)
+]


 # -- Options for Epub output ---------------------------------------------------

@@ -85,7 +85,6 @@ Further topics

 backwards-compatibility
 deprecations
-py27-py34-deprecation

 contributing
 development_guide

@@ -260,6 +260,47 @@ or ``pytest.warns(Warning)``.

 See :ref:`warns use cases` for examples.

+
+
+Returning non-None value in test functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. deprecated:: 7.2
+
+A :class:`pytest.PytestReturnNotNoneWarning` is now emitted if a test function returns something other than `None`.
+
+This prevents a common mistake among beginners that expect that returning a `bool` would cause a test to pass or fail, for example:
+
+.. code-block:: python
+
+    @pytest.mark.parametrize(
+        ["a", "b", "result"],
+        [
+            [1, 2, 5],
+            [2, 3, 8],
+            [5, 3, 18],
+        ],
+    )
+    def test_foo(a, b, result):
+        return foo(a, b) == result
+
+Given that pytest ignores the return value, this might be surprising that it will never fail.
+
+The proper fix is to change the `return` to an `assert`:
+
+.. code-block:: python
+
+    @pytest.mark.parametrize(
+        ["a", "b", "result"],
+        [
+            [1, 2, 5],
+            [2, 3, 8],
+            [5, 3, 18],
+        ],
+    )
+    def test_foo(a, b, result):
+        assert foo(a, b) == result
+
 The ``--strict`` command-line option
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@@ -346,7 +346,7 @@ Custom marker and command line option to control test runs
 Plugins can provide custom markers and implement specific behaviour
 based on it. This is a self-contained example which adds a command
 line option and a parametrized test function marker to run tests
-specifies via named environments:
+specified via named environments:

 .. code-block:: python


@@ -657,21 +657,17 @@ Use :func:`pytest.raises` with the
 :ref:`pytest.mark.parametrize ref` decorator to write parametrized tests
 in which some tests raise exceptions and others do not.

-It is helpful to define a no-op context manager ``does_not_raise`` to serve
-as a complement to ``raises``. For example:
+It may be helpful to use ``nullcontext`` as a complement to ``raises``.
+
+For example:

 .. code-block:: python

-    from contextlib import contextmanager
+    from contextlib import nullcontext as does_not_raise

     import pytest


-    @contextmanager
-    def does_not_raise():
-        yield
-
-
     @pytest.mark.parametrize(
         "example_input,expectation",
         [
@@ -688,22 +684,3 @@ as a complement to ``raises``. For example:

 In the example above, the first three test cases should run unexceptionally,
 while the fourth should raise ``ZeroDivisionError``.
-
-If you're only supporting Python 3.7+, you can simply use ``nullcontext``
-to define ``does_not_raise``:
-
-.. code-block:: python
-
-    from contextlib import nullcontext as does_not_raise
-
-Or, if you're supporting Python 3.3+ you can use:
-
-.. code-block:: python
-
-    from contextlib import ExitStack as does_not_raise
-
-Or, if desired, you can ``pip install contextlib2`` and use:
-
-.. code-block:: python
-
-    from contextlib2 import nullcontext as does_not_raise

@@ -173,10 +173,9 @@ This layout prevents a lot of common pitfalls and has many benefits, which are b
 `blog post by Ionel Cristian Mărieș <https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure>`_.

 .. note::
-    The new ``--import-mode=importlib`` (see :ref:`import-modes`) doesn't have
+    The ``--import-mode=importlib`` option (see :ref:`import-modes`) does not have
     any of the drawbacks above because ``sys.path`` is not changed when importing
-    test modules, so users that run
-    into this issue are strongly encouraged to try it and report if the new option works well for them.
+    test modules, so users that run into this issue are strongly encouraged to try it.

 The ``src`` directory layout is still strongly recommended however.


@@ -45,10 +45,19 @@ these values:

 * ``importlib``: new in pytest-6.0, this mode uses :mod:`importlib` to import test modules. This gives full control over the import process, and doesn't require changing :py:data:`sys.path`.

-  For this reason this doesn't require test module names to be unique, but also makes test
-  modules non-importable by each other.
+  For this reason this doesn't require test module names to be unique.
+
+  One drawback however is that test modules are non-importable by each other. Also, utility
+  modules in the tests directories are not automatically importable because the tests directory is no longer
+  added to :py:data:`sys.path`.
+
+  Initially we intended to make ``importlib`` the default in future releases, however it is clear now that
+  it has its own set of drawbacks so the default will remain ``prepend`` for the foreseeable future.
+
+.. seealso::
+
+    The :confval:`pythonpath` configuration variable.
+

-We intend to make ``importlib`` the default in future releases, depending on feedback.
-
 ``prepend`` and ``append`` import modes scenarios
 -------------------------------------------------

@@ -126,14 +126,17 @@ pytest also introduces new options:
   in expected doctest output.

 * ``NUMBER``: when enabled, floating-point numbers only need to match as far as
-  the precision you have written in the expected doctest output. For example,
-  the following output would only need to match to 2 decimal places::
+  the precision you have written in the expected doctest output. The numbers are
+  compared using :func:`pytest.approx` with relative tolerance equal to the
+  precision. For example, the following output would only need to match to 2
+  decimal places when comparing ``3.14`` to
+  ``pytest.approx(math.pi, rel=10**-2)``::

       >>> math.pi
       3.14

-  If you wrote ``3.1416`` then the actual output would need to match to 4
-  decimal places; and so on.
+  If you wrote ``3.1416`` then the actual output would need to match to
+  approximately 4 decimal places; and so on.

   This avoids false positives caused by limited floating-point precision, like
   this::

@@ -631,6 +631,7 @@ Here's what that might look like:
     def receiving_user(mail_admin):
         user = mail_admin.create_user()
         yield user
+        user.clear_mailbox()
         mail_admin.delete_user(user)



@@ -176,8 +176,8 @@ logging records as they are emitted directly into the console.

 You can specify the logging level for which log records with equal or higher
 level are printed to the console by passing ``--log-cli-level``. This setting
-accepts the logging level names as seen in python's documentation or an integer
-as the logging level num.
+accepts the logging level names or numeric values as seen in
+:ref:`logging's documentation <python:levels>`.

 Additionally, you can also specify ``--log-cli-format`` and
 ``--log-cli-date-format`` which mirror and default to ``--log-format`` and
@@ -198,9 +198,8 @@ Note that relative paths for the log-file location, whether passed on the CLI or
 config file, are always resolved relative to the current working directory.

 You can also specify the logging level for the log file by passing
-``--log-file-level``. This setting accepts the logging level names as seen in
-python's documentation(ie, uppercased level names) or an integer as the logging
-level num.
+``--log-file-level``. This setting accepts the logging level names or numeric
+values as seen in :ref:`logging's documentation <python:levels>`.

 Additionally, you can also specify ``--log-file-format`` and
 ``--log-file-date-format`` which are equal to ``--log-format`` and

@@ -3,7 +3,7 @@
 How to monkeypatch/mock modules and environments
 ================================================================

-.. currentmodule:: _pytest.monkeypatch
+.. currentmodule:: pytest

 Sometimes tests need to invoke functionality which depends
 on global settings or which invokes code which cannot be easily
@@ -25,6 +25,7 @@ functionality in tests:
     monkeypatch.delenv(name, raising=True)
     monkeypatch.syspath_prepend(path)
     monkeypatch.chdir(path)
+    monkeypatch.context()

 All modifications will be undone after the requesting
 test function or fixture has finished. The ``raising``
@@ -55,6 +56,9 @@ during a test.
 5. Use :py:meth:`monkeypatch.syspath_prepend <MonkeyPatch.syspath_prepend>` to modify ``sys.path`` which will also
    call ``pkg_resources.fixup_namespace_packages`` and :py:func:`importlib.invalidate_caches`.

+6. Use :py:meth:`monkeypatch.context <MonkeyPatch.context>` to apply patches only in a specific scope, which can help
+   control teardown of complex fixtures or patches to the stdlib.
+
 See the `monkeypatch blog post`_ for some introduction material
 and a discussion of its motivation.

@@ -436,7 +440,7 @@ separate fixtures for each potential mock and reference them in the needed tests
         _ = app.create_connection_string()


-.. currentmodule:: _pytest.monkeypatch
+.. currentmodule:: pytest

 API Reference
 -------------

@@ -104,8 +104,10 @@ The ``tmpdir`` and ``tmpdir_factory`` fixtures

 The ``tmpdir`` and ``tmpdir_factory`` fixtures are similar to ``tmp_path``
 and ``tmp_path_factory``, but use/return legacy `py.path.local`_ objects
-rather than standard :class:`pathlib.Path` objects. These days, prefer to
-use ``tmp_path`` and ``tmp_path_factory``.
+rather than standard :class:`pathlib.Path` objects.
+
+.. note::
+    These days, it is preferred to use ``tmp_path`` and ``tmp_path_factory``.

 See :fixture:`tmpdir <tmpdir>` :fixture:`tmpdir_factory <tmpdir_factory>`
 API for details.

@@ -27,12 +27,15 @@ Almost all ``unittest`` features are supported:
 * ``setUpClass/tearDownClass``;
 * ``setUpModule/tearDownModule``;

+.. _`pytest-subtests`: https://github.com/pytest-dev/pytest-subtests
 .. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol

+Additionally, :ref:`subtests <python:subtests>` are supported by the
+`pytest-subtests`_ plugin.
+
 Up to this point pytest does not have support for the following features:

 * `load_tests protocol`_;
-* :ref:`subtests <python:subtests>`;

 Benefits out of the box
 -----------------------


@@ -158,18 +158,20 @@ it in your setuptools-invocation:
     # sample ./setup.py file
     from setuptools import setup

+
+    name_of_plugin = "myproject"  # register plugin with this name
     setup(
         name="myproject",
         packages=["myproject"],
         # the following makes a plugin available to pytest
-        entry_points={"pytest11": ["name_of_plugin = myproject.pluginmodule"]},
+        entry_points={"pytest11": [f"{name_of_plugin} = myproject.pluginmodule"]},
         # custom PyPI classifier for pytest plugins
         classifiers=["Framework :: Pytest"],
     )

 If a package is installed this way, ``pytest`` will load
 ``myproject.pluginmodule`` as a plugin which can define
-:ref:`hooks <hook-reference>`.
+:ref:`hooks <hook-reference>`. Confirm registration with ``pytest --trace-config``

 .. note::


@@ -2,16 +2,11 @@

 .. sidebar:: Next Open Trainings

-    - `PyConDE <https://2022.pycon.de/program/W93DBJ/>`__, April 11th 2022 (3h), Berlin, Germany
-    - `PyConIT <https://pycon.it/en/talk/pytest-simple-rapid-and-fun-testing-with-python>`__, June 3rd 2022 (4h), Florence, Italy
+    - `CH Open Workshop-Tage <https://workshoptage.ch/workshops/2022/pytest-professionelles-testen-nicht-nur-fuer-python/>`__ (German), September 8th 2022, Bern, Switzerland
     - `Professional Testing with Python <https://python-academy.com/courses/python_course_testing.html>`_, via `Python Academy <https://www.python-academy.com/>`_, March 7th to 9th 2023 (3 day in-depth training), Remote and Leipzig, Germany

     Also see :doc:`previous talks and blogposts <talks>`.

-..
-    - `Europython <https://ep2022.europython.eu/>`__, July 11th to 17th (3h), Dublin, Ireland
-    - `CH Open Workshoptage <https://workshoptage.ch/>`__ (German), September 6th to 8th (1 day), Bern, Switzerland
-
 .. _features:

 pytest: helps you write better programs
@@ -27,8 +22,6 @@ scale to support complex functional testing for applications and libraries.

 **PyPI package name**: :pypi:`pytest`

-**Documentation as PDF**: `download latest <https://media.readthedocs.org/pdf/pytest/latest/pytest.pdf>`_
-

 A quick example
 ---------------
@@ -104,11 +97,6 @@ Bugs/Requests
 Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.


-Changelog
----------
-
-Consult the :ref:`Changelog <changelog>` page for fixes and enhancements of each version.
-
 Support pytest
 --------------

@@ -141,13 +129,3 @@ Security
 pytest has never been associated with a security vulnerability, but in any case, to report a
 security vulnerability please use the `Tidelift security contact <https://tidelift.com/security>`_.
 Tidelift will coordinate the fix and disclosure.
-
-
-License
--------
-
-Copyright Holger Krekel and others, 2004.
-
-Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
-
-.. _`MIT`: https://github.com/pytest-dev/pytest/blob/main/LICENSE

@@ -1,99 +0,0 @@
-Python 2.7 and 3.4 support
-==========================
-
-It is demanding on the maintainers of an open source project to support many Python versions, as
-there's extra cost of keeping code compatible between all versions, while holding back on
-features only made possible on newer Python versions.
-
-In case of Python 2 and 3, the difference between the languages makes it even more prominent,
-because many new Python 3 features cannot be used in a Python 2/3 compatible code base.
-
-Python 2.7 EOL has been reached :pep:`in 2020 <0373#maintenance-releases>`, with
-the last release made in April, 2020.
-
-Python 3.4 EOL has been reached :pep:`in 2019 <0429#release-schedule>`, with the last release made in March, 2019.
-
-For those reasons, in Jun 2019 it was decided that **pytest 4.6** series will be the last to support Python 2.7 and 3.4.
-
-What this means for general users
----------------------------------
-
-Thanks to the `python_requires`_ setuptools option,
-Python 2.7 and Python 3.4 users using a modern pip version
-will install the last pytest 4.6.X version automatically even if 5.0 or later versions
-are available on PyPI.
-
-Users should ensure they are using the latest pip and setuptools versions for this to work.
-
-Maintenance of 4.6.X versions
------------------------------
-
-Until January 2020, the pytest core team ported many bug-fixes from the main release into the
-``4.6.x`` branch, with several 4.6.X releases being made along the year.
-
-From now on, the core team will **no longer actively backport patches**, but the ``4.6.x``
-branch will continue to exist so the community itself can contribute patches.
-
-The core team will be happy to accept those patches, and make new 4.6.X releases **until mid-2020**
-(but consider that date as a ballpark, after that date the team might still decide to make new releases
-for critical bugs).
-
-.. _`python_requires`: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
-
-Technical aspects
-~~~~~~~~~~~~~~~~~
-
-(This section is a transcript from :issue:`5275`).
-
-In this section we describe the technical aspects of the Python 2.7 and 3.4 support plan.
-
-.. _what goes into 4.6.x releases:
-
-What goes into 4.6.X releases
-+++++++++++++++++++++++++++++
-
-New 4.6.X releases will contain bug fixes only.
-
-When will 4.6.X releases happen
-+++++++++++++++++++++++++++++++
-
-New 4.6.X releases will happen after we have a few bugs in place to release, or if a few weeks have
-passed (say a single bug has been fixed a month after the latest 4.6.X release).
-
-No hard rules here, just ballpark.
-
-Who will handle applying bug fixes
-++++++++++++++++++++++++++++++++++
-
-We core maintainers expect that people still using Python 2.7/3.4 and being affected by
-bugs to step up and provide patches and/or port bug fixes from the active branches.
-
-We will be happy to guide users interested in doing so, so please don't hesitate to ask.
-
-**Backporting changes into 4.6**
-
-Please follow these instructions:
-
-#. ``git fetch --all --prune``
-
-#. ``git checkout origin/4.6.x -b backport-XXXX`` # use the PR number here
-
-#. Locate the merge commit on the PR, in the *merged* message, for example:
-
-   nicoddemus merged commit 0f8b462 into pytest-dev:features
-
-#. ``git cherry-pick -m1 REVISION`` # use the revision you found above (``0f8b462``).
-
-#. Open a PR targeting ``4.6.x``:
-
-   * Prefix the message with ``[4.6]`` so it is an obvious backport
-   * Delete the PR body, it usually contains a duplicate commit message.
-
-**Providing new PRs to 4.6**
-
-Fresh pull requests to ``4.6.x`` will be accepted provided that
-the equivalent code in the active branches does not contain that bug (for example, a bug is specific
-to Python 2 only).
-
-Bug fixes that also happen in the mainstream version should be first fixed
-there, and then backported as per instructions above.

@@ -29,9 +29,11 @@ pytest.ini

 ``pytest.ini`` files take precedence over other files, even when empty.

+Alternatively, the hidden version ``.pytest.ini`` can be used.
+
 .. code-block:: ini

-    # pytest.ini
+    # pytest.ini or .pytest.ini
     [pytest]
     minversion = 6.0
     addopts = -ra -q

@@ -8,8 +8,8 @@ Reference guides
 .. toctree::
     :maxdepth: 1

-    fixtures
-    plugin_list
-    customize
     reference
+    fixtures
+    customize
     exit-codes
+    plugin_list

File diff suppressed because it is too large

@@ -92,7 +92,7 @@ pytest.param
 pytest.raises
 ~~~~~~~~~~~~~

-**Tutorial**: :ref:`assertraises`.
+**Tutorial**: :ref:`assertraises`

 .. autofunction:: pytest.raises(expected_exception: Exception [, *, match])
     :with: excinfo
@@ -100,7 +100,7 @@ pytest.raises
 pytest.deprecated_call
 ~~~~~~~~~~~~~~~~~~~~~~

-**Tutorial**: :ref:`ensuring_function_triggers`.
+**Tutorial**: :ref:`ensuring_function_triggers`

 .. autofunction:: pytest.deprecated_call()
     :with:
@@ -108,7 +108,7 @@ pytest.deprecated_call
 pytest.register_assert_rewrite
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-**Tutorial**: :ref:`assertion-rewriting`.
+**Tutorial**: :ref:`assertion-rewriting`

 .. autofunction:: pytest.register_assert_rewrite

@@ -123,7 +123,7 @@ pytest.warns
 pytest.freeze_includes
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-**Tutorial**: :ref:`freezing-pytest`.
+**Tutorial**: :ref:`freezing-pytest`

 .. autofunction:: pytest.freeze_includes

@@ -143,7 +143,7 @@ fixtures or plugins.
 pytest.mark.filterwarnings
 ~~~~~~~~~~~~~~~~~~~~~~~~~~

-**Tutorial**: :ref:`filterwarnings`.
+**Tutorial**: :ref:`filterwarnings`

 Add warning filters to marked test items.

@@ -169,7 +169,7 @@ Add warning filters to marked test items.
 pytest.mark.parametrize
 ~~~~~~~~~~~~~~~~~~~~~~~

-:ref:`parametrize`.
+**Tutorial**: :ref:`parametrize`

 This mark has the same signature as :py:meth:`pytest.Metafunc.parametrize`; see there.

@@ -179,7 +179,7 @@ This mark has the same signature as :py:meth:`pytest.Metafunc.parametrize`; see
 pytest.mark.skip
 ~~~~~~~~~~~~~~~~

-:ref:`skip`.
+**Tutorial**: :ref:`skip`

 Unconditionally skip a test function.

@@ -193,7 +193,7 @@ Unconditionally skip a test function.
 pytest.mark.skipif
 ~~~~~~~~~~~~~~~~~~

-:ref:`skipif`.
+**Tutorial**: :ref:`skipif`

 Skip a test function if a condition is ``True``.

@@ -209,7 +209,7 @@ Skip a test function if a condition is ``True``.
 pytest.mark.usefixtures
 ~~~~~~~~~~~~~~~~~~~~~~~

-**Tutorial**: :ref:`usefixtures`.
+**Tutorial**: :ref:`usefixtures`

 Mark a test function as using the given fixture names.

@@ -231,7 +231,7 @@ Mark a test function as using the given fixture names.
 pytest.mark.xfail
 ~~~~~~~~~~~~~~~~~~

-**Tutorial**: :ref:`xfail`.
+**Tutorial**: :ref:`xfail`

 Marks a test function as *expected to fail*.

@@ -245,7 +245,7 @@ Marks a test function as *expected to fail*.
 :keyword str reason:
     Reason why the test function is marked as xfail.
 :keyword Type[Exception] raises:
-    Exception subclass expected to be raised by the test function; other exceptions will fail the test.
+    Exception subclass (or tuple of subclasses) expected to be raised by the test function; other exceptions will fail the test.
 :keyword bool run:
     If the test function should actually be executed. If ``False``, the function will always xfail and will
     not be executed (useful if a function is segfaulting).
@@ -290,14 +290,14 @@ Example for using multiple custom markers:
     def test_function():
         ...

-When :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>` or :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers_with_node>` is used with multiple markers, the marker closest to the function will be iterated over first. The above example will result in ``@pytest.mark.slow`` followed by ``@pytest.mark.timeout(...)``.
+When :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>` or :meth:`Node.iter_markers_with_node <_pytest.nodes.Node.iter_markers_with_node>` is used with multiple markers, the marker closest to the function will be iterated over first. The above example will result in ``@pytest.mark.slow`` followed by ``@pytest.mark.timeout(...)``.

 .. _`fixtures-api`:

 Fixtures
 --------

-**Tutorial**: :ref:`fixture`.
+**Tutorial**: :ref:`fixture`

 Fixtures are requested by test functions or other fixtures by declaring them as argument names.

@@ -338,7 +338,7 @@ For more details, consult the full :ref:`fixtures docs <fixture>`.
 config.cache
 ~~~~~~~~~~~~

-**Tutorial**: :ref:`cache`.
+**Tutorial**: :ref:`cache`

 The ``config.cache`` object allows other plugins and fixtures
 to store and retrieve values across test runs. To access it from fixtures
@@ -358,22 +358,11 @@ Under the hood, the cache plugin uses the simple
 capsys
 ~~~~~~

-:ref:`captures`.
+**Tutorial**: :ref:`captures`

 .. autofunction:: _pytest.capture.capsys()
     :no-auto-options:

-    Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
-
-    Example:
-
-    .. code-block:: python
-
-        def test_output(capsys):
-            print("hello")
-            captured = capsys.readouterr()
-            assert captured.out == "hello\n"
-
 .. autoclass:: pytest.CaptureFixture()
     :members:

@@ -383,93 +372,48 @@ capsys
 capsysbinary
 ~~~~~~~~~~~~

-:ref:`captures`.
+**Tutorial**: :ref:`captures`

 .. autofunction:: _pytest.capture.capsysbinary()
     :no-auto-options:

-    Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
-
-    Example:
-
-    .. code-block:: python
-
-        def test_output(capsysbinary):
-            print("hello")
-            captured = capsysbinary.readouterr()
-            assert captured.out == b"hello\n"
-

 .. fixture:: capfd

 capfd
 ~~~~~~

-:ref:`captures`.
+**Tutorial**: :ref:`captures`

 .. autofunction:: _pytest.capture.capfd()
     :no-auto-options:

-    Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
-
-    Example:
-
-    .. code-block:: python
-
-        def test_system_echo(capfd):
-            os.system('echo "hello"')
-            captured = capfd.readouterr()
-            assert captured.out == "hello\n"
-

 .. fixture:: capfdbinary

 capfdbinary
 ~~~~~~~~~~~~

-:ref:`captures`.
+**Tutorial**: :ref:`captures`

 .. autofunction:: _pytest.capture.capfdbinary()
     :no-auto-options:

-    Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
-
-    Example:
-
-    .. code-block:: python
-
-        def test_system_echo(capfdbinary):
-            os.system('echo "hello"')
-            captured = capfdbinary.readouterr()
-            assert captured.out == b"hello\n"
-

 .. fixture:: doctest_namespace

 doctest_namespace
 ~~~~~~~~~~~~~~~~~

-:ref:`doctest`.
+**Tutorial**: :ref:`doctest`

 .. autofunction:: _pytest.doctest.doctest_namespace()

-    Usually this fixture is used in conjunction with another ``autouse`` fixture:
-
-    .. code-block:: python
-
-        @pytest.fixture(autouse=True)
-        def add_np(doctest_namespace):
-            doctest_namespace["np"] = numpy
-
-    For more details: :ref:`doctest_namespace`.
-

 .. fixture:: request

 request
 ~~~~~~~

-:ref:`request example`.
+**Example**: :ref:`request example`

 The ``request`` fixture is a special fixture providing information of the requesting test function.

@@ -490,7 +434,7 @@ pytestconfig
 record_property
 ~~~~~~~~~~~~~~~~~~~

-**Tutorial**: :ref:`record_property example`.
+**Tutorial**: :ref:`record_property example`

 .. autofunction:: _pytest.junitxml.record_property()

@@ -500,7 +444,7 @@ record_property
 record_testsuite_property
 ~~~~~~~~~~~~~~~~~~~~~~~~~

-**Tutorial**: :ref:`record_testsuite_property example`.
+**Tutorial**: :ref:`record_testsuite_property example`

 .. autofunction:: _pytest.junitxml.record_testsuite_property()

@@ -510,7 +454,7 @@ record_testsuite_property
 caplog
 ~~~~~~

-:ref:`logging`.
+**Tutorial**: :ref:`logging`

 .. autofunction:: _pytest.logging.caplog()
     :no-auto-options:
@@ -526,7 +470,7 @@ caplog
 monkeypatch
 ~~~~~~~~~~~

-:ref:`monkeypatching`.
+**Tutorial**: :ref:`monkeypatching`

 .. autofunction:: _pytest.monkeypatch.monkeypatch()
     :no-auto-options:
@@ -600,19 +544,13 @@ recwarn
 .. autoclass:: pytest.WarningsRecorder()
     :members:

-    Each recorded warning is an instance of :class:`warnings.WarningMessage`.
-
-    .. note::
-        ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
-        differently; see :ref:`ensuring_function_triggers`.
-

 .. fixture:: tmp_path

 tmp_path
 ~~~~~~~~

-:ref:`tmp_path`
+**Tutorial**: :ref:`tmp_path`

 .. autofunction:: _pytest.tmpdir.tmp_path()
     :no-auto-options:
@@ -623,7 +561,7 @@ tmp_path
 tmp_path_factory
 ~~~~~~~~~~~~~~~~

-:ref:`tmp_path_factory example`
+**Tutorial**: :ref:`tmp_path_factory example`

 .. _`tmp_path_factory factory api`:

@@ -638,7 +576,7 @@ tmp_path_factory
 tmpdir
 ~~~~~~

-:ref:`tmpdir and tmpdir_factory`
+**Tutorial**: :ref:`tmpdir and tmpdir_factory`

 .. autofunction:: _pytest.legacypath.LegacyTmpdirPlugin.tmpdir()
     :no-auto-options:
@@ -649,7 +587,7 @@ tmpdir
 tmpdir_factory
 ~~~~~~~~~~~~~~

-:ref:`tmpdir and tmpdir_factory`
+**Tutorial**: :ref:`tmpdir and tmpdir_factory`

 ``tmpdir_factory`` is an instance of :class:`~pytest.TempdirFactory`:

@@ -662,7 +600,7 @@ tmpdir_factory
 Hooks
 -----

-:ref:`writing-plugins`.
+**Tutorial**: :ref:`writing-plugins`

 .. currentmodule:: _pytest.hookspec

@ -1192,6 +1130,9 @@ Custom warnings generated in some situations such as improper usage or deprecate
|
||||||
.. autoclass:: pytest.PytestExperimentalApiWarning
|
.. autoclass:: pytest.PytestExperimentalApiWarning
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
|
||||||
|
.. autoclass:: pytest.PytestReturnNotNoneWarning
|
||||||
|
:show-inheritance:
|
||||||
|
|
||||||
.. autoclass:: pytest.PytestUnhandledCoroutineWarning
|
.. autoclass:: pytest.PytestUnhandledCoroutineWarning
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
|
||||||
|
@ -1213,9 +1154,10 @@ Consult the :ref:`internal-warnings` section in the documentation for more infor
|
||||||
Configuration Options
|
Configuration Options
|
||||||
---------------------
|
---------------------
|
||||||
|
|
||||||
Here is a list of builtin configuration options that may be written in a ``pytest.ini``, ``pyproject.toml``, ``tox.ini`` or ``setup.cfg``
|
Here is a list of builtin configuration options that may be written in a ``pytest.ini`` (or ``.pytest.ini``),
|
||||||
file, usually located at the root of your repository. To see each file format in details, see
|
``pyproject.toml``, ``tox.ini``, or ``setup.cfg`` file, usually located at the root of your repository.
|
||||||
:ref:`config file formats`.
|
|
||||||
|
To see each file format in details, see :ref:`config file formats`.
|
||||||
|
|
||||||
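A hedged sketch of the smallest useful configuration in the ``pytest.ini``
form named above (the option values are illustrative only; in
``pyproject.toml`` the same keys live under ``[tool.pytest.ini_options]``):

.. code-block:: ini

    # pytest.ini
    [pytest]
    minversion = 7.0
    addopts = -ra -q
    testpaths = tests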
.. warning::
|
.. warning::
|
||||||
Usage of ``setup.cfg`` is not recommended except for very simple use cases. ``.cfg``
|
Usage of ``setup.cfg`` is not recommended except for very simple use cases. ``.cfg``
|
||||||
|
@ -1761,6 +1703,8 @@ passed multiple times. The expected format is ``name=value``. For example::
|
||||||
Sets list of directories that should be searched for tests when
|
Sets list of directories that should be searched for tests when
|
||||||
no specific directories, files or test ids are given in the command line when
|
no specific directories, files or test ids are given in the command line when
|
||||||
executing pytest from the :ref:`rootdir <rootdir>` directory.
|
executing pytest from the :ref:`rootdir <rootdir>` directory.
|
||||||
|
File system paths may use shell-style wildcards, including the recursive
|
||||||
|
``**`` pattern.
|
||||||
Useful when all project tests are in a known location to speed up
|
Useful when all project tests are in a known location to speed up
|
||||||
test collection and to avoid picking up undesired tests by accident.
|
test collection and to avoid picking up undesired tests by accident.
|
||||||
|
|
||||||
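The sentence added above allows shell-style wildcards in ``testpaths``; a
hedged example with invented directory names:

.. code-block:: ini

    [pytest]
    # 'tests' is matched literally, '**' recurses into subdirectories
    testpaths = tests doc/**/snippets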
|
@ -1809,11 +1753,11 @@ All the command-line flags can be obtained by running ``pytest --help``::
|
||||||
$ pytest --help
|
$ pytest --help
|
||||||
usage: pytest [options] [file_or_dir] [file_or_dir] [...]
|
usage: pytest [options] [file_or_dir] [file_or_dir] [...]
|
||||||
|
|
||||||
positional arguments:
|
Positional arguments:
|
||||||
file_or_dir
|
file_or_dir
|
||||||
|
|
||||||
general:
|
General:
|
||||||
-k EXPRESSION only run tests which match the given substring
|
-k EXPRESSION Only run tests which match the given substring
|
||||||
expression. An expression is a python evaluatable
|
expression. An expression is a python evaluatable
|
||||||
expression where all names are substring-matched
|
expression where all names are substring-matched
|
||||||
against test names and their parent classes.
|
against test names and their parent classes.
|
||||||
|
@ -1828,217 +1772,217 @@ All the command-line flags can be obtained by running ``pytest --help``::
|
||||||
'extra_keyword_matches' set, as well as functions
|
'extra_keyword_matches' set, as well as functions
|
||||||
which have names assigned directly to them. The
|
which have names assigned directly to them. The
|
||||||
matching is case-insensitive.
|
matching is case-insensitive.
|
||||||
-m MARKEXPR only run tests matching given mark expression.
|
-m MARKEXPR Only run tests matching given mark expression.
|
||||||
For example: -m 'mark1 and not mark2'.
|
For example: -m 'mark1 and not mark2'.
|
||||||
--markers show markers (builtin, plugin and per-project ones).
|
--markers Show markers (builtin, plugin and per-project ones)
|
||||||
-x, --exitfirst exit instantly on first error or failed test.
|
-x, --exitfirst Exit instantly on first error or failed test
|
||||||
--fixtures, --funcargs
|
--fixtures, --funcargs
|
||||||
show available fixtures, sorted by plugin appearance
|
Show available fixtures, sorted by plugin appearance
|
||||||
(fixtures with leading '_' are only shown with '-v')
|
(fixtures with leading '_' are only shown with '-v')
|
||||||
--fixtures-per-test show fixtures per test
|
--fixtures-per-test Show fixtures per test
|
||||||
--pdb start the interactive Python debugger on errors or
|
--pdb Start the interactive Python debugger on errors or
|
||||||
KeyboardInterrupt.
|
KeyboardInterrupt
|
||||||
--pdbcls=modulename:classname
|
--pdbcls=modulename:classname
|
||||||
specify a custom interactive Python debugger for use
|
Specify a custom interactive Python debugger for use
|
||||||
with --pdb.For example:
|
with --pdb. For example:
|
||||||
--pdbcls=IPython.terminal.debugger:TerminalPdb
|
--pdbcls=IPython.terminal.debugger:TerminalPdb
|
||||||
--trace Immediately break when running each test.
|
--trace Immediately break when running each test
|
||||||
--capture=method per-test capturing method: one of fd|sys|no|tee-sys.
|
--capture=method Per-test capturing method: one of fd|sys|no|tee-sys.
|
||||||
-s shortcut for --capture=no.
|
-s Shortcut for --capture=no.
|
||||||
--runxfail report the results of xfail tests as if they were
|
--runxfail Report the results of xfail tests as if they were
|
||||||
not marked
|
not marked
|
||||||
--lf, --last-failed rerun only the tests that failed at the last run (or
|
--lf, --last-failed Rerun only the tests that failed at the last run (or
|
||||||
all if none failed)
|
all if none failed)
|
||||||
--ff, --failed-first run all tests, but run the last failures first.
|
--ff, --failed-first Run all tests, but run the last failures first
|
||||||
This may re-order tests and thus lead to repeated
|
This may re-order tests and thus lead to repeated
|
||||||
fixture setup/teardown.
|
fixture setup/teardown
|
||||||
--nf, --new-first run tests from new files first, then the rest of the
|
--nf, --new-first Run tests from new files first, then the rest of the
|
||||||
tests sorted by file mtime
|
tests sorted by file mtime
|
||||||
--cache-show=[CACHESHOW]
|
--cache-show=[CACHESHOW]
|
||||||
show cache contents, don't perform collection or
|
Show cache contents, don't perform collection or
|
||||||
tests. Optional argument: glob (default: '*').
|
tests. Optional argument: glob (default: '*').
|
||||||
--cache-clear remove all cache contents at start of test run.
|
--cache-clear Remove all cache contents at start of test run
|
||||||
--lfnf={all,none}, --last-failed-no-failures={all,none}
|
--lfnf={all,none}, --last-failed-no-failures={all,none}
|
||||||
which tests to run with no previously (known)
|
Which tests to run with no previously (known)
|
||||||
failures.
|
failures
|
||||||
--sw, --stepwise exit on test failure and continue from last failing
|
--sw, --stepwise Exit on test failure and continue from last failing
|
||||||
test next time
|
test next time
|
||||||
--sw-skip, --stepwise-skip
|
--sw-skip, --stepwise-skip
|
||||||
ignore the first failing test but stop on the next
|
Ignore the first failing test but stop on the next
|
||||||
failing test.
|
failing test.
|
||||||
implicitly enables --stepwise.
|
implicitly enables --stepwise.
|
||||||
|
|
||||||
reporting:
|
Reporting:
|
||||||
--durations=N show N slowest setup/test durations (N=0 for all).
|
--durations=N show N slowest setup/test durations (N=0 for all)
|
||||||
--durations-min=N Minimal duration in seconds for inclusion in slowest
|
--durations-min=N Minimal duration in seconds for inclusion in slowest
|
||||||
list. Default 0.005
|
list. Default: 0.005.
|
||||||
-v, --verbose increase verbosity.
|
-v, --verbose Increase verbosity
|
||||||
--no-header disable header
|
--no-header Disable header
|
||||||
--no-summary disable summary
|
--no-summary Disable summary
|
||||||
-q, --quiet decrease verbosity.
|
-q, --quiet Decrease verbosity
|
||||||
--verbosity=VERBOSE set verbosity. Default is 0.
|
--verbosity=VERBOSE Set verbosity. Default: 0.
|
||||||
-r chars show extra test summary info as specified by chars:
|
-r chars Show extra test summary info as specified by chars:
|
||||||
(f)ailed, (E)rror, (s)kipped, (x)failed, (X)passed,
|
(f)ailed, (E)rror, (s)kipped, (x)failed, (X)passed,
|
||||||
(p)assed, (P)assed with output, (a)ll except passed
|
(p)assed, (P)assed with output, (a)ll except passed
|
||||||
(p/P), or (A)ll. (w)arnings are enabled by default
|
(p/P), or (A)ll. (w)arnings are enabled by default
|
||||||
(see --disable-warnings), 'N' can be used to reset
|
(see --disable-warnings), 'N' can be used to reset
|
||||||
the list. (default: 'fE').
|
the list. (default: 'fE').
|
||||||
--disable-warnings, --disable-pytest-warnings
|
--disable-warnings, --disable-pytest-warnings
|
||||||
disable warnings summary
|
Disable warnings summary
|
||||||
-l, --showlocals show locals in tracebacks (disabled by default).
|
-l, --showlocals Show locals in tracebacks (disabled by default)
|
||||||
--tb=style traceback print mode
|
--tb=style Traceback print mode
|
||||||
(auto/long/short/line/native/no).
|
(auto/long/short/line/native/no).
|
||||||
--show-capture={no,stdout,stderr,log,all}
|
--show-capture={no,stdout,stderr,log,all}
|
||||||
Controls how captured stdout/stderr/log is shown on
|
Controls how captured stdout/stderr/log is shown on
|
||||||
failed tests. Default is 'all'.
|
failed tests. Default: all.
|
||||||
--full-trace don't cut any tracebacks (default is to cut).
|
--full-trace Don't cut any tracebacks (default is to cut)
|
||||||
--color=color color terminal output (yes/no/auto).
|
--color=color Color terminal output (yes/no/auto)
|
||||||
--code-highlight={yes,no}
|
--code-highlight={yes,no}
|
||||||
Whether code should be highlighted (only if --color
|
Whether code should be highlighted (only if --color
|
||||||
is also enabled)
|
is also enabled). Default: yes.
|
||||||
--pastebin=mode send failed|all info to bpaste.net pastebin service.
|
--pastebin=mode Send failed|all info to bpaste.net pastebin service
|
||||||
--junit-xml=path create junit-xml style report file at given path.
|
--junit-xml=path Create junit-xml style report file at given path
|
||||||
--junit-prefix=str prepend prefix to classnames in junit-xml output
|
--junit-prefix=str Prepend prefix to classnames in junit-xml output
|
||||||
|
|
||||||
pytest-warnings:
|
pytest-warnings:
|
||||||
-W PYTHONWARNINGS, --pythonwarnings=PYTHONWARNINGS
|
-W PYTHONWARNINGS, --pythonwarnings=PYTHONWARNINGS
|
||||||
set which warnings to report, see -W option of
|
Set which warnings to report, see -W option of
|
||||||
python itself.
|
Python itself
|
||||||
--maxfail=num exit after first num failures or errors.
|
--maxfail=num Exit after first num failures or errors
|
||||||
--strict-config any warnings encountered while parsing the `pytest`
|
--strict-config Any warnings encountered while parsing the `pytest`
|
||||||
section of the configuration file raise errors.
|
section of the configuration file raise errors
|
||||||
--strict-markers markers not registered in the `markers` section of
|
--strict-markers Markers not registered in the `markers` section of
|
||||||
the configuration file raise errors.
|
the configuration file raise errors
|
||||||
--strict (deprecated) alias to --strict-markers.
|
--strict (Deprecated) alias to --strict-markers
|
||||||
-c file load configuration from `file` instead of trying to
|
-c file Load configuration from `file` instead of trying to
|
||||||
locate one of the implicit configuration files.
|
locate one of the implicit configuration files
|
||||||
--continue-on-collection-errors
|
--continue-on-collection-errors
|
||||||
Force test execution even if collection errors
|
Force test execution even if collection errors
|
||||||
occur.
|
occur
|
||||||
--rootdir=ROOTDIR Define root directory for tests. Can be relative
|
--rootdir=ROOTDIR Define root directory for tests. Can be relative
|
||||||
path: 'root_dir', './root_dir',
|
path: 'root_dir', './root_dir',
|
||||||
'root_dir/another_dir/'; absolute path:
|
'root_dir/another_dir/'; absolute path:
|
||||||
'/home/user/root_dir'; path with variables:
|
'/home/user/root_dir'; path with variables:
|
||||||
'$HOME/root_dir'.
|
'$HOME/root_dir'.
|
||||||
|
|
||||||
collection:
|
Collection:
|
||||||
--collect-only, --co only collect tests, don't execute them.
|
--collect-only, --co Only collect tests, don't execute them
|
||||||
--pyargs try to interpret all arguments as python packages.
|
--pyargs Try to interpret all arguments as Python packages
|
||||||
--ignore=path ignore path during collection (multi-allowed).
|
--ignore=path Ignore path during collection (multi-allowed)
|
||||||
--ignore-glob=path ignore path pattern during collection (multi-
|
--ignore-glob=path Ignore path pattern during collection (multi-
|
||||||
allowed).
|
allowed)
|
||||||
--deselect=nodeid_prefix
|
--deselect=nodeid_prefix
|
||||||
deselect item (via node id prefix) during collection
|
Deselect item (via node id prefix) during collection
|
||||||
(multi-allowed).
|
(multi-allowed)
|
||||||
--confcutdir=dir only load conftest.py's relative to specified dir.
|
--confcutdir=dir Only load conftest.py's relative to specified dir
|
||||||
--noconftest Don't load any conftest.py files.
|
--noconftest Don't load any conftest.py files
|
||||||
--keep-duplicates Keep duplicate tests.
|
--keep-duplicates Keep duplicate tests
|
||||||
--collect-in-virtualenv
|
--collect-in-virtualenv
|
||||||
Don't ignore tests in a local virtualenv directory
|
Don't ignore tests in a local virtualenv directory
|
||||||
--import-mode={prepend,append,importlib}
|
--import-mode={prepend,append,importlib}
|
||||||
prepend/append to sys.path when importing test
|
Prepend/append to sys.path when importing test
|
||||||
modules and conftest files, default is to prepend.
|
modules and conftest files. Default: prepend.
|
||||||
--doctest-modules run doctests in all .py modules
|
--doctest-modules Run doctests in all .py modules
|
||||||
--doctest-report={none,cdiff,ndiff,udiff,only_first_failure}
|
--doctest-report={none,cdiff,ndiff,udiff,only_first_failure}
|
||||||
choose another output format for diffs on doctest
|
Choose another output format for diffs on doctest
|
||||||
failure
|
failure
|
||||||
--doctest-glob=pat doctests file matching pattern, default: test*.txt
|
--doctest-glob=pat Doctests file matching pattern, default: test*.txt
|
||||||
--doctest-ignore-import-errors
|
--doctest-ignore-import-errors
|
||||||
ignore doctest ImportErrors
|
Ignore doctest ImportErrors
|
||||||
--doctest-continue-on-failure
|
--doctest-continue-on-failure
|
||||||
for a given doctest, continue to run after the first
|
For a given doctest, continue to run after the first
|
||||||
failure
|
failure
|
||||||
|
|
||||||
test session debugging and configuration:
|
Test session debugging and configuration:
|
||||||
--basetemp=dir base temporary directory for this test run.(warning:
|
--basetemp=dir Base temporary directory for this test run. (Warning:
|
||||||
this directory is removed if it exists)
|
this directory is removed if it exists.)
|
||||||
-V, --version display pytest version and information about
|
-V, --version Display pytest version and information about
|
||||||
plugins. When given twice, also display information
|
plugins. When given twice, also display information
|
||||||
about plugins.
|
about plugins.
|
||||||
-h, --help show help message and configuration info
|
-h, --help Show help message and configuration info
|
||||||
-p name early-load given plugin module name or entry point
|
-p name Early-load given plugin module name or entry point
|
||||||
(multi-allowed).
|
(multi-allowed)
|
||||||
To avoid loading of plugins, use the `no:` prefix,
|
To avoid loading of plugins, use the `no:` prefix,
|
||||||
e.g. `no:doctest`.
|
e.g. `no:doctest`
|
||||||
--trace-config trace considerations of conftest.py files.
|
--trace-config Trace considerations of conftest.py files
|
||||||
--debug=[DEBUG_FILE_NAME]
|
--debug=[DEBUG_FILE_NAME]
|
||||||
store internal tracing debug information in this log
|
Store internal tracing debug information in this log
|
||||||
file.
|
file.
|
||||||
This file is opened with 'w' and truncated as a
|
This file is opened with 'w' and truncated as a
|
||||||
result, care advised.
|
result, care advised.
|
||||||
Defaults to 'pytestdebug.log'.
|
Default: pytestdebug.log.
|
||||||
-o OVERRIDE_INI, --override-ini=OVERRIDE_INI
|
-o OVERRIDE_INI, --override-ini=OVERRIDE_INI
|
||||||
override ini option with "option=value" style, e.g.
|
Override ini option with "option=value" style, e.g.
|
||||||
`-o xfail_strict=True -o cache_dir=cache`.
|
`-o xfail_strict=True -o cache_dir=cache`
|
||||||
--assert=MODE Control assertion debugging tools.
|
--assert=MODE Control assertion debugging tools.
|
||||||
'plain' performs no assertion debugging.
|
'plain' performs no assertion debugging.
|
||||||
'rewrite' (the default) rewrites assert statements
|
'rewrite' (the default) rewrites assert statements
|
||||||
in test modules on import to provide assert
|
in test modules on import to provide assert
|
||||||
expression information.
|
expression information.
|
||||||
--setup-only only setup fixtures, do not execute tests.
|
--setup-only Only setup fixtures, do not execute tests
|
||||||
--setup-show show setup of fixtures while executing tests.
|
--setup-show Show setup of fixtures while executing tests
|
||||||
--setup-plan show what fixtures and tests would be executed but
|
--setup-plan Show what fixtures and tests would be executed but
|
||||||
don't execute anything.
|
don't execute anything
|
||||||
|
|
||||||
logging:
|
Logging:
|
||||||
--log-level=LEVEL level of messages to catch/display.
|
--log-level=LEVEL Level of messages to catch/display.
|
||||||
Not set by default, so it depends on the root/parent
|
Not set by default, so it depends on the root/parent
|
||||||
log handler's effective level, where it is "WARNING"
|
log handler's effective level, where it is "WARNING"
|
||||||
by default.
|
by default.
|
||||||
--log-format=LOG_FORMAT
|
--log-format=LOG_FORMAT
|
||||||
log format as used by the logging module.
|
Log format used by the logging module
|
||||||
--log-date-format=LOG_DATE_FORMAT
|
--log-date-format=LOG_DATE_FORMAT
|
||||||
log date format as used by the logging module.
|
Log date format used by the logging module
|
||||||
--log-cli-level=LOG_CLI_LEVEL
|
--log-cli-level=LOG_CLI_LEVEL
|
||||||
cli logging level.
|
CLI logging level
|
||||||
--log-cli-format=LOG_CLI_FORMAT
|
--log-cli-format=LOG_CLI_FORMAT
|
||||||
log format as used by the logging module.
|
Log format used by the logging module
|
||||||
--log-cli-date-format=LOG_CLI_DATE_FORMAT
|
--log-cli-date-format=LOG_CLI_DATE_FORMAT
|
||||||
log date format as used by the logging module.
|
Log date format used by the logging module
|
||||||
--log-file=LOG_FILE path to a file when logging will be written to.
|
--log-file=LOG_FILE Path to a file when logging will be written to
|
||||||
--log-file-level=LOG_FILE_LEVEL
|
--log-file-level=LOG_FILE_LEVEL
|
||||||
log file logging level.
|
Log file logging level
|
||||||
--log-file-format=LOG_FILE_FORMAT
|
--log-file-format=LOG_FILE_FORMAT
|
||||||
log format as used by the logging module.
|
Log format used by the logging module
|
||||||
--log-file-date-format=LOG_FILE_DATE_FORMAT
|
--log-file-date-format=LOG_FILE_DATE_FORMAT
|
||||||
log date format as used by the logging module.
|
Log date format used by the logging module
|
||||||
--log-auto-indent=LOG_AUTO_INDENT
|
--log-auto-indent=LOG_AUTO_INDENT
|
||||||
Auto-indent multiline messages passed to the logging
|
Auto-indent multiline messages passed to the logging
|
||||||
module. Accepts true|on, false|off or an integer.
|
module. Accepts true|on, false|off or an integer.
|
||||||
|
|
||||||
[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:
|
[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:
|
||||||
|
|
||||||
markers (linelist): markers for test functions
|
markers (linelist): Markers for test functions
|
||||||
empty_parameter_set_mark (string):
|
empty_parameter_set_mark (string):
|
||||||
default marker for empty parametersets
|
Default marker for empty parametersets
|
||||||
norecursedirs (args): directory patterns to avoid for recursion
|
norecursedirs (args): Directory patterns to avoid for recursion
|
||||||
testpaths (args): directories to search for tests when no files or
|
testpaths (args): Directories to search for tests when no files or
|
||||||
directories are given in the command line.
|
directories are given in the command line
|
||||||
filterwarnings (linelist):
|
filterwarnings (linelist):
|
||||||
Each line specifies a pattern for
|
Each line specifies a pattern for
|
||||||
warnings.filterwarnings. Processed after
|
warnings.filterwarnings. Processed after
|
||||||
-W/--pythonwarnings.
|
-W/--pythonwarnings.
|
||||||
usefixtures (args): list of default fixtures to be used with this
|
usefixtures (args): List of default fixtures to be used with this
|
||||||
project
|
project
|
||||||
python_files (args): glob-style file patterns for Python test module
|
python_files (args): Glob-style file patterns for Python test module
|
||||||
discovery
|
discovery
|
||||||
python_classes (args):
|
python_classes (args):
|
||||||
prefixes or glob names for Python test class
|
Prefixes or glob names for Python test class
|
||||||
discovery
|
discovery
|
||||||
python_functions (args):
|
python_functions (args):
|
||||||
prefixes or glob names for Python test function and
|
Prefixes or glob names for Python test function and
|
||||||
method discovery
|
method discovery
|
||||||
disable_test_id_escaping_and_forfeit_all_rights_to_community_support (bool):
|
disable_test_id_escaping_and_forfeit_all_rights_to_community_support (bool):
|
||||||
disable string escape non-ascii characters, might
|
Disable string escape non-ASCII characters, might
|
||||||
cause unwanted side effects(use at your own risk)
|
cause unwanted side effects(use at your own risk)
|
||||||
console_output_style (string):
|
console_output_style (string):
|
||||||
console output: "classic", or with additional
|
Console output: "classic", or with additional
|
||||||
progress information ("progress" (percentage) |
|
progress information ("progress" (percentage) |
|
||||||
"count").
|
"count")
|
||||||
xfail_strict (bool): default for the strict parameter of xfail markers
|
xfail_strict (bool): Default for the strict parameter of xfail markers
|
||||||
when not given explicitly (default: False)
|
when not given explicitly (default: False)
|
||||||
enable_assertion_pass_hook (bool):
|
enable_assertion_pass_hook (bool):
|
||||||
Enables the pytest_assertion_pass hook.Make sure to
|
Enables the pytest_assertion_pass hook. Make sure to
|
||||||
delete any previously generated pyc cache files.
|
delete any previously generated pyc cache files.
|
||||||
junit_suite_name (string):
|
junit_suite_name (string):
|
||||||
Test suite name for JUnit report
|
Test suite name for JUnit report
|
||||||
|
@ -2053,45 +1997,45 @@ All the command-line flags can be obtained by running ``pytest --help``::
|
||||||
junit_family (string):
|
junit_family (string):
|
||||||
Emit XML for schema: one of legacy|xunit1|xunit2
|
Emit XML for schema: one of legacy|xunit1|xunit2
|
||||||
doctest_optionflags (args):
|
doctest_optionflags (args):
|
||||||
option flags for doctests
|
Option flags for doctests
|
||||||
doctest_encoding (string):
|
doctest_encoding (string):
|
||||||
encoding used for doctest files
|
Encoding used for doctest files
|
||||||
cache_dir (string): cache directory path.
|
cache_dir (string): Cache directory path
|
||||||
log_level (string): default value for --log-level
|
log_level (string): Default value for --log-level
|
||||||
log_format (string): default value for --log-format
|
log_format (string): Default value for --log-format
|
||||||
log_date_format (string):
|
log_date_format (string):
|
||||||
default value for --log-date-format
|
Default value for --log-date-format
|
||||||
log_cli (bool): enable log display during test run (also known as
|
log_cli (bool): Enable log display during test run (also known as
|
||||||
"live logging").
|
"live logging")
|
||||||
log_cli_level (string):
|
log_cli_level (string):
|
||||||
default value for --log-cli-level
|
Default value for --log-cli-level
|
||||||
log_cli_format (string):
|
log_cli_format (string):
|
||||||
default value for --log-cli-format
|
Default value for --log-cli-format
|
||||||
log_cli_date_format (string):
|
log_cli_date_format (string):
|
||||||
default value for --log-cli-date-format
|
Default value for --log-cli-date-format
|
||||||
log_file (string): default value for --log-file
|
log_file (string): Default value for --log-file
|
||||||
log_file_level (string):
|
log_file_level (string):
|
||||||
default value for --log-file-level
|
Default value for --log-file-level
|
||||||
log_file_format (string):
|
log_file_format (string):
|
||||||
default value for --log-file-format
|
Default value for --log-file-format
|
||||||
log_file_date_format (string):
|
log_file_date_format (string):
|
||||||
default value for --log-file-date-format
|
Default value for --log-file-date-format
|
||||||
log_auto_indent (string):
|
log_auto_indent (string):
|
||||||
default value for --log-auto-indent
|
Default value for --log-auto-indent
|
||||||
pythonpath (paths): Add paths to sys.path
|
pythonpath (paths): Add paths to sys.path
|
||||||
faulthandler_timeout (string):
|
faulthandler_timeout (string):
|
||||||
Dump the traceback of all threads if a test takes
|
Dump the traceback of all threads if a test takes
|
||||||
more than TIMEOUT seconds to finish.
|
more than TIMEOUT seconds to finish
|
||||||
addopts (args): extra command line options
|
addopts (args): Extra command line options
|
||||||
minversion (string): minimally required pytest version
|
minversion (string): Minimally required pytest version
|
||||||
required_plugins (args):
|
required_plugins (args):
|
||||||
plugins that must be present for pytest to run
|
Plugins that must be present for pytest to run
|
||||||
|
|
||||||
environment variables:
|
Environment variables:
|
||||||
PYTEST_ADDOPTS extra command line options
|
PYTEST_ADDOPTS Extra command line options
|
||||||
PYTEST_PLUGINS comma-separated plugins to load during startup
|
PYTEST_PLUGINS Comma-separated plugins to load during startup
|
||||||
PYTEST_DISABLE_PLUGIN_AUTOLOAD set to disable plugin auto-loading
|
PYTEST_DISABLE_PLUGIN_AUTOLOAD Set to disable plugin auto-loading
|
||||||
PYTEST_DEBUG set to enable debug tracing of pytest's internals
|
PYTEST_DEBUG Set to enable debug tracing of pytest's internals
|
||||||
|
|
||||||
|
|
||||||
to see available markers type: pytest --markers
|
to see available markers type: pytest --markers
|
||||||
|
|
|
@ -17,6 +17,8 @@ Books
|
||||||
Talks and blog postings
|
Talks and blog postings
|
||||||
---------------------------------------------
|
---------------------------------------------
|
||||||
|
|
||||||
|
- Training: `pytest - simple, rapid and fun testing with Python <https://www.youtube.com/watch?v=ofPHJrAOaTE>`_, Florian Bruhin, PyConDE 2022
|
||||||
|
|
||||||
- `pytest: Simple, rapid and fun testing with Python, <https://youtu.be/cSJ-X3TbQ1c?t=15752>`_ (@ 4:22:32), Florian Bruhin, WeAreDevelopers World Congress 2021
|
- `pytest: Simple, rapid and fun testing with Python, <https://youtu.be/cSJ-X3TbQ1c?t=15752>`_ (@ 4:22:32), Florian Bruhin, WeAreDevelopers World Congress 2021
|
||||||
|
|
||||||
- Webinar: `pytest: Test Driven Development für Python (German) <https://bruhin.software/ins-pytest/>`_, Florian Bruhin, via mylearning.ch, 2020
|
- Webinar: `pytest: Test Driven Development für Python (German) <https://bruhin.software/ins-pytest/>`_, Florian Bruhin, via mylearning.ch, 2020
|
||||||
|
|
|
@ -46,7 +46,6 @@ install_requires =
|
||||||
packaging
|
packaging
|
||||||
pluggy>=0.12,<2.0
|
pluggy>=0.12,<2.0
|
||||||
py>=1.8.2
|
py>=1.8.2
|
||||||
atomicwrites>=1.0;sys_platform=="win32"
|
|
||||||
colorama;sys_platform=="win32"
|
colorama;sys_platform=="win32"
|
||||||
importlib-metadata>=0.12;python_version<"3.8"
|
importlib-metadata>=0.12;python_version<"3.8"
|
||||||
tomli>=1.0.0;python_version<"3.11"
|
tomli>=1.0.0;python_version<"3.11"
|
||||||
|
|
|
@ -39,7 +39,7 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
"enable_assertion_pass_hook",
|
"enable_assertion_pass_hook",
|
||||||
type="bool",
|
type="bool",
|
||||||
default=False,
|
default=False,
|
||||||
help="Enables the pytest_assertion_pass hook."
|
help="Enables the pytest_assertion_pass hook. "
|
||||||
"Make sure to delete any previously generated pyc cache files.",
|
"Make sure to delete any previously generated pyc cache files.",
|
||||||
)
|
)
|
||||||
|
|
||||||
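The help text edited above belongs to the ``enable_assertion_pass_hook`` ini
option; a hedged sketch of the hook it enables (the print format is
arbitrary, and stale ``.pyc`` files must be deleted first, as the help text
warns):

.. code-block:: python

    # conftest.py
    def pytest_assertion_pass(item, lineno, orig, expl):
        # called for every assertion that passes after rewriting
        print(f"{item.nodeid}:{lineno}: assert {orig} -> {expl}")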
|
|
|
@ -190,7 +190,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# For matching the name it must be as if it was a filename.
|
# For matching the name it must be as if it was a filename.
|
||||||
path = PurePath(os.path.sep.join(parts) + ".py")
|
path = PurePath(*parts).with_suffix(".py")
|
||||||
|
|
||||||
for pat in self.fnpats:
|
for pat in self.fnpats:
|
||||||
# if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based
|
# if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based
|
||||||
|
@ -281,7 +281,9 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
|
||||||
else:
|
else:
|
||||||
from importlib.resources.readers import FileReader
|
from importlib.resources.readers import FileReader
|
||||||
|
|
||||||
return FileReader(types.SimpleNamespace(path=self._rewritten_names[name]))
|
return FileReader( # type:ignore[no-any-return]
|
||||||
|
types.SimpleNamespace(path=self._rewritten_names[name])
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def _write_pyc_fp(
|
def _write_pyc_fp(
|
||||||
|
@ -302,52 +304,28 @@ def _write_pyc_fp(
|
||||||
fp.write(marshal.dumps(co))
|
fp.write(marshal.dumps(co))
|
||||||
|
|
||||||
|
|
||||||
if sys.platform == "win32":
|
def _write_pyc(
|
||||||
from atomicwrites import atomic_write
|
|
||||||
|
|
||||||
def _write_pyc(
|
|
||||||
state: "AssertionState",
|
state: "AssertionState",
|
||||||
co: types.CodeType,
|
co: types.CodeType,
|
||||||
source_stat: os.stat_result,
|
source_stat: os.stat_result,
|
||||||
pyc: Path,
|
pyc: Path,
|
||||||
) -> bool:
|
) -> bool:
|
||||||
try:
|
|
||||||
with atomic_write(os.fspath(pyc), mode="wb", overwrite=True) as fp:
|
|
||||||
_write_pyc_fp(fp, source_stat, co)
|
|
||||||
except OSError as e:
|
|
||||||
state.trace(f"error writing pyc file at {pyc}: {e}")
|
|
||||||
# we ignore any failure to write the cache file
|
|
||||||
# there are many reasons, permission-denied, pycache dir being a
|
|
||||||
# file etc.
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
else:
|
|
||||||
|
|
||||||
def _write_pyc(
|
|
||||||
state: "AssertionState",
|
|
||||||
co: types.CodeType,
|
|
||||||
source_stat: os.stat_result,
|
|
||||||
pyc: Path,
|
|
||||||
) -> bool:
|
|
||||||
proc_pyc = f"{pyc}.{os.getpid()}"
|
proc_pyc = f"{pyc}.{os.getpid()}"
|
||||||
try:
|
try:
|
||||||
fp = open(proc_pyc, "wb")
|
with open(proc_pyc, "wb") as fp:
|
||||||
|
_write_pyc_fp(fp, source_stat, co)
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
|
state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
|
||||||
return False
|
return False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
_write_pyc_fp(fp, source_stat, co)
|
os.replace(proc_pyc, pyc)
|
||||||
os.rename(proc_pyc, pyc)
|
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
state.trace(f"error writing pyc file at {pyc}: {e}")
|
state.trace(f"error writing pyc file at {pyc}: {e}")
|
||||||
# we ignore any failure to write the cache file
|
# we ignore any failure to write the cache file
|
||||||
# there are many reasons, permission-denied, pycache dir being a
|
# there are many reasons, permission-denied, pycache dir being a
|
||||||
# file etc.
|
# file etc.
|
||||||
return False
|
return False
|
||||||
finally:
|
|
||||||
fp.close()
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
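The hunk above drops the Windows-only ``atomicwrites`` code path: the
rewritten pyc is now written to a pid-suffixed temporary name and moved into
place with ``os.replace``, so readers never observe a partial file. A
standalone sketch of the same pattern (function and file names are invented):

.. code-block:: python

    import os


    def write_atomically(path: str, data: bytes) -> bool:
        tmp = f"{path}.{os.getpid()}"
        try:
            with open(tmp, "wb") as fp:
                fp.write(data)
            # os.replace() atomically swaps in the new file, even if one exists
            os.replace(tmp, path)
        except OSError:
            return False
        return True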
|
|
|
@ -157,7 +157,7 @@ class Cache:
|
||||||
"""
|
"""
|
||||||
path = self._getvaluepath(key)
|
path = self._getvaluepath(key)
|
||||||
try:
|
try:
|
||||||
with path.open("r") as f:
|
with path.open("r", encoding="UTF-8") as f:
|
||||||
return json.load(f)
|
return json.load(f)
|
||||||
except (ValueError, OSError):
|
except (ValueError, OSError):
|
||||||
return default
|
return default
|
||||||
|
@ -184,9 +184,9 @@ class Cache:
|
||||||
return
|
return
|
||||||
if not cache_dir_exists_already:
|
if not cache_dir_exists_already:
|
||||||
self._ensure_supporting_files()
|
self._ensure_supporting_files()
|
||||||
data = json.dumps(value, indent=2)
|
data = json.dumps(value, ensure_ascii=False, indent=2)
|
||||||
try:
|
try:
|
||||||
f = path.open("w")
|
f = path.open("w", encoding="UTF-8")
|
||||||
except OSError:
|
except OSError:
|
||||||
self.warn("cache could not write path {path}", path=path, _ispytest=True)
|
self.warn("cache could not write path {path}", path=path, _ispytest=True)
|
||||||
else:
|
else:
|
||||||
|
@ -196,7 +196,7 @@ class Cache:
|
||||||
def _ensure_supporting_files(self) -> None:
|
def _ensure_supporting_files(self) -> None:
|
||||||
"""Create supporting files in the cache dir that are not really part of the cache."""
|
"""Create supporting files in the cache dir that are not really part of the cache."""
|
||||||
readme_path = self._cachedir / "README.md"
|
readme_path = self._cachedir / "README.md"
|
||||||
readme_path.write_text(README_CONTENT)
|
readme_path.write_text(README_CONTENT, encoding="UTF-8")
|
||||||
|
|
||||||
gitignore_path = self._cachedir.joinpath(".gitignore")
|
gitignore_path = self._cachedir.joinpath(".gitignore")
|
||||||
msg = "# Created by pytest automatically.\n*\n"
|
msg = "# Created by pytest automatically.\n*\n"
|
||||||
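The ``Cache`` hunks above pin the JSON files to UTF-8 and write with
``ensure_ascii=False``. For context, a hedged sketch of the public
``config.cache`` API those methods back (the key and cached value are
invented):

.. code-block:: python

    import pytest


    @pytest.fixture
    def expensive_value(pytestconfig):
        # values round-trip through JSON, so non-ASCII text is preserved as-is
        value = pytestconfig.cache.get("example/expensive", None)
        if value is None:
            value = {"greeting": "grüß dich"}
            pytestconfig.cache.set("example/expensive", value)
        return value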
|
@ -440,7 +440,7 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
"--last-failed",
|
"--last-failed",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
dest="lf",
|
dest="lf",
|
||||||
help="rerun only the tests that failed "
|
help="Rerun only the tests that failed "
|
||||||
"at the last run (or all if none failed)",
|
"at the last run (or all if none failed)",
|
||||||
)
|
)
|
||||||
group.addoption(
|
group.addoption(
|
||||||
|
@ -448,7 +448,7 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
"--failed-first",
|
"--failed-first",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
dest="failedfirst",
|
dest="failedfirst",
|
||||||
help="run all tests, but run the last failures first.\n"
|
help="Run all tests, but run the last failures first. "
|
||||||
"This may re-order tests and thus lead to "
|
"This may re-order tests and thus lead to "
|
||||||
"repeated fixture setup/teardown.",
|
"repeated fixture setup/teardown.",
|
||||||
)
|
)
|
||||||
|
@ -457,7 +457,7 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
"--new-first",
|
"--new-first",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
dest="newfirst",
|
dest="newfirst",
|
||||||
help="run tests from new files first, then the rest of the tests "
|
help="Run tests from new files first, then the rest of the tests "
|
||||||
"sorted by file mtime",
|
"sorted by file mtime",
|
||||||
)
|
)
|
||||||
group.addoption(
|
group.addoption(
|
||||||
|
@ -466,7 +466,7 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
nargs="?",
|
nargs="?",
|
||||||
dest="cacheshow",
|
dest="cacheshow",
|
||||||
help=(
|
help=(
|
||||||
"show cache contents, don't perform collection or tests. "
|
"Show cache contents, don't perform collection or tests. "
|
||||||
"Optional argument: glob (default: '*')."
|
"Optional argument: glob (default: '*')."
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
@ -474,12 +474,12 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
"--cache-clear",
|
"--cache-clear",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
dest="cacheclear",
|
dest="cacheclear",
|
||||||
help="remove all cache contents at start of test run.",
|
help="Remove all cache contents at start of test run",
|
||||||
)
|
)
|
||||||
cache_dir_default = ".pytest_cache"
|
cache_dir_default = ".pytest_cache"
|
||||||
if "TOX_ENV_DIR" in os.environ:
|
if "TOX_ENV_DIR" in os.environ:
|
||||||
cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
|
cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
|
||||||
parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.")
|
parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path")
|
||||||
group.addoption(
|
group.addoption(
|
||||||
"--lfnf",
|
"--lfnf",
|
||||||
"--last-failed-no-failures",
|
"--last-failed-no-failures",
|
||||||
|
@ -487,7 +487,7 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
dest="last_failed_no_failures",
|
dest="last_failed_no_failures",
|
||||||
choices=("all", "none"),
|
choices=("all", "none"),
|
||||||
default="all",
|
default="all",
|
||||||
help="which tests to run with no previously (known) failures.",
|
help="Which tests to run with no previously (known) failures",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -42,14 +42,14 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
default="fd",
|
default="fd",
|
||||||
metavar="method",
|
metavar="method",
|
||||||
choices=["fd", "sys", "no", "tee-sys"],
|
choices=["fd", "sys", "no", "tee-sys"],
|
||||||
help="per-test capturing method: one of fd|sys|no|tee-sys.",
|
help="Per-test capturing method: one of fd|sys|no|tee-sys",
|
||||||
)
|
)
|
||||||
group._addoption(
|
group._addoption(
|
||||||
"-s",
|
"-s",
|
||||||
action="store_const",
|
action="store_const",
|
||||||
const="no",
|
const="no",
|
||||||
dest="capture",
|
dest="capture",
|
||||||
help="shortcut for --capture=no.",
|
help="Shortcut for --capture=no",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -203,12 +203,39 @@ class DontReadFromInput:
|
||||||
def fileno(self) -> int:
|
def fileno(self) -> int:
|
||||||
raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()")
|
raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()")
|
||||||
|
|
||||||
|
def flush(self) -> None:
|
||||||
|
raise UnsupportedOperation("redirected stdin is pseudofile, has no flush()")
|
||||||
|
|
||||||
def isatty(self) -> bool:
|
def isatty(self) -> bool:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def close(self) -> None:
|
def close(self) -> None:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
def readable(self) -> bool:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def seek(self, offset: int) -> int:
|
||||||
|
raise UnsupportedOperation("redirected stdin is pseudofile, has no seek(int)")
|
||||||
|
|
||||||
|
def seekable(self) -> bool:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def tell(self) -> int:
|
||||||
|
raise UnsupportedOperation("redirected stdin is pseudofile, has no tell()")
|
||||||
|
|
||||||
|
def truncate(self, size: int) -> None:
|
||||||
|
raise UnsupportedOperation("cannont truncate stdin")
|
||||||
|
|
||||||
|
def write(self, *args) -> None:
|
||||||
|
raise UnsupportedOperation("cannot write to stdin")
|
||||||
|
|
||||||
|
def writelines(self, *args) -> None:
|
||||||
|
raise UnsupportedOperation("Cannot write to stdin")
|
||||||
|
|
||||||
|
def writable(self) -> bool:
|
||||||
|
return False
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def buffer(self):
|
def buffer(self):
|
||||||
return self
|
return self
|
||||||
|
@ -876,11 +903,22 @@ class CaptureFixture(Generic[AnyStr]):
|
||||||
|
|
||||||
@fixture
|
@fixture
|
||||||
def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
|
def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
|
||||||
"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
|
r"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
|
||||||
|
|
||||||
The captured output is made available via ``capsys.readouterr()`` method
|
The captured output is made available via ``capsys.readouterr()`` method
|
||||||
calls, which return a ``(out, err)`` namedtuple.
|
calls, which return a ``(out, err)`` namedtuple.
|
||||||
``out`` and ``err`` will be ``text`` objects.
|
``out`` and ``err`` will be ``text`` objects.
|
||||||
|
|
||||||
|
Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
def test_output(capsys):
|
||||||
|
print("hello")
|
||||||
|
captured = capsys.readouterr()
|
||||||
|
assert captured.out == "hello\n"
|
||||||
"""
|
"""
|
||||||
capman = request.config.pluginmanager.getplugin("capturemanager")
|
capman = request.config.pluginmanager.getplugin("capturemanager")
|
||||||
capture_fixture = CaptureFixture[str](SysCapture, request, _ispytest=True)
|
capture_fixture = CaptureFixture[str](SysCapture, request, _ispytest=True)
|
||||||
|
@ -893,11 +931,22 @@ def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
|
||||||
|
|
||||||
@fixture
|
@fixture
|
||||||
def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
|
def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
|
||||||
"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
|
r"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
|
||||||
|
|
||||||
The captured output is made available via ``capsysbinary.readouterr()``
|
The captured output is made available via ``capsysbinary.readouterr()``
|
||||||
method calls, which return a ``(out, err)`` namedtuple.
|
method calls, which return a ``(out, err)`` namedtuple.
|
||||||
``out`` and ``err`` will be ``bytes`` objects.
|
``out`` and ``err`` will be ``bytes`` objects.
|
||||||
|
|
||||||
|
Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
def test_output(capsysbinary):
|
||||||
|
print("hello")
|
||||||
|
captured = capsysbinary.readouterr()
|
||||||
|
assert captured.out == b"hello\n"
|
||||||
"""
|
"""
|
||||||
capman = request.config.pluginmanager.getplugin("capturemanager")
|
capman = request.config.pluginmanager.getplugin("capturemanager")
|
||||||
capture_fixture = CaptureFixture[bytes](SysCaptureBinary, request, _ispytest=True)
|
capture_fixture = CaptureFixture[bytes](SysCaptureBinary, request, _ispytest=True)
|
||||||
|
@ -910,11 +959,22 @@ def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None,
|
||||||
|
|
||||||
@fixture
|
@fixture
|
||||||
def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
|
def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
|
||||||
"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
|
r"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
|
||||||
|
|
||||||
The captured output is made available via ``capfd.readouterr()`` method
|
The captured output is made available via ``capfd.readouterr()`` method
|
||||||
calls, which return a ``(out, err)`` namedtuple.
|
calls, which return a ``(out, err)`` namedtuple.
|
||||||
``out`` and ``err`` will be ``text`` objects.
|
``out`` and ``err`` will be ``text`` objects.
|
||||||
|
|
||||||
|
Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
def test_system_echo(capfd):
|
||||||
|
os.system('echo "hello"')
|
||||||
|
captured = capfd.readouterr()
|
||||||
|
assert captured.out == "hello\n"
|
||||||
"""
|
"""
|
||||||
capman = request.config.pluginmanager.getplugin("capturemanager")
|
capman = request.config.pluginmanager.getplugin("capturemanager")
|
||||||
capture_fixture = CaptureFixture[str](FDCapture, request, _ispytest=True)
|
capture_fixture = CaptureFixture[str](FDCapture, request, _ispytest=True)
|
||||||
|
@ -927,11 +987,23 @@ def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
|
||||||
|
|
||||||
@fixture
|
@fixture
|
||||||
def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
|
def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
|
||||||
"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
|
r"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
|
||||||
|
|
||||||
The captured output is made available via ``capfd.readouterr()`` method
|
The captured output is made available via ``capfd.readouterr()`` method
|
||||||
calls, which return a ``(out, err)`` namedtuple.
|
calls, which return a ``(out, err)`` namedtuple.
|
||||||
``out`` and ``err`` will be ``byte`` objects.
|
``out`` and ``err`` will be ``byte`` objects.
|
||||||
|
|
||||||
|
Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
def test_system_echo(capfdbinary):
|
||||||
|
os.system('echo "hello"')
|
||||||
|
captured = capfdbinary.readouterr()
|
||||||
|
assert captured.out == b"hello\n"
|
||||||
|
|
||||||
"""
|
"""
|
||||||
capman = request.config.pluginmanager.getplugin("capturemanager")
|
capman = request.config.pluginmanager.getplugin("capturemanager")
|
||||||
capture_fixture = CaptureFixture[bytes](FDCaptureBinary, request, _ispytest=True)
|
capture_fixture = CaptureFixture[bytes](FDCaptureBinary, request, _ispytest=True)
|
||||||
|
|
|
@ -10,6 +10,7 @@ from pathlib import Path
|
||||||
from typing import Any
|
from typing import Any
|
||||||
from typing import Callable
|
from typing import Callable
|
||||||
from typing import Generic
|
from typing import Generic
|
||||||
|
from typing import NoReturn
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
from typing import Tuple
|
from typing import Tuple
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
@ -20,7 +21,6 @@ import attr
|
||||||
import py
|
import py
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from typing import NoReturn
|
|
||||||
from typing_extensions import Final
|
from typing_extensions import Final
|
||||||
|
|
||||||
|
|
||||||
|
@ -403,5 +403,5 @@ else:
|
||||||
# previously.
|
# previously.
|
||||||
#
|
#
|
||||||
# This also work for Enums (if you use `is` to compare) and Literals.
|
# This also work for Enums (if you use `is` to compare) and Literals.
|
||||||
def assert_never(value: "NoReturn") -> "NoReturn":
|
def assert_never(value: NoReturn) -> NoReturn:
|
||||||
assert False, f"Unhandled value: {value} ({type(value).__name__})"
|
assert False, f"Unhandled value: {value} ({type(value).__name__})"
|
||||||
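The comment above notes that ``assert_never`` also works with Enums and
Literals; a hedged sketch of the exhaustiveness-check pattern (the ``handle``
function and its modes are invented, and ``_pytest.compat`` is an internal
module shown only for illustration):

.. code-block:: python

    from typing import Literal

    from _pytest.compat import assert_never


    def handle(mode: Literal["fd", "sys"]) -> str:
        if mode == "fd":
            return "capture file descriptors"
        elif mode == "sys":
            return "capture sys.stdout/sys.stderr"
        else:
            # mypy flags this call if one of the Literal values is not handled above
            assert_never(mode)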
|
|
|
@ -3,6 +3,7 @@ import argparse
|
||||||
import collections.abc
|
import collections.abc
|
||||||
import copy
|
import copy
|
||||||
import enum
|
import enum
|
||||||
|
import glob
|
||||||
import inspect
|
import inspect
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
@ -899,6 +900,19 @@ class Config:
|
||||||
dir: Path
|
dir: Path
|
||||||
"""The directory from which :func:`pytest.main` was invoked."""
|
"""The directory from which :func:`pytest.main` was invoked."""
|
||||||
|
|
||||||
|
class ArgsSource(enum.Enum):
|
||||||
|
"""Indicates the source of the test arguments.
|
||||||
|
|
||||||
|
.. versionadded:: 7.2
|
||||||
|
"""
|
||||||
|
|
||||||
|
#: Command line arguments.
|
||||||
|
ARGS = enum.auto()
|
||||||
|
#: Invocation directory.
|
||||||
|
INCOVATION_DIR = enum.auto()
|
||||||
|
#: 'testpaths' configuration value.
|
||||||
|
TESTPATHS = enum.auto()
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
pluginmanager: PytestPluginManager,
|
pluginmanager: PytestPluginManager,
|
||||||
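``Config.ArgsSource`` added above records where the final argument list came
from; a later hunk in this commit stores it on the config as ``args_source``.
A hedged sketch of a plugin consulting it (the hook choice and messages are
illustrative):

.. code-block:: python

    # conftest.py
    def pytest_report_header(config):
        source = config.args_source  # assigned in Config._preparse(), see below
        if source is config.ArgsSource.TESTPATHS:
            return "args taken from the 'testpaths' ini option"
        if source is config.ArgsSource.INCOVATION_DIR:  # spelling as in the enum
            return "no args given; collecting the invocation directory"
        return None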
|
@ -1101,11 +1115,11 @@ class Config:
|
||||||
self.inicfg = inicfg
|
self.inicfg = inicfg
|
||||||
self._parser.extra_info["rootdir"] = str(self.rootpath)
|
self._parser.extra_info["rootdir"] = str(self.rootpath)
|
||||||
self._parser.extra_info["inifile"] = str(self.inipath)
|
self._parser.extra_info["inifile"] = str(self.inipath)
|
||||||
self._parser.addini("addopts", "extra command line options", "args")
|
self._parser.addini("addopts", "Extra command line options", "args")
|
||||||
self._parser.addini("minversion", "minimally required pytest version")
|
self._parser.addini("minversion", "Minimally required pytest version")
|
||||||
self._parser.addini(
|
self._parser.addini(
|
||||||
"required_plugins",
|
"required_plugins",
|
||||||
"plugins that must be present for pytest to run",
|
"Plugins that must be present for pytest to run",
|
||||||
type="args",
|
type="args",
|
||||||
default=[],
|
default=[],
|
||||||
)
|
)
|
||||||
|
@ -1308,15 +1322,25 @@ class Config:
|
||||||
self.hook.pytest_cmdline_preparse(config=self, args=args)
|
self.hook.pytest_cmdline_preparse(config=self, args=args)
|
||||||
self._parser.after_preparse = True # type: ignore
|
self._parser.after_preparse = True # type: ignore
|
||||||
try:
|
try:
|
||||||
|
source = Config.ArgsSource.ARGS
|
||||||
args = self._parser.parse_setoption(
|
args = self._parser.parse_setoption(
|
||||||
args, self.option, namespace=self.option
|
args, self.option, namespace=self.option
|
||||||
)
|
)
|
||||||
if not args:
|
if not args:
|
||||||
if self.invocation_params.dir == self.rootpath:
|
if self.invocation_params.dir == self.rootpath:
|
||||||
args = self.getini("testpaths")
|
source = Config.ArgsSource.TESTPATHS
|
||||||
|
testpaths: List[str] = self.getini("testpaths")
|
||||||
|
if self.known_args_namespace.pyargs:
|
||||||
|
args = testpaths
|
||||||
|
else:
|
||||||
|
args = []
|
||||||
|
for path in testpaths:
|
||||||
|
args.extend(sorted(glob.iglob(path, recursive=True)))
|
||||||
if not args:
|
if not args:
|
||||||
|
source = Config.ArgsSource.INCOVATION_DIR
|
||||||
args = [str(self.invocation_params.dir)]
|
args = [str(self.invocation_params.dir)]
|
||||||
self.args = args
|
self.args = args
|
||||||
|
self.args_source = source
|
||||||
except PrintHelp:
|
except PrintHelp:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
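The ``_preparse`` hunk above now expands ``testpaths`` entries with
``glob.iglob(..., recursive=True)`` unless ``--pyargs`` is in effect. A
standalone sketch of that expansion (the paths are examples only):

.. code-block:: python

    import glob

    testpaths = ["tests", "doc/**/snippets"]
    args = []
    for path in testpaths:
        # recursive=True lets '**' cross directory levels
        args.extend(sorted(glob.iglob(path, recursive=True)))
    if not args:
        args = ["<invocation directory>"]
    print(args)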
|
|
|
@ -9,6 +9,7 @@ from typing import cast
|
||||||
from typing import Dict
|
from typing import Dict
|
||||||
from typing import List
|
from typing import List
|
||||||
from typing import Mapping
|
from typing import Mapping
|
||||||
|
from typing import NoReturn
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
from typing import Sequence
|
from typing import Sequence
|
||||||
from typing import Tuple
|
from typing import Tuple
|
||||||
|
@ -24,7 +25,6 @@ from _pytest.deprecated import ARGUMENT_TYPE_STR_CHOICE
|
||||||
from _pytest.deprecated import check_ispytest
|
from _pytest.deprecated import check_ispytest
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from typing import NoReturn
|
|
||||||
from typing_extensions import Literal
|
from typing_extensions import Literal
|
||||||
|
|
||||||
FILE_OR_DIR = "file_or_dir"
|
FILE_OR_DIR = "file_or_dir"
|
||||||
|
@ -48,7 +48,7 @@ class Parser:
|
||||||
_ispytest: bool = False,
|
_ispytest: bool = False,
|
||||||
) -> None:
|
) -> None:
|
||||||
check_ispytest(_ispytest)
|
check_ispytest(_ispytest)
|
||||||
self._anonymous = OptionGroup("custom options", parser=self, _ispytest=True)
|
self._anonymous = OptionGroup("Custom options", parser=self, _ispytest=True)
|
||||||
self._groups: List[OptionGroup] = []
|
self._groups: List[OptionGroup] = []
|
||||||
self._processopt = processopt
|
self._processopt = processopt
|
||||||
self._usage = usage
|
self._usage = usage
|
||||||
|
@ -227,7 +227,7 @@ class Argument:
|
||||||
_typ_map = {"int": int, "string": str, "float": float, "complex": complex}
|
_typ_map = {"int": int, "string": str, "float": float, "complex": complex}
|
||||||
|
|
||||||
def __init__(self, *names: str, **attrs: Any) -> None:
|
def __init__(self, *names: str, **attrs: Any) -> None:
|
||||||
"""Store parms in private vars for use in add_argument."""
|
"""Store params in private vars for use in add_argument."""
|
||||||
self._attrs = attrs
|
self._attrs = attrs
|
||||||
self._short_opts: List[str] = []
|
self._short_opts: List[str] = []
|
||||||
self._long_opts: List[str] = []
|
self._long_opts: List[str] = []
|
||||||
|
@ -403,7 +403,7 @@ class MyOptionParser(argparse.ArgumentParser):
|
||||||
# an usage error to provide more contextual information to the user.
|
# an usage error to provide more contextual information to the user.
|
||||||
         self.extra_info = extra_info if extra_info else {}

-    def error(self, message: str) -> "NoReturn":
+    def error(self, message: str) -> NoReturn:
         """Transform argparse error message into UsageError."""
         msg = f"{self.prog}: error: {message}"

@@ -96,6 +96,7 @@ def locate_config(
     and return a tuple of (rootdir, inifile, cfg-dict)."""
     config_names = [
         "pytest.ini",
+        ".pytest.ini",
         "pyproject.toml",
         "tox.ini",
         "setup.cfg",

@@ -46,21 +46,21 @@ def pytest_addoption(parser: Parser) -> None:
         "--pdb",
         dest="usepdb",
         action="store_true",
-        help="start the interactive Python debugger on errors or KeyboardInterrupt.",
+        help="Start the interactive Python debugger on errors or KeyboardInterrupt",
     )
     group._addoption(
         "--pdbcls",
         dest="usepdb_cls",
         metavar="modulename:classname",
         type=_validate_usepdb_cls,
-        help="specify a custom interactive Python debugger for use with --pdb."
+        help="Specify a custom interactive Python debugger for use with --pdb."
         "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
     )
     group._addoption(
         "--trace",
         dest="trace",
         action="store_true",
-        help="Immediately break when running each test.",
+        help="Immediately break when running each test",
     )


@@ -66,26 +66,26 @@ CHECKER_CLASS: Optional[Type["doctest.OutputChecker"]] = None
 def pytest_addoption(parser: Parser) -> None:
     parser.addini(
         "doctest_optionflags",
-        "option flags for doctests",
+        "Option flags for doctests",
         type="args",
         default=["ELLIPSIS"],
     )
     parser.addini(
-        "doctest_encoding", "encoding used for doctest files", default="utf-8"
+        "doctest_encoding", "Encoding used for doctest files", default="utf-8"
     )
     group = parser.getgroup("collect")
     group.addoption(
         "--doctest-modules",
         action="store_true",
         default=False,
-        help="run doctests in all .py modules",
+        help="Run doctests in all .py modules",
         dest="doctestmodules",
     )
     group.addoption(
         "--doctest-report",
         type=str.lower,
         default="udiff",
-        help="choose another output format for diffs on doctest failure",
+        help="Choose another output format for diffs on doctest failure",
         choices=DOCTEST_REPORT_CHOICES,
         dest="doctestreport",
     )

@@ -94,21 +94,21 @@ def pytest_addoption(parser: Parser) -> None:
         action="append",
         default=[],
         metavar="pat",
-        help="doctests file matching pattern, default: test*.txt",
+        help="Doctests file matching pattern, default: test*.txt",
         dest="doctestglob",
     )
     group.addoption(
         "--doctest-ignore-import-errors",
         action="store_true",
         default=False,
-        help="ignore doctest ImportErrors",
+        help="Ignore doctest ImportErrors",
         dest="doctest_ignore_import_errors",
     )
     group.addoption(
         "--doctest-continue-on-failure",
         action="store_true",
         default=False,
-        help="for a given doctest, continue to run after the first failure",
+        help="For a given doctest, continue to run after the first failure",
         dest="doctest_continue_on_failure",
     )

@@ -542,7 +542,11 @@ class DoctestModule(pytest.Module):
             )
         else:
             try:
-                module = import_path(self.path, root=self.config.rootpath)
+                module = import_path(
+                    self.path,
+                    root=self.config.rootpath,
+                    mode=self.config.getoption("importmode"),
+                )
             except ImportError:
                 if self.config.getvalue("doctest_ignore_import_errors"):
                     pytest.skip("unable to import module %r" % self.path)
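Aside (not part of the diff): the hunk above forwards the configured import mode to import_path, so doctest modules are now imported the same way as regular test modules. A minimal sketch of exercising this from Python; the package directory "mypkg/" is a placeholder.

import pytest

if __name__ == "__main__":
    # Equivalent to: pytest --doctest-modules --import-mode=importlib mypkg/
    raise SystemExit(
        pytest.main(["--doctest-modules", "--import-mode=importlib", "mypkg/"])
    )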
@@ -730,5 +734,16 @@ def _get_report_choice(key: str) -> int:
 @pytest.fixture(scope="session")
 def doctest_namespace() -> Dict[str, Any]:
     """Fixture that returns a :py:class:`dict` that will be injected into the
-    namespace of doctests."""
+    namespace of doctests.
+
+    Usually this fixture is used in conjunction with another ``autouse`` fixture:
+
+    .. code-block:: python
+
+        @pytest.fixture(autouse=True)
+        def add_np(doctest_namespace):
+            doctest_namespace["np"] = numpy
+
+    For more details: :ref:`doctest_namespace`.
+    """
     return dict()
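Aside (not part of the diff): a minimal, self-contained sketch of the pattern the new docstring describes — a conftest fixture injects a name that collected doctests can then use. The numpy import is only illustrative.

# conftest.py (sketch)
import numpy
import pytest


@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    doctest_namespace["np"] = numpy


# any doctest in the project can now refer to the injected name:
def arange_demo():
    """
    >>> np.arange(3)
    array([0, 1, 2])
    """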
@@ -18,7 +18,7 @@ fault_handler_originally_enabled_key = StashKey[bool]()
 def pytest_addoption(parser: Parser) -> None:
     help = (
         "Dump the traceback of all threads if a test takes "
-        "more than TIMEOUT seconds to finish."
+        "more than TIMEOUT seconds to finish"
     )
     parser.addini("faulthandler_timeout", help, default=0.0)


@@ -18,6 +18,7 @@ from typing import Iterable
 from typing import Iterator
 from typing import List
 from typing import MutableMapping
+from typing import NoReturn
 from typing import Optional
 from typing import overload
 from typing import Sequence

@@ -67,7 +68,6 @@ from _pytest.stash import StashKey

 if TYPE_CHECKING:
     from typing import Deque
-    from typing import NoReturn

     from _pytest.scope import _ScopeName
     from _pytest.main import Session

@@ -223,15 +223,10 @@ def add_funcarg_pseudo_fixture_def(
 def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]:
     """Return fixturemarker or None if it doesn't exist or raised
     exceptions."""
-    try:
-        fixturemarker: Optional[FixtureFunctionMarker] = getattr(
-            obj, "_pytestfixturefunction", None
-        )
-    except TEST_OUTCOME:
-        # some objects raise errors like request (from flask import request)
-        # we don't expect them to be fixture functions
-        return None
-    return fixturemarker
+    return cast(
+        Optional[FixtureFunctionMarker],
+        safe_getattr(obj, "_pytestfixturefunction", None),
+    )


 # Parametrized fixture key, helper alias for code below.
@@ -350,7 +345,7 @@ def reorder_items_atscope(
     return items_done


-def get_direct_param_fixture_func(request):
+def get_direct_param_fixture_func(request: "FixtureRequest") -> Any:
     return request.param


@@ -412,6 +407,15 @@ class FixtureRequest:
         self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
         self._arg2index: Dict[str, int] = {}
         self._fixturemanager: FixtureManager = pyfuncitem.session._fixturemanager
+        # Notes on the type of `param`:
+        # -`request.param` is only defined in parametrized fixtures, and will raise
+        #  AttributeError otherwise. Python typing has no notion of "undefined", so
+        #  this cannot be reflected in the type.
+        # - Technically `param` is only (possibly) defined on SubRequest, not
+        #   FixtureRequest, but the typing of that is still in flux so this cheats.
+        # - In the future we might consider using a generic for the param type, but
+        #   for now just using Any.
+        self.param: Any

     @property
     def scope(self) -> "_ScopeName":

@@ -491,6 +495,7 @@ class FixtureRequest:

     @property
     def path(self) -> Path:
+        """Path where the test function was collected."""
         if self.scope not in ("function", "class", "module", "package"):
             raise AttributeError(f"path not available in {self.scope}-scoped context")
         # TODO: Remove ignore once _pyfuncitem is properly typed.

@@ -529,7 +534,7 @@ class FixtureRequest:
         """
         self.node.add_marker(marker)

-    def raiseerror(self, msg: Optional[str]) -> "NoReturn":
+    def raiseerror(self, msg: Optional[str]) -> NoReturn:
         """Raise a FixtureLookupError with the given message."""
         raise self._fixturemanager.FixtureLookupError(None, self, msg)

@@ -548,11 +553,18 @@ class FixtureRequest:
         setup time, you may use this function to retrieve it inside a fixture
         or test function body.

+        This method can be used during the test setup phase or the test run
+        phase, but during the test teardown phase a fixture's value may not
+        be available.
+
         :raises pytest.FixtureLookupError:
             If the given fixture could not be found.
         """
         fixturedef = self._get_active_fixturedef(argname)
-        assert fixturedef.cached_result is not None
+        assert fixturedef.cached_result is not None, (
+            f'The fixture value for "{argname}" is not available. '
+            "This can happen when the fixture has already been torn down."
+        )
         return fixturedef.cached_result[0]

     def _get_active_fixturedef(
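Aside (not part of the diff): a small usage sketch of request.getfixturevalue() as described in the expanded docstring above; the fixture names are made up for illustration.

import pytest


@pytest.fixture
def database_url():
    return "sqlite://"


@pytest.fixture
def cache_url():
    return "memory://"


@pytest.mark.parametrize("backend_fixture", ["database_url", "cache_url"])
def test_backend(request, backend_fixture):
    # look the fixture up by name at setup/run time
    url = request.getfixturevalue(backend_fixture)
    assert "://" in url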
@@ -864,7 +876,7 @@ class FixtureLookupErrorRepr(TerminalRepr):
         tw.line("%s:%d" % (os.fspath(self.filename), self.firstlineno + 1))


-def fail_fixturefunc(fixturefunc, msg: str) -> "NoReturn":
+def fail_fixturefunc(fixturefunc, msg: str) -> NoReturn:
     fs, lineno = getfslineno(fixturefunc)
     location = f"{fs}:{lineno + 1}"
     source = _pytest._code.Source(fixturefunc)

@@ -1350,7 +1362,7 @@ def pytest_addoption(parser: Parser) -> None:
         "usefixtures",
         type="args",
         default=[],
-        help="list of default fixtures to be used with this project",
+        help="List of default fixtures to be used with this project",
     )


@@ -49,7 +49,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="count",
         default=0,
         dest="version",
-        help="display pytest version and information about plugins. "
+        help="Display pytest version and information about plugins. "
         "When given twice, also display information about plugins.",
     )
     group._addoption(

@@ -57,7 +57,7 @@ def pytest_addoption(parser: Parser) -> None:
         "--help",
         action=HelpAction,
         dest="help",
-        help="show help message and configuration info",
+        help="Show help message and configuration info",
     )
     group._addoption(
         "-p",

@@ -65,7 +65,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="plugins",
         default=[],
         metavar="name",
-        help="early-load given plugin module name or entry point (multi-allowed).\n"
+        help="Early-load given plugin module name or entry point (multi-allowed). "
         "To avoid loading of plugins, use the `no:` prefix, e.g. "
         "`no:doctest`.",
     )

@@ -74,7 +74,7 @@ def pytest_addoption(parser: Parser) -> None:
         "--trace-config",
         action="store_true",
         default=False,
-        help="trace considerations of conftest.py files.",
+        help="Trace considerations of conftest.py files",
     )
     group.addoption(
         "--debug",

@@ -83,16 +83,17 @@ def pytest_addoption(parser: Parser) -> None:
         const="pytestdebug.log",
         dest="debug",
         metavar="DEBUG_FILE_NAME",
-        help="store internal tracing debug information in this log file.\n"
-        "This file is opened with 'w' and truncated as a result, care advised.\n"
-        "Defaults to 'pytestdebug.log'.",
+        help="Store internal tracing debug information in this log file. "
+        "This file is opened with 'w' and truncated as a result, care advised. "
+        "Default: pytestdebug.log.",
     )
     group._addoption(
         "-o",
         "--override-ini",
         dest="override_ini",
         action="append",
-        help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.',
+        help='Override ini option with "option=value" style, '
+        "e.g. `-o xfail_strict=True -o cache_dir=cache`.",
     )


@@ -203,12 +204,12 @@ def showhelp(config: Config) -> None:
            tw.line(indent + line)

     tw.line()
-    tw.line("environment variables:")
+    tw.line("Environment variables:")
     vars = [
-        ("PYTEST_ADDOPTS", "extra command line options"),
-        ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
-        ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"),
-        ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"),
+        ("PYTEST_ADDOPTS", "Extra command line options"),
+        ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"),
+        ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"),
+        ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"),
     ]
     for name, help in vars:
         tw.line(f" {name:<24} {help}")
@@ -386,7 +386,7 @@ def pytest_addoption(parser: Parser) -> None:
         metavar="path",
         type=functools.partial(filename_arg, optname="--junitxml"),
         default=None,
-        help="create junit-xml style report file at given path.",
+        help="Create junit-xml style report file at given path",
     )
     group.addoption(
         "--junitprefix",

@@ -394,7 +394,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store",
         metavar="str",
         default=None,
-        help="prepend prefix to classnames in junit-xml output",
+        help="Prepend prefix to classnames in junit-xml output",
     )
     parser.addini(
         "junit_suite_name", "Test suite name for JUnit report", default="pytest"

@@ -270,8 +270,15 @@ class LegacyTestdirPlugin:
 @final
 @attr.s(init=False, auto_attribs=True)
 class TempdirFactory:
-    """Backward compatibility wrapper that implements :class:``_pytest.compat.LEGACY_PATH``
-    for :class:``TempPathFactory``."""
+    """Backward compatibility wrapper that implements :class:`py.path.local`
+    for :class:`TempPathFactory`.
+
+    .. note::
+        These days, it is preferred to use ``tmp_path_factory``.
+
+        :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
+    """

     _tmppath_factory: TempPathFactory

@@ -282,11 +289,11 @@ class TempdirFactory:
         self._tmppath_factory = tmppath_factory

     def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
-        """Same as :meth:`TempPathFactory.mktemp`, but returns a ``_pytest.compat.LEGACY_PATH`` object."""
+        """Same as :meth:`TempPathFactory.mktemp`, but returns a :class:`py.path.local` object."""
         return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())

     def getbasetemp(self) -> LEGACY_PATH:
-        """Backward compat wrapper for ``_tmppath_factory.getbasetemp``."""
+        """Same as :meth:`TempPathFactory.getbasetemp`, but returns a :class:`py.path.local` object."""
         return legacy_path(self._tmppath_factory.getbasetemp().resolve())


@@ -312,6 +319,11 @@ class LegacyTmpdirPlugin:

     The returned object is a `legacy_path`_ object.

+    .. note::
+        These days, it is preferred to use ``tmp_path``.
+
+        :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
     .. _legacy_path: https://py.readthedocs.io/en/latest/path.html
     """
     return legacy_path(tmp_path)
@@ -40,7 +40,6 @@ if TYPE_CHECKING:
 else:
     logging_StreamHandler = logging.StreamHandler

-
 DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
 DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
 _ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")

@@ -218,7 +217,7 @@ def pytest_addoption(parser: Parser) -> None:

     def add_option_ini(option, dest, default=None, type=None, **kwargs):
         parser.addini(
-            dest, default=default, type=type, help="default value for " + option
+            dest, default=default, type=type, help="Default value for " + option
         )
         group.addoption(option, dest=dest, **kwargs)

@@ -228,8 +227,8 @@ def pytest_addoption(parser: Parser) -> None:
         default=None,
         metavar="LEVEL",
         help=(
-            "level of messages to catch/display.\n"
-            "Not set by default, so it depends on the root/parent log handler's"
+            "Level of messages to catch/display."
+            " Not set by default, so it depends on the root/parent log handler's"
             ' effective level, where it is "WARNING" by default.'
         ),
     )

@@ -237,58 +236,58 @@ def pytest_addoption(parser: Parser) -> None:
         "--log-format",
         dest="log_format",
         default=DEFAULT_LOG_FORMAT,
-        help="log format as used by the logging module.",
+        help="Log format used by the logging module",
     )
     add_option_ini(
         "--log-date-format",
         dest="log_date_format",
         default=DEFAULT_LOG_DATE_FORMAT,
-        help="log date format as used by the logging module.",
+        help="Log date format used by the logging module",
     )
     parser.addini(
         "log_cli",
         default=False,
         type="bool",
-        help='enable log display during test run (also known as "live logging").',
+        help='Enable log display during test run (also known as "live logging")',
     )
     add_option_ini(
-        "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
+        "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level"
     )
     add_option_ini(
         "--log-cli-format",
         dest="log_cli_format",
         default=None,
-        help="log format as used by the logging module.",
+        help="Log format used by the logging module",
     )
     add_option_ini(
         "--log-cli-date-format",
         dest="log_cli_date_format",
         default=None,
-        help="log date format as used by the logging module.",
+        help="Log date format used by the logging module",
     )
     add_option_ini(
         "--log-file",
         dest="log_file",
         default=None,
-        help="path to a file when logging will be written to.",
+        help="Path to a file when logging will be written to",
     )
     add_option_ini(
         "--log-file-level",
         dest="log_file_level",
         default=None,
-        help="log file logging level.",
+        help="Log file logging level",
     )
     add_option_ini(
         "--log-file-format",
         dest="log_file_format",
         default=DEFAULT_LOG_FORMAT,
-        help="log format as used by the logging module.",
+        help="Log format used by the logging module",
     )
     add_option_ini(
         "--log-file-date-format",
         dest="log_file_date_format",
         default=DEFAULT_LOG_DATE_FORMAT,
-        help="log date format as used by the logging module.",
+        help="Log date format used by the logging module",
     )
     add_option_ini(
         "--log-auto-indent",

@@ -345,6 +344,10 @@ class LogCaptureHandler(logging_StreamHandler):
         self.records = []
         self.stream = StringIO()

+    def clear(self) -> None:
+        self.records.clear()
+        self.stream = StringIO()
+
     def handleError(self, record: logging.LogRecord) -> None:
         if logging.raiseExceptions:
             # Fail the test if the log message is bad (emit failed).

@@ -440,7 +443,7 @@ class LogCaptureFixture:

     def clear(self) -> None:
         """Reset the list of log records and the captured log text."""
-        self.handler.reset()
+        self.handler.clear()

     def set_level(self, level: Union[int, str], logger: Optional[str] = None) -> None:
         """Set the level of a logger for the duration of a test.
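Aside (not part of the diff): with the new LogCaptureHandler.clear() wired into LogCaptureFixture.clear(), clearing caplog resets both the record list and the captured text. A minimal sketch:

import logging


def test_clear_resets_records_and_text(caplog):
    with caplog.at_level(logging.INFO):
        logging.getLogger().info("first message")
        assert caplog.records and "first message" in caplog.text
        caplog.clear()
        # both the records and the formatted text are gone after clear()
        assert caplog.records == [] and caplog.text == ""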
@@ -51,7 +51,7 @@ if TYPE_CHECKING:
 def pytest_addoption(parser: Parser) -> None:
     parser.addini(
         "norecursedirs",
-        "directory patterns to avoid for recursion",
+        "Directory patterns to avoid for recursion",
         type="args",
         default=[
             "*.egg",

@@ -67,26 +67,26 @@ def pytest_addoption(parser: Parser) -> None:
     )
     parser.addini(
         "testpaths",
-        "directories to search for tests when no files or directories are given in the "
-        "command line.",
+        "Directories to search for tests when no files or directories are given on the "
+        "command line",
         type="args",
         default=[],
     )
-    group = parser.getgroup("general", "running and selection options")
+    group = parser.getgroup("general", "Running and selection options")
     group._addoption(
         "-x",
         "--exitfirst",
         action="store_const",
         dest="maxfail",
         const=1,
-        help="exit instantly on first error or failed test.",
+        help="Exit instantly on first error or failed test",
     )
     group = parser.getgroup("pytest-warnings")
     group.addoption(
         "-W",
         "--pythonwarnings",
         action="append",
-        help="set which warnings to report, see -W option of python itself.",
+        help="Set which warnings to report, see -W option of Python itself",
     )
     parser.addini(
         "filterwarnings",

@@ -102,37 +102,39 @@ def pytest_addoption(parser: Parser) -> None:
         type=int,
         dest="maxfail",
         default=0,
-        help="exit after first num failures or errors.",
+        help="Exit after first num failures or errors",
     )
     group._addoption(
         "--strict-config",
         action="store_true",
-        help="any warnings encountered while parsing the `pytest` section of the configuration file raise errors.",
+        help="Any warnings encountered while parsing the `pytest` section of the "
+        "configuration file raise errors",
     )
     group._addoption(
         "--strict-markers",
         action="store_true",
-        help="markers not registered in the `markers` section of the configuration file raise errors.",
+        help="Markers not registered in the `markers` section of the configuration "
+        "file raise errors",
     )
     group._addoption(
         "--strict",
         action="store_true",
-        help="(deprecated) alias to --strict-markers.",
+        help="(Deprecated) alias to --strict-markers",
     )
     group._addoption(
         "-c",
         metavar="file",
         type=str,
         dest="inifilename",
-        help="load configuration from `file` instead of trying to locate one of the implicit "
-        "configuration files.",
+        help="Load configuration from `file` instead of trying to locate one of the "
+        "implicit configuration files",
     )
     group._addoption(
         "--continue-on-collection-errors",
         action="store_true",
         default=False,
         dest="continue_on_collection_errors",
-        help="Force test execution even if collection errors occur.",
+        help="Force test execution even if collection errors occur",
     )
     group._addoption(
         "--rootdir",

@@ -149,30 +151,30 @@ def pytest_addoption(parser: Parser) -> None:
         "--collect-only",
         "--co",
         action="store_true",
-        help="only collect tests, don't execute them.",
+        help="Only collect tests, don't execute them",
     )
     group.addoption(
         "--pyargs",
         action="store_true",
-        help="try to interpret all arguments as python packages.",
+        help="Try to interpret all arguments as Python packages",
     )
     group.addoption(
         "--ignore",
         action="append",
         metavar="path",
-        help="ignore path during collection (multi-allowed).",
+        help="Ignore path during collection (multi-allowed)",
     )
     group.addoption(
         "--ignore-glob",
         action="append",
         metavar="path",
-        help="ignore path pattern during collection (multi-allowed).",
+        help="Ignore path pattern during collection (multi-allowed)",
     )
     group.addoption(
         "--deselect",
         action="append",
         metavar="nodeid_prefix",
-        help="deselect item (via node id prefix) during collection (multi-allowed).",
+        help="Deselect item (via node id prefix) during collection (multi-allowed)",
     )
     group.addoption(
         "--confcutdir",

@@ -180,14 +182,14 @@ def pytest_addoption(parser: Parser) -> None:
         default=None,
         metavar="dir",
         type=functools.partial(directory_arg, optname="--confcutdir"),
-        help="only load conftest.py's relative to specified dir.",
+        help="Only load conftest.py's relative to specified dir",
     )
     group.addoption(
         "--noconftest",
         action="store_true",
         dest="noconftest",
         default=False,
-        help="Don't load any conftest.py files.",
+        help="Don't load any conftest.py files",
     )
     group.addoption(
         "--keepduplicates",

@@ -195,7 +197,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="keepduplicates",
         default=False,
-        help="Keep duplicate tests.",
+        help="Keep duplicate tests",
     )
     group.addoption(
         "--collect-in-virtualenv",

@@ -209,8 +211,8 @@ def pytest_addoption(parser: Parser) -> None:
         default="prepend",
         choices=["prepend", "append", "importlib"],
         dest="importmode",
-        help="prepend/append to sys.path when importing test modules and conftest files, "
-        "default is to prepend.",
+        help="Prepend/append to sys.path when importing test modules and conftest "
+        "files. Default: prepend.",
     )

     group = parser.getgroup("debugconfig", "test session debugging and configuration")

@@ -221,8 +223,8 @@ def pytest_addoption(parser: Parser) -> None:
         type=validate_basetemp,
         metavar="dir",
         help=(
-            "base temporary directory for this test run."
-            "(warning: this directory is removed if it exists)"
+            "Base temporary directory for this test run. "
+            "(Warning: this directory is removed if it exists.)"
         ),
     )

@@ -76,8 +76,8 @@ def pytest_addoption(parser: Parser) -> None:
         dest="keyword",
         default="",
         metavar="EXPRESSION",
-        help="only run tests which match the given substring expression. "
-        "An expression is a python evaluatable expression "
+        help="Only run tests which match the given substring expression. "
+        "An expression is a Python evaluatable expression "
         "where all names are substring-matched against test names "
         "and their parent classes. Example: -k 'test_method or test_"
         "other' matches all test functions and classes whose name "

@@ -96,7 +96,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="markexpr",
         default="",
         metavar="MARKEXPR",
-        help="only run tests matching given mark expression.\n"
+        help="Only run tests matching given mark expression. "
         "For example: -m 'mark1 and not mark2'.",
     )

@@ -106,8 +106,8 @@ def pytest_addoption(parser: Parser) -> None:
         help="show markers (builtin, plugin and per-project ones).",
     )

-    parser.addini("markers", "markers for test functions", "linelist")
-    parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets")
+    parser.addini("markers", "Markers for test functions", "linelist")
+    parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets")


 @hookimpl(tryfirst=True)
@@ -21,15 +21,12 @@ import types
 from typing import Callable
 from typing import Iterator
 from typing import Mapping
+from typing import NoReturn
 from typing import Optional
 from typing import Sequence
-from typing import TYPE_CHECKING

 import attr

-if TYPE_CHECKING:
-    from typing import NoReturn
-

 __all__ = [
     "Expression",

@@ -117,7 +114,7 @@ class Scanner:
                 self.reject((type,))
         return None

-    def reject(self, expected: Sequence[TokenType]) -> "NoReturn":
+    def reject(self, expected: Sequence[TokenType]) -> NoReturn:
         raise ParseError(
             self.current.pos + 1,
             "expected {}; got {}".format(
@@ -5,6 +5,7 @@ import warnings
 from typing import Any
 from typing import Callable
 from typing import cast
+from typing import NoReturn
 from typing import Optional
 from typing import Type
 from typing import TypeVar

@@ -14,7 +15,6 @@ from _pytest.deprecated import KEYWORD_MSG_ARG
 TYPE_CHECKING = False  # Avoid circular import through compat.

 if TYPE_CHECKING:
-    from typing import NoReturn
     from typing_extensions import Protocol
 else:
     # typing.Protocol is only available starting from Python 3.8. It is also

@@ -115,7 +115,7 @@ def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _E
 @_with_exception(Exit)
 def exit(
     reason: str = "", returncode: Optional[int] = None, *, msg: Optional[str] = None
-) -> "NoReturn":
+) -> NoReturn:
     """Exit testing process.

     :param reason:

@@ -146,7 +146,7 @@ def exit(
 @_with_exception(Skipped)
 def skip(
     reason: str = "", *, allow_module_level: bool = False, msg: Optional[str] = None
-) -> "NoReturn":
+) -> NoReturn:
     """Skip an executing test with the given message.

     This function should be called only during testing (setup, call or teardown) or

@@ -176,9 +176,7 @@ def skip(


 @_with_exception(Failed)
-def fail(
-    reason: str = "", pytrace: bool = True, msg: Optional[str] = None
-) -> "NoReturn":
+def fail(reason: str = "", pytrace: bool = True, msg: Optional[str] = None) -> NoReturn:
     """Explicitly fail an executing test with the given message.

     :param reason:

@@ -238,7 +236,7 @@ class XFailed(Failed):


 @_with_exception(XFailed)
-def xfail(reason: str = "") -> "NoReturn":
+def xfail(reason: str = "") -> NoReturn:
     """Imperatively xfail an executing test or setup function with the given reason.

     This function should be called only during testing (setup, call or teardown).
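Aside (not part of the diff): pytest.exit/skip/fail/xfail are annotated NoReturn, so type checkers know that control flow ends at these calls. A minimal sketch of the narrowing this enables (typical mypy behaviour assumed, not verified here):

from typing import Optional

import pytest


def require(value: Optional[int]) -> int:
    if value is None:
        pytest.fail("value must be provided")
    # a type checker can narrow `value` to `int` here, since fail() never returns
    return value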
@@ -24,7 +24,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="pastebin",
         default=None,
         choices=["failed", "all"],
-        help="send failed|all info to bpaste.net pastebin service.",
+        help="Send failed|all info to bpaste.net pastebin service",
     )


@@ -89,7 +89,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="lsof",
         default=False,
-        help="run FD checks if lsof is available",
+        help="Run FD checks if lsof is available",
     )

     parser.addoption(

@@ -98,13 +98,13 @@ def pytest_addoption(parser: Parser) -> None:
         dest="runpytest",
         choices=("inprocess", "subprocess"),
         help=(
-            "run pytest sub runs in tests using an 'inprocess' "
+            "Run pytest sub runs in tests using an 'inprocess' "
            "or 'subprocess' (python -m main) method"
         ),
     )

     parser.addini(
-        "pytester_example_dir", help="directory to take the pytester example files from"
+        "pytester_example_dir", help="Directory to take the pytester example files from"
     )


@@ -904,13 +904,13 @@ class Pytester:

         self._monkeypatch.syspath_prepend(str(path))

-    def mkdir(self, name: str) -> Path:
+    def mkdir(self, name: Union[str, "os.PathLike[str]"]) -> Path:
         """Create a new (sub)directory."""
         p = self.path / name
         p.mkdir()
         return p

-    def mkpydir(self, name: str) -> Path:
+    def mkpydir(self, name: Union[str, "os.PathLike[str]"]) -> Path:
         """Create a new python package.

         This creates a (sub)directory with an empty ``__init__.py`` file so it
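Aside (not part of the diff): the widened signatures above let Pytester.mkdir() and Pytester.mkpydir() accept os.PathLike values as well as str. Sketch of a test using the pytester fixture (which requires the pytester plugin to be enabled, e.g. pytest_plugins = "pytester" in a top-level conftest.py):

from pathlib import Path


def test_mkdir_accepts_pathlike(pytester):
    sub = pytester.mkdir(Path("sub"))
    pkg = pytester.mkpydir(Path("pkg"))
    assert sub.is_dir()
    assert (pkg / "__init__.py").exists()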
@@ -77,10 +77,12 @@ from _pytest.pathlib import parts
 from _pytest.pathlib import visit
 from _pytest.scope import Scope
 from _pytest.warning_types import PytestCollectionWarning
+from _pytest.warning_types import PytestReturnNotNoneWarning
 from _pytest.warning_types import PytestUnhandledCoroutineWarning

 if TYPE_CHECKING:
     from typing_extensions import Literal

     from _pytest.scope import _ScopeName


@@ -95,7 +97,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="showfixtures",
         default=False,
-        help="show available fixtures, sorted by plugin appearance "
+        help="Show available fixtures, sorted by plugin appearance "
         "(fixtures with leading '_' are only shown with '-v')",
     )
     group.addoption(

@@ -103,32 +105,32 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="show_fixtures_per_test",
         default=False,
-        help="show fixtures per test",
+        help="Show fixtures per test",
     )
     parser.addini(
         "python_files",
         type="args",
         # NOTE: default is also used in AssertionRewritingHook.
         default=["test_*.py", "*_test.py"],
-        help="glob-style file patterns for Python test module discovery",
+        help="Glob-style file patterns for Python test module discovery",
     )
     parser.addini(
         "python_classes",
         type="args",
         default=["Test"],
-        help="prefixes or glob names for Python test class discovery",
+        help="Prefixes or glob names for Python test class discovery",
     )
     parser.addini(
         "python_functions",
         type="args",
         default=["test"],
-        help="prefixes or glob names for Python test function and method discovery",
+        help="Prefixes or glob names for Python test function and method discovery",
     )
     parser.addini(
         "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
         type="bool",
         default=False,
-        help="disable string escape non-ascii characters, might cause unwanted "
+        help="Disable string escape non-ASCII characters, might cause unwanted "
         "side effects(use at your own risk)",
     )

@@ -192,6 +194,13 @@ def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]:
         result = testfunction(**testargs)
         if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
             async_warn_and_skip(pyfuncitem.nodeid)
+        elif result is not None:
+            warnings.warn(
+                PytestReturnNotNoneWarning(
+                    f"Expected None, but {pyfuncitem.nodeid} returned {result!r}, which will be an error in a "
+                    "future version of pytest. Did you mean to use `assert` instead of `return`?"
+                )
+            )
     return True

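Aside (not part of the diff): the branch added above means a test that returns a value instead of asserting now emits PytestReturnNotNoneWarning. A minimal example of the mistake it targets:

def test_sum():
    # emits PytestReturnNotNoneWarning; almost certainly meant `assert 1 + 1 == 2`
    return 1 + 1 == 2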
@@ -133,9 +133,11 @@ class ApproxBase:
     # raise if there are any non-numeric elements in the sequence.


-def _recursive_list_map(f, x):
-    if isinstance(x, list):
-        return [_recursive_list_map(f, xi) for xi in x]
+def _recursive_sequence_map(f, x):
+    """Recursively map a function over a sequence of arbitrary depth"""
+    if isinstance(x, (list, tuple)):
+        seq_type = type(x)
+        return seq_type(_recursive_sequence_map(f, xi) for xi in x)
     else:
         return f(x)

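Aside (not part of the diff): the renamed helper above now preserves the sequence type while mapping, so tuples stay tuples and lists stay lists. Reproduced standalone (the real function is private to _pytest.python_api) to show the behaviour:

def recursive_sequence_map(f, x):
    # mirrors the private _recursive_sequence_map added above
    if isinstance(x, (list, tuple)):
        seq_type = type(x)
        return seq_type(recursive_sequence_map(f, xi) for xi in x)
    else:
        return f(x)


assert recursive_sequence_map(abs, [(-1, 2), [-3]]) == [(1, 2), [3]]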
@@ -144,7 +146,9 @@ class ApproxNumpy(ApproxBase):
     """Perform approximate comparisons where the expected value is numpy array."""

     def __repr__(self) -> str:
-        list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
+        list_scalars = _recursive_sequence_map(
+            self._approx_scalar, self.expected.tolist()
+        )
         return f"approx({list_scalars!r})"

     def _repr_compare(self, other_side: "ndarray") -> List[str]:

@@ -164,7 +168,7 @@ class ApproxNumpy(ApproxBase):
             return value

         np_array_shape = self.expected.shape
-        approx_side_as_list = _recursive_list_map(
+        approx_side_as_seq = _recursive_sequence_map(
             self._approx_scalar, self.expected.tolist()
         )

@@ -179,7 +183,7 @@ class ApproxNumpy(ApproxBase):
         max_rel_diff = -math.inf
         different_ids = []
         for index in itertools.product(*(range(i) for i in np_array_shape)):
-            approx_value = get_value_from_nested_list(approx_side_as_list, index)
+            approx_value = get_value_from_nested_list(approx_side_as_seq, index)
             other_value = get_value_from_nested_list(other_side, index)
             if approx_value != other_value:
                 abs_diff = abs(approx_value.expected - other_value)

@@ -194,7 +198,7 @@ class ApproxNumpy(ApproxBase):
             (
                 str(index),
                 str(get_value_from_nested_list(other_side, index)),
-                str(get_value_from_nested_list(approx_side_as_list, index)),
+                str(get_value_from_nested_list(approx_side_as_seq, index)),
             )
             for index in different_ids
         ]

@@ -326,7 +330,7 @@ class ApproxSequenceLike(ApproxBase):
             f"Lengths: {len(self.expected)} and {len(other_side)}",
         ]

-        approx_side_as_map = _recursive_list_map(self._approx_scalar, self.expected)
+        approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected)

         number_of_elements = len(approx_side_as_map)
         max_abs_diff = -math.inf

@@ -666,6 +670,11 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
     specialised test helpers in :std:doc:`numpy:reference/routines.testing`
     if you need support for comparisons, NaNs, or ULP-based tolerances.

+    To match strings using regex, you can use
+    `Matches <https://github.com/asottile/re-assert#re_assertmatchespattern-str-args-kwargs>`_
+    from the
+    `re_assert package <https://github.com/asottile/re-assert>`_.
+
     .. warning::

     .. versionchanged:: 3.2
@@ -899,6 +908,12 @@ def raises(
     """
     __tracebackhide__ = True

+    if not expected_exception:
+        raise ValueError(
+            f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. "
+            f"Raising exceptions is already understood as failing the test, so you don't need "
+            f"any special code to say 'this should never raise an exception'."
+        )
     if isinstance(expected_exception, type):
         excepted_exceptions: Tuple[Type[E], ...] = (expected_exception,)
     else:
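Aside (not part of the diff): the new guard above turns a falsy expected_exception (such as an empty tuple, which can never match anything) into an immediate ValueError. Sketch of a test for that behaviour:

import pytest


def test_rejects_empty_exception_tuple():
    with pytest.raises(ValueError, match="Expected an exception type"):
        with pytest.raises(()):  # the empty tuple is rejected up front
            pass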
@@ -160,7 +160,14 @@ def warns(
 class WarningsRecorder(warnings.catch_warnings):
     """A context manager to record raised warnings.

+    Each recorded warning is an instance of :class:`warnings.WarningMessage`.
+
     Adapted from `warnings.catch_warnings`.

+    .. note::
+        ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
+        differently; see :ref:`ensuring_function_triggers`.
+
     """

     def __init__(self, *, _ispytest: bool = False) -> None:
|
|
|
@ -8,6 +8,7 @@ from typing import Iterable
|
||||||
from typing import Iterator
|
from typing import Iterator
|
||||||
from typing import List
|
from typing import List
|
||||||
from typing import Mapping
|
from typing import Mapping
|
||||||
|
from typing import NoReturn
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
from typing import Tuple
|
from typing import Tuple
|
||||||
from typing import Type
|
from typing import Type
|
||||||
|
@ -36,7 +37,6 @@ from _pytest.nodes import Item
|
||||||
from _pytest.outcomes import skip
|
from _pytest.outcomes import skip
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from typing import NoReturn
|
|
||||||
from typing_extensions import Literal
|
from typing_extensions import Literal
|
||||||
|
|
||||||
from _pytest.runner import CallInfo
|
from _pytest.runner import CallInfo
|
||||||
|
@ -229,7 +229,7 @@ class BaseReport:
|
||||||
|
|
||||||
def _report_unserialization_failure(
|
def _report_unserialization_failure(
|
||||||
type_name: str, report_class: Type[BaseReport], reportdict
|
type_name: str, report_class: Type[BaseReport], reportdict
|
||||||
) -> "NoReturn":
|
) -> NoReturn:
|
||||||
url = "https://github.com/pytest-dev/pytest/issues"
|
url = "https://github.com/pytest-dev/pytest/issues"
|
||||||
stream = StringIO()
|
stream = StringIO()
|
||||||
pprint("-" * 100, stream=stream)
|
pprint("-" * 100, stream=stream)
|
||||||
|
|
|
@ -46,14 +46,14 @@ if TYPE_CHECKING:
|
||||||
|
|
||||||
|
|
||||||
def pytest_addoption(parser: Parser) -> None:
|
def pytest_addoption(parser: Parser) -> None:
|
||||||
group = parser.getgroup("terminal reporting", "reporting", after="general")
|
group = parser.getgroup("terminal reporting", "Reporting", after="general")
|
||||||
group.addoption(
|
group.addoption(
|
||||||
"--durations",
|
"--durations",
|
||||||
action="store",
|
action="store",
|
||||||
type=int,
|
type=int,
|
||||||
default=None,
|
default=None,
|
||||||
metavar="N",
|
metavar="N",
|
||||||
help="show N slowest setup/test durations (N=0 for all).",
|
help="Show N slowest setup/test durations (N=0 for all)",
|
||||||
)
|
)
|
||||||
group.addoption(
|
group.addoption(
|
||||||
"--durations-min",
|
"--durations-min",
|
||||||
|
@ -61,7 +61,8 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
type=float,
|
type=float,
|
||||||
default=0.005,
|
default=0.005,
|
||||||
metavar="N",
|
metavar="N",
|
||||||
help="Minimal duration in seconds for inclusion in slowest list. Default 0.005",
|
help="Minimal duration in seconds for inclusion in slowest list. "
|
||||||
|
"Default: 0.005.",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -18,13 +18,13 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
"--setuponly",
|
"--setuponly",
|
||||||
"--setup-only",
|
"--setup-only",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
help="only setup fixtures, do not execute tests.",
|
help="Only setup fixtures, do not execute tests",
|
||||||
)
|
)
|
||||||
group.addoption(
|
group.addoption(
|
||||||
"--setupshow",
|
"--setupshow",
|
||||||
"--setup-show",
|
"--setup-show",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
help="show setup of fixtures while executing tests.",
|
help="Show setup of fixtures while executing tests",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -15,8 +15,8 @@ def pytest_addoption(parser: Parser) -> None:
|
||||||
"--setupplan",
|
"--setupplan",
|
||||||
"--setup-plan",
|
"--setup-plan",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
help="show what fixtures and tests would be executed but "
|
help="Show what fixtures and tests would be executed but "
|
||||||
"don't execute anything.",
|
"don't execute anything",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@@ -31,12 +31,12 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="runxfail",
         default=False,
-        help="report the results of xfail tests as if they were not marked",
+        help="Report the results of xfail tests as if they were not marked",
     )

     parser.addini(
         "xfail_strict",
-        "default for the strict parameter of xfail "
+        "Default for the strict parameter of xfail "
         "markers when not given explicitly (default: False)",
         default=False,
         type="bool",

@@ -23,7 +23,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         default=False,
         dest="stepwise",
-        help="exit on test failure and continue from last failing test next time",
+        help="Exit on test failure and continue from last failing test next time",
     )
     group.addoption(
         "--sw-skip",
@@ -31,8 +31,8 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         default=False,
         dest="stepwise_skip",
-        help="ignore the first failing test but stop on the next failing test.\n"
-        "implicitly enables --stepwise.",
+        help="Ignore the first failing test but stop on the next failing test. "
+        "Implicitly enables --stepwise.",
     )
@@ -35,7 +35,9 @@ from _pytest import nodes
 from _pytest import timing
 from _pytest._code import ExceptionInfo
 from _pytest._code.code import ExceptionRepr
+from _pytest._io import TerminalWriter
 from _pytest._io.wcwidth import wcswidth
+from _pytest.assertion.util import running_on_ci
 from _pytest.compat import final
 from _pytest.config import _PluggyPlugin
 from _pytest.config import Config
@@ -110,28 +112,28 @@ class MoreQuietAction(argparse.Action):


 def pytest_addoption(parser: Parser) -> None:
-    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group = parser.getgroup("terminal reporting", "Reporting", after="general")
     group._addoption(
         "-v",
         "--verbose",
         action="count",
         default=0,
         dest="verbose",
-        help="increase verbosity.",
+        help="Increase verbosity",
     )
     group._addoption(
         "--no-header",
         action="store_true",
         default=False,
         dest="no_header",
-        help="disable header",
+        help="Disable header",
     )
     group._addoption(
         "--no-summary",
         action="store_true",
         default=False,
         dest="no_summary",
-        help="disable summary",
+        help="Disable summary",
     )
     group._addoption(
         "-q",
@@ -139,14 +141,14 @@ def pytest_addoption(parser: Parser) -> None:
         action=MoreQuietAction,
         default=0,
         dest="verbose",
-        help="decrease verbosity.",
+        help="Decrease verbosity",
     )
     group._addoption(
         "--verbosity",
         dest="verbose",
         type=int,
         default=0,
-        help="set verbosity. Default is 0.",
+        help="Set verbosity. Default: 0.",
     )
     group._addoption(
         "-r",
@@ -154,7 +156,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="reportchars",
         default=_REPORTCHARS_DEFAULT,
         metavar="chars",
-        help="show extra test summary info as specified by chars: (f)ailed, "
+        help="Show extra test summary info as specified by chars: (f)ailed, "
         "(E)rror, (s)kipped, (x)failed, (X)passed, "
         "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. "
         "(w)arnings are enabled by default (see --disable-warnings), "
@@ -166,7 +168,7 @@ def pytest_addoption(parser: Parser) -> None:
         default=False,
         dest="disable_warnings",
         action="store_true",
-        help="disable warnings summary",
+        help="Disable warnings summary",
     )
     group._addoption(
         "-l",
@@ -174,7 +176,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="showlocals",
         default=False,
-        help="show locals in tracebacks (disabled by default).",
+        help="Show locals in tracebacks (disabled by default)",
     )
     group._addoption(
         "--tb",
@@ -183,7 +185,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="tbstyle",
         default="auto",
         choices=["auto", "long", "short", "no", "line", "native"],
-        help="traceback print mode (auto/long/short/line/native/no).",
+        help="Traceback print mode (auto/long/short/line/native/no)",
     )
     group._addoption(
         "--show-capture",
@@ -192,14 +194,14 @@ def pytest_addoption(parser: Parser) -> None:
         choices=["no", "stdout", "stderr", "log", "all"],
         default="all",
         help="Controls how captured stdout/stderr/log is shown on failed tests. "
-        "Default is 'all'.",
+        "Default: all.",
     )
     group._addoption(
         "--fulltrace",
         "--full-trace",
         action="store_true",
         default=False,
-        help="don't cut any tracebacks (default is to cut).",
+        help="Don't cut any tracebacks (default is to cut)",
     )
     group._addoption(
         "--color",
@@ -208,18 +210,20 @@ def pytest_addoption(parser: Parser) -> None:
         dest="color",
         default="auto",
         choices=["yes", "no", "auto"],
-        help="color terminal output (yes/no/auto).",
+        help="Color terminal output (yes/no/auto)",
     )
     group._addoption(
         "--code-highlight",
         default="yes",
         choices=["yes", "no"],
-        help="Whether code should be highlighted (only if --color is also enabled)",
+        help="Whether code should be highlighted (only if --color is also enabled). "
+        "Default: yes.",
     )

     parser.addini(
         "console_output_style",
-        help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").',
+        help='Console output: "classic", or with additional progress information '
+        '("progress" (percentage) | "count")',
         default="progress",
     )
@@ -728,8 +732,8 @@ class TerminalReporter:
         if config.inipath:
             line += ", configfile: " + bestrelpath(config.rootpath, config.inipath)

+        if config.args_source == Config.ArgsSource.TESTPATHS:
             testpaths: List[str] = config.getini("testpaths")
-        if config.invocation_params.dir == config.rootpath and config.args == testpaths:
             line += ", testpaths: {}".format(", ".join(testpaths))

         result = [line]
@@ -1074,33 +1078,43 @@ class TerminalReporter:
         if not self.reportchars:
             return

-        def show_simple(stat, lines: List[str]) -> None:
+        def show_simple(lines: List[str], *, stat: str) -> None:
             failed = self.stats.get(stat, [])
             if not failed:
                 return
-            termwidth = self._tw.fullwidth
             config = self.config
             for rep in failed:
-                line = _get_line_with_reprcrash_message(config, rep, termwidth)
+                color = _color_for_type.get(stat, _color_for_type_default)
+                line = _get_line_with_reprcrash_message(
+                    config, rep, self._tw, {color: True}
+                )
                 lines.append(line)

         def show_xfailed(lines: List[str]) -> None:
             xfailed = self.stats.get("xfailed", [])
             for rep in xfailed:
                 verbose_word = rep._get_verbose_word(self.config)
-                pos = _get_pos(self.config, rep)
-                lines.append(f"{verbose_word} {pos}")
+                markup_word = self._tw.markup(
+                    verbose_word, **{_color_for_type["warnings"]: True}
+                )
+                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
+                line = f"{markup_word} {nodeid}"
                 reason = rep.wasxfail
                 if reason:
-                    lines.append(" " + str(reason))
+                    line += " - " + str(reason)
+
+                lines.append(line)

         def show_xpassed(lines: List[str]) -> None:
             xpassed = self.stats.get("xpassed", [])
             for rep in xpassed:
                 verbose_word = rep._get_verbose_word(self.config)
-                pos = _get_pos(self.config, rep)
+                markup_word = self._tw.markup(
+                    verbose_word, **{_color_for_type["warnings"]: True}
+                )
+                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
                 reason = rep.wasxfail
-                lines.append(f"{verbose_word} {pos} {reason}")
+                lines.append(f"{markup_word} {nodeid} {reason}")

         def show_skipped(lines: List[str]) -> None:
             skipped: List[CollectReport] = self.stats.get("skipped", [])
@@ -1108,24 +1122,27 @@ class TerminalReporter:
             if not fskips:
                 return
             verbose_word = skipped[0]._get_verbose_word(self.config)
+            markup_word = self._tw.markup(
+                verbose_word, **{_color_for_type["warnings"]: True}
+            )
+            prefix = "Skipped: "
             for num, fspath, lineno, reason in fskips:
-                if reason.startswith("Skipped: "):
-                    reason = reason[9:]
+                if reason.startswith(prefix):
+                    reason = reason[len(prefix) :]
                 if lineno is not None:
                     lines.append(
-                        "%s [%d] %s:%d: %s"
-                        % (verbose_word, num, fspath, lineno, reason)
+                        "%s [%d] %s:%d: %s" % (markup_word, num, fspath, lineno, reason)
                     )
                 else:
-                    lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))
+                    lines.append("%s [%d] %s: %s" % (markup_word, num, fspath, reason))

         REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = {
             "x": show_xfailed,
             "X": show_xpassed,
-            "f": partial(show_simple, "failed"),
+            "f": partial(show_simple, stat="failed"),
             "s": show_skipped,
-            "p": partial(show_simple, "passed"),
+            "p": partial(show_simple, stat="passed"),
-            "E": partial(show_simple, "error"),
+            "E": partial(show_simple, stat="error"),
         }

         lines: List[str] = []
@@ -1135,7 +1152,7 @@ class TerminalReporter:
                 action(lines)

         if lines:
-            self.write_sep("=", "short test summary info")
+            self.write_sep("=", "short test summary info", cyan=True, bold=True)
             for line in lines:
                 self.write_line(line)
@@ -1249,9 +1266,14 @@ class TerminalReporter:
         return parts, main_color


-def _get_pos(config: Config, rep: BaseReport):
+def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport):
     nodeid = config.cwd_relative_nodeid(rep.nodeid)
-    return nodeid
+    path, *parts = nodeid.split("::")
+    if parts:
+        parts_markup = tw.markup("::".join(parts), bold=True)
+        return path + "::" + parts_markup
+    else:
+        return path


 def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]:
@@ -1280,13 +1302,14 @@ def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str


 def _get_line_with_reprcrash_message(
-    config: Config, rep: BaseReport, termwidth: int
+    config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: Dict[str, bool]
 ) -> str:
     """Get summary line for a report, trying to add reprcrash message."""
     verbose_word = rep._get_verbose_word(config)
-    pos = _get_pos(config, rep)
+    word = tw.markup(verbose_word, **word_markup)
+    node = _get_node_id_with_markup(tw, config, rep)

-    line = f"{verbose_word} {pos}"
+    line = f"{word} {node}"
     line_width = wcswidth(line)

     try:
@@ -1295,8 +1318,11 @@ def _get_line_with_reprcrash_message(
     except AttributeError:
         pass
     else:
-        available_width = termwidth - line_width
+        if not running_on_ci():
+            available_width = tw.fullwidth - line_width
             msg = _format_trimmed(" - {}", msg, available_width)
+        else:
+            msg = f" - {msg}"
         if msg is not None:
             line += msg
|
||||||
# Arguably we could always postpone tearDown(), but this changes the moment where the
|
# Arguably we could always postpone tearDown(), but this changes the moment where the
|
||||||
# TestCase instance interacts with the results object, so better to only do it
|
# TestCase instance interacts with the results object, so better to only do it
|
||||||
# when absolutely needed.
|
# when absolutely needed.
|
||||||
if self.config.getoption("usepdb") and not _is_skipped(self.obj):
|
# We need to consider if the test itself is skipped, or the whole class.
|
||||||
|
assert isinstance(self.parent, UnitTestCase)
|
||||||
|
skipped = _is_skipped(self.obj) or _is_skipped(self.parent.obj)
|
||||||
|
if self.config.getoption("usepdb") and not skipped:
|
||||||
self._explicit_tearDown = self._testcase.tearDown
|
self._explicit_tearDown = self._testcase.tearDown
|
||||||
setattr(self._testcase, "tearDown", lambda *args: None)
|
setattr(self._testcase, "tearDown", lambda *args: None)
|
||||||
|
|
||||||
|
|
|
@ -55,6 +55,13 @@ class PytestRemovedIn8Warning(PytestDeprecationWarning):
|
||||||
__module__ = "pytest"
|
__module__ = "pytest"
|
||||||
|
|
||||||
|
|
||||||
|
@final
|
||||||
|
class PytestReturnNotNoneWarning(PytestDeprecationWarning):
|
||||||
|
"""Warning emitted when a test function is returning value other than None."""
|
||||||
|
|
||||||
|
__module__ = "pytest"
|
||||||
|
|
||||||
|
|
||||||
@final
|
@final
|
||||||
class PytestExperimentalApiWarning(PytestWarning, FutureWarning):
|
class PytestExperimentalApiWarning(PytestWarning, FutureWarning):
|
||||||
"""Warning category used to denote experiments in pytest.
|
"""Warning category used to denote experiments in pytest.
|
||||||
|
|
|
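Editor's note (not part of the diff): a minimal sketch of the behaviour the new PytestReturnNotNoneWarning above is meant to cover, assuming a pytest build that includes this change; the file and test names are illustrative only.

    # test_return_value.py -- returning a value from a test now emits the
    # deprecation warning exercised by the new test further down in this diff.
    def test_answer():
        return 42  # pytest warns: "Did you mean to use `assert` instead of `return`?"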
@@ -69,6 +69,7 @@ from _pytest.warning_types import PytestConfigWarning
 from _pytest.warning_types import PytestDeprecationWarning
 from _pytest.warning_types import PytestExperimentalApiWarning
 from _pytest.warning_types import PytestRemovedIn8Warning
+from _pytest.warning_types import PytestReturnNotNoneWarning
 from _pytest.warning_types import PytestUnhandledCoroutineWarning
 from _pytest.warning_types import PytestUnhandledThreadExceptionWarning
 from _pytest.warning_types import PytestUnknownMarkWarning
@@ -127,6 +128,7 @@ __all__ = [
     "PytestDeprecationWarning",
     "PytestExperimentalApiWarning",
     "PytestRemovedIn8Warning",
+    "PytestReturnNotNoneWarning",
     "Pytester",
     "PytestPluginManager",
     "PytestUnhandledCoroutineWarning",

@@ -1292,3 +1292,14 @@ def test_no_brokenpipeerror_message(pytester: Pytester) -> None:

     # Cleanup.
     popen.stderr.close()
+
+
+def test_function_return_non_none_warning(testdir) -> None:
+    testdir.makepyfile(
+        """
+        def test_stuff():
+            return "something"
+        """
+    )
+    res = testdir.runpytest()
+    res.stdout.fnmatch_lines(["*Did you mean to use `assert` instead of `return`?*"])

@@ -172,6 +172,24 @@ def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardow
     assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"}


+def test_clear_for_call_stage(caplog, logging_during_setup_and_teardown):
+    logger.info("a_call_log")
+    assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]
+    assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
+    assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"}
+
+    caplog.clear()
+
+    assert caplog.get_records("call") == []
+    assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
+    assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"}
+
+    logging.info("a_call_log_after_clear")
+    assert [x.message for x in caplog.get_records("call")] == ["a_call_log_after_clear"]
+    assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
+    assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"}
+
+
 def test_ini_controls_global_log_level(pytester: Pytester) -> None:
     pytester.makepyfile(
         """
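Editor's note (not part of the diff): a standalone sketch of the caplog behaviour pinned down by the new test_clear_for_call_stage above — caplog.clear() resets only the current ("call") stage, while records captured during setup remain retrievable. This assumes the caplog.clear() fix included in this merge; fixture and test names below are illustrative.

    import logging

    import pytest

    logger = logging.getLogger(__name__)

    @pytest.fixture
    def logs_during_setup(caplog):
        caplog.set_level(logging.INFO)
        logger.info("a_setup_log")  # captured in the "setup" stage
        yield

    def test_clear_only_resets_call_stage(logs_during_setup, caplog):
        logger.info("a_call_log")
        caplog.clear()  # drops the "call"-stage records only
        assert caplog.get_records("call") == []
        assert [r.message for r in caplog.get_records("setup")] == ["a_setup_log"]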
@@ -1,14 +1,14 @@
-anyio[curio,trio]==3.5.0
-django==4.0.4
+anyio[curio,trio]==3.6.1
+django==4.0.6
 pytest-asyncio==0.18.3
-pytest-bdd==5.0.0
+pytest-bdd==6.0.1
 pytest-cov==3.0.0
 pytest-django==4.5.2
 pytest-flakes==4.0.5
 pytest-html==3.1.1
-pytest-mock==3.7.0
+pytest-mock==3.8.2
 pytest-rerunfailures==10.2
-pytest-sugar==0.9.4
+pytest-sugar==0.9.5
 pytest-trio==0.7.0
 pytest-twisted==1.13.4
 twisted==22.4.0

@@ -2,12 +2,14 @@ import operator
 from contextlib import contextmanager
 from decimal import Decimal
 from fractions import Fraction
+from math import sqrt
 from operator import eq
 from operator import ne
 from typing import Optional

 import pytest
 from _pytest.pytester import Pytester
+from _pytest.python_api import _recursive_sequence_map
 from pytest import approx

 inf, nan = float("inf"), float("nan")
@@ -133,6 +135,18 @@ class TestApprox:
             ],
         )

+        assert_approx_raises_regex(
+            (1, 2.2, 4),
+            (1, 3.2, 4),
+            [
+                r" comparison failed. Mismatched elements: 1 / 3:",
+                rf" Max absolute difference: {SOME_FLOAT}",
+                rf" Max relative difference: {SOME_FLOAT}",
+                r" Index \| Obtained\s+\| Expected ",
+                rf" 1 \| {SOME_FLOAT} \| {SOME_FLOAT} ± {SOME_FLOAT}",
+            ],
+        )
+
         # Specific test for comparison with 0.0 (relative diff will be 'inf')
         assert_approx_raises_regex(
             [0.0],
@@ -878,3 +892,31 @@ class TestApprox:
         """pytest.approx() should raise an error on unordered sequences (#9692)."""
         with pytest.raises(TypeError, match="only supports ordered sequences"):
             assert {1, 2, 3} == approx({1, 2, 3})
+
+
+class TestRecursiveSequenceMap:
+    def test_map_over_scalar(self):
+        assert _recursive_sequence_map(sqrt, 16) == 4
+
+    def test_map_over_empty_list(self):
+        assert _recursive_sequence_map(sqrt, []) == []
+
+    def test_map_over_list(self):
+        assert _recursive_sequence_map(sqrt, [4, 16, 25, 676]) == [2, 4, 5, 26]
+
+    def test_map_over_tuple(self):
+        assert _recursive_sequence_map(sqrt, (4, 16, 25, 676)) == (2, 4, 5, 26)
+
+    def test_map_over_nested_lists(self):
+        assert _recursive_sequence_map(sqrt, [4, [25, 64], [[49]]]) == [
+            2,
+            [5, 8],
+            [[7]],
+        ]
+
+    def test_map_over_mixed_sequence(self):
+        assert _recursive_sequence_map(sqrt, [4, (25, 64), [(49)]]) == [
+            2,
+            (5, 8),
+            [(7)],
+        ]
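Editor's note (not part of the diff): the private helper exercised by TestRecursiveSequenceMap above maps a function over arbitrarily nested lists and tuples while preserving their shape; a small sketch, assuming the helper keeps the signature used in those tests.

    from math import sqrt

    from _pytest.python_api import _recursive_sequence_map  # private helper, may change

    # Shape is preserved: lists stay lists, tuples stay tuples.
    assert _recursive_sequence_map(sqrt, [4, (25, 64), [[49]]]) == [2.0, (5.0, 8.0), [[7.0]]]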
@@ -19,6 +19,16 @@ class TestRaises:
         excinfo = pytest.raises(ValueError, int, "hello")
         assert "invalid literal" in str(excinfo.value)

+    def test_raises_does_not_allow_none(self):
+        with pytest.raises(ValueError, match="Expected an exception type or"):
+            # We're testing that this invalid usage gives a helpful error,
+            # so we can ignore Mypy telling us that None is invalid.
+            pytest.raises(expected_exception=None)  # type: ignore
+
+    def test_raises_does_not_allow_empty_tuple(self):
+        with pytest.raises(ValueError, match="Expected an exception type or"):
+            pytest.raises(expected_exception=())
+
     def test_raises_callable_no_exception(self) -> None:
         class A:
             def __call__(self):
@@ -82,13 +92,9 @@ class TestRaises:
     def test_does_not_raise(self, pytester: Pytester) -> None:
         pytester.makepyfile(
             """
-            from contextlib import contextmanager
+            from contextlib import nullcontext as does_not_raise
             import pytest

-            @contextmanager
-            def does_not_raise():
-                yield
-
             @pytest.mark.parametrize('example_input,expectation', [
                 (3, does_not_raise()),
                 (2, does_not_raise()),
@@ -107,13 +113,9 @@ class TestRaises:
     def test_does_not_raise_does_raise(self, pytester: Pytester) -> None:
         pytester.makepyfile(
             """
-            from contextlib import contextmanager
+            from contextlib import nullcontext as does_not_raise
             import pytest

-            @contextmanager
-            def does_not_raise():
-                yield
-
             @pytest.mark.parametrize('example_input,expectation', [
                 (0, does_not_raise()),
                 (1, pytest.raises(ZeroDivisionError)),

@@ -1009,7 +1009,7 @@ class TestAssertionRewriteHookDetails:
         )
         assert pytester.runpytest().ret == 0

-    def test_write_pyc(self, pytester: Pytester, tmp_path, monkeypatch) -> None:
+    def test_write_pyc(self, pytester: Pytester, tmp_path) -> None:
         from _pytest.assertion.rewrite import _write_pyc
         from _pytest.assertion import AssertionState

@@ -1021,26 +1021,7 @@ class TestAssertionRewriteHookDetails:
         co = compile("1", "f.py", "single")
         assert _write_pyc(state, co, os.stat(source_path), pycpath)

-        if sys.platform == "win32":
-            from contextlib import contextmanager
-
-            @contextmanager
-            def atomic_write_failed(fn, mode="r", overwrite=False):
-                e = OSError()
-                e.errno = 10
-                raise e
-                yield  # type:ignore[unreachable]
-
-            monkeypatch.setattr(
-                _pytest.assertion.rewrite, "atomic_write", atomic_write_failed
-            )
-        else:
-
-            def raise_oserror(*args):
-                raise OSError()
-
-            monkeypatch.setattr("os.rename", raise_oserror)
-
+        with mock.patch.object(os, "replace", side_effect=OSError):
             assert not _write_pyc(state, co, os.stat(source_path), pycpath)

     def test_resources_provider_for_loader(self, pytester: Pytester) -> None:
|
||||||
iter_f = iter(f)
|
iter_f = iter(f)
|
||||||
pytest.raises(OSError, next, iter_f)
|
pytest.raises(OSError, next, iter_f)
|
||||||
pytest.raises(UnsupportedOperation, f.fileno)
|
pytest.raises(UnsupportedOperation, f.fileno)
|
||||||
|
pytest.raises(UnsupportedOperation, f.flush)
|
||||||
|
assert not f.readable()
|
||||||
|
pytest.raises(UnsupportedOperation, f.seek, 0)
|
||||||
|
assert not f.seekable()
|
||||||
|
pytest.raises(UnsupportedOperation, f.tell)
|
||||||
|
pytest.raises(UnsupportedOperation, f.truncate, 0)
|
||||||
|
pytest.raises(UnsupportedOperation, f.write, b"")
|
||||||
|
pytest.raises(UnsupportedOperation, f.writelines, [])
|
||||||
|
assert not f.writable()
|
||||||
f.close() # just for completeness
|
f.close() # just for completeness
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -244,28 +244,32 @@ class TestCollectFS:
|
||||||
pytester.makeini(
|
pytester.makeini(
|
||||||
"""
|
"""
|
||||||
[pytest]
|
[pytest]
|
||||||
testpaths = gui uts
|
testpaths = */tests
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
tmp_path = pytester.path
|
tmp_path = pytester.path
|
||||||
ensure_file(tmp_path / "env" / "test_1.py").write_text("def test_env(): pass")
|
ensure_file(tmp_path / "a" / "test_1.py").write_text("def test_a(): pass")
|
||||||
ensure_file(tmp_path / "gui" / "test_2.py").write_text("def test_gui(): pass")
|
ensure_file(tmp_path / "b" / "tests" / "test_2.py").write_text(
|
||||||
ensure_file(tmp_path / "uts" / "test_3.py").write_text("def test_uts(): pass")
|
"def test_b(): pass"
|
||||||
|
)
|
||||||
|
ensure_file(tmp_path / "c" / "tests" / "test_3.py").write_text(
|
||||||
|
"def test_c(): pass"
|
||||||
|
)
|
||||||
|
|
||||||
# executing from rootdir only tests from `testpaths` directories
|
# executing from rootdir only tests from `testpaths` directories
|
||||||
# are collected
|
# are collected
|
||||||
items, reprec = pytester.inline_genitems("-v")
|
items, reprec = pytester.inline_genitems("-v")
|
||||||
assert [x.name for x in items] == ["test_gui", "test_uts"]
|
assert [x.name for x in items] == ["test_b", "test_c"]
|
||||||
|
|
||||||
# check that explicitly passing directories in the command-line
|
# check that explicitly passing directories in the command-line
|
||||||
# collects the tests
|
# collects the tests
|
||||||
for dirname in ("env", "gui", "uts"):
|
for dirname in ("a", "b", "c"):
|
||||||
items, reprec = pytester.inline_genitems(tmp_path.joinpath(dirname))
|
items, reprec = pytester.inline_genitems(tmp_path.joinpath(dirname))
|
||||||
assert [x.name for x in items] == ["test_%s" % dirname]
|
assert [x.name for x in items] == ["test_%s" % dirname]
|
||||||
|
|
||||||
# changing cwd to each subdirectory and running pytest without
|
# changing cwd to each subdirectory and running pytest without
|
||||||
# arguments collects the tests in that directory normally
|
# arguments collects the tests in that directory normally
|
||||||
for dirname in ("env", "gui", "uts"):
|
for dirname in ("a", "b", "c"):
|
||||||
monkeypatch.chdir(pytester.path.joinpath(dirname))
|
monkeypatch.chdir(pytester.path.joinpath(dirname))
|
||||||
items, reprec = pytester.inline_genitems()
|
items, reprec = pytester.inline_genitems()
|
||||||
assert [x.name for x in items] == ["test_%s" % dirname]
|
assert [x.name for x in items] == ["test_%s" % dirname]
|
||||||
|
|
|
@ -112,21 +112,26 @@ class TestParseIni:
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
"section, name",
|
"section, name",
|
||||||
[("tool:pytest", "setup.cfg"), ("pytest", "tox.ini"), ("pytest", "pytest.ini")],
|
[
|
||||||
|
("tool:pytest", "setup.cfg"),
|
||||||
|
("pytest", "tox.ini"),
|
||||||
|
("pytest", "pytest.ini"),
|
||||||
|
("pytest", ".pytest.ini"),
|
||||||
|
],
|
||||||
)
|
)
|
||||||
def test_ini_names(self, pytester: Pytester, name, section) -> None:
|
def test_ini_names(self, pytester: Pytester, name, section) -> None:
|
||||||
pytester.path.joinpath(name).write_text(
|
pytester.path.joinpath(name).write_text(
|
||||||
textwrap.dedent(
|
textwrap.dedent(
|
||||||
"""
|
"""
|
||||||
[{section}]
|
[{section}]
|
||||||
minversion = 1.0
|
minversion = 3.36
|
||||||
""".format(
|
""".format(
|
||||||
section=section
|
section=section
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
config = pytester.parseconfig()
|
config = pytester.parseconfig()
|
||||||
assert config.getini("minversion") == "1.0"
|
assert config.getini("minversion") == "3.36"
|
||||||
|
|
||||||
def test_pyproject_toml(self, pytester: Pytester) -> None:
|
def test_pyproject_toml(self, pytester: Pytester) -> None:
|
||||||
pytester.makepyprojecttoml(
|
pytester.makepyprojecttoml(
|
||||||
|
@ -2117,8 +2122,8 @@ class TestDebugOptions:
|
||||||
result = pytester.runpytest("-h")
|
result = pytester.runpytest("-h")
|
||||||
result.stdout.fnmatch_lines(
|
result.stdout.fnmatch_lines(
|
||||||
[
|
[
|
||||||
"*store internal tracing debug information in this log*",
|
"*Store internal tracing debug information in this log*",
|
||||||
"*This file is opened with 'w' and truncated as a result*",
|
"*file. This file is opened with 'w' and truncated as a*",
|
||||||
"*Defaults to 'pytestdebug.log'.",
|
"*Default: pytestdebug.log.",
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
|
@ -553,7 +553,7 @@ class TestConftestVisibility:
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
print("created directory structure:")
|
print("created directory structure:")
|
||||||
for x in pytester.path.rglob(""):
|
for x in pytester.path.glob("**/"):
|
||||||
print(" " + str(x.relative_to(pytester.path)))
|
print(" " + str(x.relative_to(pytester.path)))
|
||||||
|
|
||||||
return {"runner": runner, "package": package, "swc": swc, "snc": snc}
|
return {"runner": runner, "package": package, "swc": swc, "snc": snc}
|
||||||
|
|
|
@ -244,7 +244,7 @@ class TestPDB:
|
||||||
"""
|
"""
|
||||||
def test_1():
|
def test_1():
|
||||||
import logging
|
import logging
|
||||||
logging.warn("get " + "rekt")
|
logging.warning("get " + "rekt")
|
||||||
assert False
|
assert False
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
@ -263,7 +263,7 @@ class TestPDB:
|
||||||
"""
|
"""
|
||||||
def test_1():
|
def test_1():
|
||||||
import logging
|
import logging
|
||||||
logging.warn("get " + "rekt")
|
logging.warning("get " + "rekt")
|
||||||
assert False
|
assert False
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
@ -353,6 +353,7 @@ class TestPDB:
|
||||||
result = pytester.runpytest_subprocess("--pdb", ".")
|
result = pytester.runpytest_subprocess("--pdb", ".")
|
||||||
result.stdout.fnmatch_lines(["-> import unknown"])
|
result.stdout.fnmatch_lines(["-> import unknown"])
|
||||||
|
|
||||||
|
@pytest.mark.xfail(reason="#10042")
|
||||||
def test_pdb_interaction_capturing_simple(self, pytester: Pytester) -> None:
|
def test_pdb_interaction_capturing_simple(self, pytester: Pytester) -> None:
|
||||||
p1 = pytester.makepyfile(
|
p1 = pytester.makepyfile(
|
||||||
"""
|
"""
|
||||||
|
@ -521,6 +522,7 @@ class TestPDB:
|
||||||
assert "BdbQuit" not in rest
|
assert "BdbQuit" not in rest
|
||||||
assert "UNEXPECTED EXCEPTION" not in rest
|
assert "UNEXPECTED EXCEPTION" not in rest
|
||||||
|
|
||||||
|
@pytest.mark.xfail(reason="#10042")
|
||||||
def test_pdb_interaction_capturing_twice(self, pytester: Pytester) -> None:
|
def test_pdb_interaction_capturing_twice(self, pytester: Pytester) -> None:
|
||||||
p1 = pytester.makepyfile(
|
p1 = pytester.makepyfile(
|
||||||
"""
|
"""
|
||||||
|
@ -556,6 +558,7 @@ class TestPDB:
|
||||||
assert "1 failed" in rest
|
assert "1 failed" in rest
|
||||||
self.flush(child)
|
self.flush(child)
|
||||||
|
|
||||||
|
@pytest.mark.xfail(reason="#10042")
|
||||||
def test_pdb_with_injected_do_debug(self, pytester: Pytester) -> None:
|
def test_pdb_with_injected_do_debug(self, pytester: Pytester) -> None:
|
||||||
"""Simulates pdbpp, which injects Pdb into do_debug, and uses
|
"""Simulates pdbpp, which injects Pdb into do_debug, and uses
|
||||||
self.__class__ in do_continue.
|
self.__class__ in do_continue.
|
||||||
|
@ -1000,6 +1003,7 @@ class TestDebuggingBreakpoints:
|
||||||
assert "reading from stdin while output" not in rest
|
assert "reading from stdin while output" not in rest
|
||||||
TestPDB.flush(child)
|
TestPDB.flush(child)
|
||||||
|
|
||||||
|
@pytest.mark.xfail(reason="#10042")
|
||||||
def test_pdb_not_altered(self, pytester: Pytester) -> None:
|
def test_pdb_not_altered(self, pytester: Pytester) -> None:
|
||||||
p1 = pytester.makepyfile(
|
p1 = pytester.makepyfile(
|
||||||
"""
|
"""
|
||||||
|
@ -1159,6 +1163,7 @@ def test_quit_with_swallowed_SystemExit(pytester: Pytester) -> None:
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("fixture", ("capfd", "capsys"))
|
@pytest.mark.parametrize("fixture", ("capfd", "capsys"))
|
||||||
|
@pytest.mark.xfail(reason="#10042")
|
||||||
def test_pdb_suspends_fixture_capturing(pytester: Pytester, fixture: str) -> None:
|
def test_pdb_suspends_fixture_capturing(pytester: Pytester, fixture: str) -> None:
|
||||||
"""Using "-s" with pytest should suspend/resume fixture capturing."""
|
"""Using "-s" with pytest should suspend/resume fixture capturing."""
|
||||||
p1 = pytester.makepyfile(
|
p1 = pytester.makepyfile(
|
||||||
|
|
|
@ -113,6 +113,28 @@ class TestDoctests:
|
||||||
reprec = pytester.inline_run(p)
|
reprec = pytester.inline_run(p)
|
||||||
reprec.assertoutcome(failed=1)
|
reprec.assertoutcome(failed=1)
|
||||||
|
|
||||||
|
def test_importmode(self, pytester: Pytester):
|
||||||
|
p = pytester.makepyfile(
|
||||||
|
**{
|
||||||
|
"namespacepkg/innerpkg/__init__.py": "",
|
||||||
|
"namespacepkg/innerpkg/a.py": """
|
||||||
|
def some_func():
|
||||||
|
return 42
|
||||||
|
""",
|
||||||
|
"namespacepkg/innerpkg/b.py": """
|
||||||
|
from namespacepkg.innerpkg.a import some_func
|
||||||
|
def my_func():
|
||||||
|
'''
|
||||||
|
>>> my_func()
|
||||||
|
42
|
||||||
|
'''
|
||||||
|
return some_func()
|
||||||
|
""",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
reprec = pytester.inline_run(p, "--doctest-modules", "--import-mode=importlib")
|
||||||
|
reprec.assertoutcome(passed=1)
|
||||||
|
|
||||||
def test_new_pattern(self, pytester: Pytester):
|
def test_new_pattern(self, pytester: Pytester):
|
||||||
p = pytester.maketxtfile(
|
p = pytester.maketxtfile(
|
||||||
xdoc="""
|
xdoc="""
|
||||||
|
@ -201,7 +223,11 @@ class TestDoctests:
|
||||||
"Traceback (most recent call last):",
|
"Traceback (most recent call last):",
|
||||||
' File "*/doctest.py", line *, in __run',
|
' File "*/doctest.py", line *, in __run',
|
||||||
" *",
|
" *",
|
||||||
*((" *^^^^*",) if sys.version_info >= (3, 11) else ()),
|
*(
|
||||||
|
(" *^^^^*",)
|
||||||
|
if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
|
||||||
|
else ()
|
||||||
|
),
|
||||||
' File "<doctest test_doctest_unexpected_exception.txt[1]>", line 1, in <module>',
|
' File "<doctest test_doctest_unexpected_exception.txt[1]>", line 1, in <module>',
|
||||||
"ZeroDivisionError: division by zero",
|
"ZeroDivisionError: division by zero",
|
||||||
"*/test_doctest_unexpected_exception.txt:2: UnexpectedException",
|
"*/test_doctest_unexpected_exception.txt:2: UnexpectedException",
|
||||||
|
|
|
@ -30,11 +30,11 @@ def test_help(pytester: Pytester) -> None:
|
||||||
assert result.ret == 0
|
assert result.ret == 0
|
||||||
result.stdout.fnmatch_lines(
|
result.stdout.fnmatch_lines(
|
||||||
"""
|
"""
|
||||||
-m MARKEXPR only run tests matching given mark expression.
|
-m MARKEXPR Only run tests matching given mark expression. For
|
||||||
For example: -m 'mark1 and not mark2'.
|
example: -m 'mark1 and not mark2'.
|
||||||
reporting:
|
Reporting:
|
||||||
--durations=N *
|
--durations=N *
|
||||||
-V, --version display pytest version and information about plugins.
|
-V, --version Display pytest version and information about plugins.
|
||||||
When given twice, also display information about
|
When given twice, also display information about
|
||||||
plugins.
|
plugins.
|
||||||
*setup.cfg*
|
*setup.cfg*
|
||||||
|
@ -71,9 +71,9 @@ def test_empty_help_param(pytester: Pytester) -> None:
|
||||||
assert result.ret == 0
|
assert result.ret == 0
|
||||||
lines = [
|
lines = [
|
||||||
" required_plugins (args):",
|
" required_plugins (args):",
|
||||||
" plugins that must be present for pytest to run*",
|
" Plugins that must be present for pytest to run*",
|
||||||
" test_ini (bool):*",
|
" test_ini (bool):*",
|
||||||
"environment variables:",
|
"Environment variables:",
|
||||||
]
|
]
|
||||||
result.stdout.fnmatch_lines(lines, consecutive=True)
|
result.stdout.fnmatch_lines(lines, consecutive=True)
|
||||||
|
|
||||||
|
|
|
@ -47,7 +47,7 @@ def test_wrap_session_notify_exception(ret_exc, pytester: Pytester) -> None:
|
||||||
|
|
||||||
end_lines = (
|
end_lines = (
|
||||||
result.stdout.lines[-4:]
|
result.stdout.lines[-4:]
|
||||||
if sys.version_info >= (3, 11)
|
if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
|
||||||
else result.stdout.lines[-3:]
|
else result.stdout.lines[-3:]
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -57,7 +57,7 @@ def test_wrap_session_notify_exception(ret_exc, pytester: Pytester) -> None:
|
||||||
'INTERNALERROR> raise SystemExit("boom")',
|
'INTERNALERROR> raise SystemExit("boom")',
|
||||||
*(
|
*(
|
||||||
("INTERNALERROR> ^^^^^^^^^^^^^^^^^^^^^^^^",)
|
("INTERNALERROR> ^^^^^^^^^^^^^^^^^^^^^^^^",)
|
||||||
if sys.version_info >= (3, 11)
|
if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
|
||||||
else ()
|
else ()
|
||||||
),
|
),
|
||||||
"INTERNALERROR> SystemExit: boom",
|
"INTERNALERROR> SystemExit: boom",
|
||||||
|
@ -68,7 +68,7 @@ def test_wrap_session_notify_exception(ret_exc, pytester: Pytester) -> None:
|
||||||
'INTERNALERROR> raise ValueError("boom")',
|
'INTERNALERROR> raise ValueError("boom")',
|
||||||
*(
|
*(
|
||||||
("INTERNALERROR> ^^^^^^^^^^^^^^^^^^^^^^^^",)
|
("INTERNALERROR> ^^^^^^^^^^^^^^^^^^^^^^^^",)
|
||||||
if sys.version_info >= (3, 11)
|
if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
|
||||||
else ()
|
else ()
|
||||||
),
|
),
|
||||||
"INTERNALERROR> ValueError: boom",
|
"INTERNALERROR> ValueError: boom",
|
||||||
|
|
|
@ -441,10 +441,8 @@ class TestXFail:
|
||||||
result = pytester.runpytest(p, "-rx")
|
result = pytester.runpytest(p, "-rx")
|
||||||
result.stdout.fnmatch_lines(
|
result.stdout.fnmatch_lines(
|
||||||
[
|
[
|
||||||
"*test_one*test_this*",
|
"*test_one*test_this - reason: *NOTRUN* noway",
|
||||||
"*NOTRUN*noway",
|
"*test_one*test_this_true - reason: *NOTRUN* condition: True",
|
||||||
"*test_one*test_this_true*",
|
|
||||||
"*NOTRUN*condition:*True*",
|
|
||||||
"*1 passed*",
|
"*1 passed*",
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
@ -461,9 +459,7 @@ class TestXFail:
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
result = pytester.runpytest(p, "-rx")
|
result = pytester.runpytest(p, "-rx")
|
||||||
result.stdout.fnmatch_lines(
|
result.stdout.fnmatch_lines(["*test_one*test_this*NOTRUN*hello", "*1 xfailed*"])
|
||||||
["*test_one*test_this*", "*NOTRUN*hello", "*1 xfailed*"]
|
|
||||||
)
|
|
||||||
|
|
||||||
def test_xfail_xpass(self, pytester: Pytester) -> None:
|
def test_xfail_xpass(self, pytester: Pytester) -> None:
|
||||||
p = pytester.makepyfile(
|
p = pytester.makepyfile(
|
||||||
|
@ -489,7 +485,7 @@ class TestXFail:
|
||||||
result = pytester.runpytest(p)
|
result = pytester.runpytest(p)
|
||||||
result.stdout.fnmatch_lines(["*1 xfailed*"])
|
result.stdout.fnmatch_lines(["*1 xfailed*"])
|
||||||
result = pytester.runpytest(p, "-rx")
|
result = pytester.runpytest(p, "-rx")
|
||||||
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
|
result.stdout.fnmatch_lines(["*XFAIL*test_this*reason:*hello*"])
|
||||||
result = pytester.runpytest(p, "--runxfail")
|
result = pytester.runpytest(p, "--runxfail")
|
||||||
result.stdout.fnmatch_lines(["*1 pass*"])
|
result.stdout.fnmatch_lines(["*1 pass*"])
|
||||||
|
|
||||||
|
@ -507,7 +503,7 @@ class TestXFail:
|
||||||
result = pytester.runpytest(p)
|
result = pytester.runpytest(p)
|
||||||
result.stdout.fnmatch_lines(["*1 xfailed*"])
|
result.stdout.fnmatch_lines(["*1 xfailed*"])
|
||||||
result = pytester.runpytest(p, "-rx")
|
result = pytester.runpytest(p, "-rx")
|
||||||
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
|
result.stdout.fnmatch_lines(["*XFAIL*test_this*reason:*hello*"])
|
||||||
result = pytester.runpytest(p, "--runxfail")
|
result = pytester.runpytest(p, "--runxfail")
|
||||||
result.stdout.fnmatch_lines(
|
result.stdout.fnmatch_lines(
|
||||||
"""
|
"""
|
||||||
|
@ -543,7 +539,7 @@ class TestXFail:
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
result = pytester.runpytest(p, "-rxX")
|
result = pytester.runpytest(p, "-rxX")
|
||||||
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*NOTRUN*"])
|
result.stdout.fnmatch_lines(["*XFAIL*test_this*NOTRUN*"])
|
||||||
|
|
||||||
def test_dynamic_xfail_set_during_funcarg_setup(self, pytester: Pytester) -> None:
|
def test_dynamic_xfail_set_during_funcarg_setup(self, pytester: Pytester) -> None:
|
||||||
p = pytester.makepyfile(
|
p = pytester.makepyfile(
|
||||||
|
@ -622,7 +618,7 @@ class TestXFail:
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
result = pytester.runpytest(p, "-rxX")
|
result = pytester.runpytest(p, "-rxX")
|
||||||
result.stdout.fnmatch_lines(["*XFAIL*", "*unsupported feature*"])
|
result.stdout.fnmatch_lines(["*XFAIL*unsupported feature*"])
|
||||||
assert result.ret == 0
|
assert result.ret == 0
|
||||||
|
|
||||||
@pytest.mark.parametrize("strict", [True, False])
|
@pytest.mark.parametrize("strict", [True, False])
|
||||||
|
@ -1185,7 +1181,7 @@ def test_xfail_skipif_with_globals(pytester: Pytester) -> None:
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
result = pytester.runpytest("-rsx")
|
result = pytester.runpytest("-rsx")
|
||||||
result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"])
|
result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*x == 3*"])
|
||||||
|
|
||||||
|
|
||||||
def test_default_markers(pytester: Pytester) -> None:
|
def test_default_markers(pytester: Pytester) -> None:
|
||||||
|
@ -1297,8 +1293,7 @@ class TestBooleanCondition:
|
||||||
result = pytester.runpytest("-rxs")
|
result = pytester.runpytest("-rxs")
|
||||||
result.stdout.fnmatch_lines(
|
result.stdout.fnmatch_lines(
|
||||||
"""
|
"""
|
||||||
*XFAIL*
|
*XFAIL*True123*
|
||||||
*True123*
|
|
||||||
*1 xfail*
|
*1 xfail*
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
|
|
@ -277,4 +277,4 @@ def test_stepwise_skip_is_independent(pytester: Pytester) -> None:
|
||||||
|
|
||||||
def test_sw_skip_help(pytester: Pytester) -> None:
|
def test_sw_skip_help(pytester: Pytester) -> None:
|
||||||
result = pytester.runpytest("-h")
|
result = pytester.runpytest("-h")
|
||||||
result.stdout.fnmatch_lines("*implicitly enables --stepwise.")
|
result.stdout.fnmatch_lines("*Implicitly enables --stepwise.")
|
||||||
|
|
|
@ -1139,7 +1139,21 @@ class TestTerminalFunctional:
|
||||||
assert result.stdout.lines.count(expected) == 1
|
assert result.stdout.lines.count(expected) == 1
|
||||||
|
|
||||||
|
|
||||||
def test_fail_extra_reporting(pytester: Pytester, monkeypatch) -> None:
|
@pytest.mark.parametrize(
|
||||||
|
("use_ci", "expected_message"),
|
||||||
|
(
|
||||||
|
(True, f"- AssertionError: {'this_failed'*100}"),
|
||||||
|
(False, "- AssertionError: this_failedt..."),
|
||||||
|
),
|
||||||
|
ids=("on CI", "not on CI"),
|
||||||
|
)
|
||||||
|
def test_fail_extra_reporting(
|
||||||
|
pytester: Pytester, monkeypatch, use_ci: bool, expected_message: str
|
||||||
|
) -> None:
|
||||||
|
if use_ci:
|
||||||
|
monkeypatch.setenv("CI", "true")
|
||||||
|
else:
|
||||||
|
monkeypatch.delenv("CI", raising=False)
|
||||||
monkeypatch.setenv("COLUMNS", "80")
|
monkeypatch.setenv("COLUMNS", "80")
|
||||||
pytester.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
|
pytester.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
|
||||||
result = pytester.runpytest("-rN")
|
result = pytester.runpytest("-rN")
|
||||||
|
@ -1148,7 +1162,7 @@ def test_fail_extra_reporting(pytester: Pytester, monkeypatch) -> None:
|
||||||
result.stdout.fnmatch_lines(
|
result.stdout.fnmatch_lines(
|
||||||
[
|
[
|
||||||
"*test summary*",
|
"*test summary*",
|
||||||
"FAILED test_fail_extra_reporting.py::test_this - AssertionError: this_failedt...",
|
f"FAILED test_fail_extra_reporting.py::test_this {expected_message}",
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -2319,7 +2333,7 @@ def test_line_with_reprcrash(monkeypatch: MonkeyPatch) -> None:
|
||||||
def mock_get_pos(*args):
|
def mock_get_pos(*args):
|
||||||
return mocked_pos
|
return mocked_pos
|
||||||
|
|
||||||
monkeypatch.setattr(_pytest.terminal, "_get_pos", mock_get_pos)
|
monkeypatch.setattr(_pytest.terminal, "_get_node_id_with_markup", mock_get_pos)
|
||||||
|
|
||||||
class config:
|
class config:
|
||||||
pass
|
pass
|
||||||
|
@ -2333,10 +2347,16 @@ def test_line_with_reprcrash(monkeypatch: MonkeyPatch) -> None:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def check(msg, width, expected):
|
def check(msg, width, expected):
|
||||||
|
class DummyTerminalWriter:
|
||||||
|
fullwidth = width
|
||||||
|
|
||||||
|
def markup(self, word: str, **markup: str):
|
||||||
|
return word
|
||||||
|
|
||||||
__tracebackhide__ = True
|
__tracebackhide__ = True
|
||||||
if msg:
|
if msg:
|
||||||
rep.longrepr.reprcrash.message = msg # type: ignore
|
rep.longrepr.reprcrash.message = msg # type: ignore
|
||||||
actual = _get_line_with_reprcrash_message(config, rep(), width) # type: ignore
|
actual = _get_line_with_reprcrash_message(config, rep(), DummyTerminalWriter(), {}) # type: ignore
|
||||||
|
|
||||||
assert actual == expected
|
assert actual == expected
|
||||||
if actual != f"{mocked_verbose_word} {mocked_pos}":
|
if actual != f"{mocked_verbose_word} {mocked_pos}":
|
||||||
|
|
|
@ -1241,12 +1241,15 @@ def test_pdb_teardown_called(pytester: Pytester, monkeypatch: MonkeyPatch) -> No
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("mark", ["@unittest.skip", "@pytest.mark.skip"])
|
@pytest.mark.parametrize("mark", ["@unittest.skip", "@pytest.mark.skip"])
|
||||||
def test_pdb_teardown_skipped(
|
def test_pdb_teardown_skipped_for_functions(
|
||||||
pytester: Pytester, monkeypatch: MonkeyPatch, mark: str
|
pytester: Pytester, monkeypatch: MonkeyPatch, mark: str
|
||||||
) -> None:
|
) -> None:
|
||||||
"""With --pdb, setUp and tearDown should not be called for skipped tests."""
|
"""
|
||||||
|
With --pdb, setUp and tearDown should not be called for tests skipped
|
||||||
|
via a decorator (#7215).
|
||||||
|
"""
|
||||||
tracked: List[str] = []
|
tracked: List[str] = []
|
||||||
monkeypatch.setattr(pytest, "test_pdb_teardown_skipped", tracked, raising=False)
|
monkeypatch.setattr(pytest, "track_pdb_teardown_skipped", tracked, raising=False)
|
||||||
|
|
||||||
pytester.makepyfile(
|
pytester.makepyfile(
|
||||||
"""
|
"""
|
||||||
|
@ -1256,10 +1259,10 @@ def test_pdb_teardown_skipped(
|
||||||
class MyTestCase(unittest.TestCase):
|
class MyTestCase(unittest.TestCase):
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
pytest.test_pdb_teardown_skipped.append("setUp:" + self.id())
|
pytest.track_pdb_teardown_skipped.append("setUp:" + self.id())
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
pytest.test_pdb_teardown_skipped.append("tearDown:" + self.id())
|
pytest.track_pdb_teardown_skipped.append("tearDown:" + self.id())
|
||||||
|
|
||||||
{mark}("skipped for reasons")
|
{mark}("skipped for reasons")
|
||||||
def test_1(self):
|
def test_1(self):
|
||||||
|
@ -1274,6 +1277,43 @@ def test_pdb_teardown_skipped(
|
||||||
assert tracked == []
|
assert tracked == []
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("mark", ["@unittest.skip", "@pytest.mark.skip"])
|
||||||
|
def test_pdb_teardown_skipped_for_classes(
|
||||||
|
pytester: Pytester, monkeypatch: MonkeyPatch, mark: str
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
With --pdb, setUp and tearDown should not be called for tests skipped
|
||||||
|
via a decorator on the class (#10060).
|
||||||
|
"""
|
||||||
|
tracked: List[str] = []
|
||||||
|
monkeypatch.setattr(pytest, "track_pdb_teardown_skipped", tracked, raising=False)
|
||||||
|
|
||||||
|
pytester.makepyfile(
|
||||||
|
"""
|
||||||
|
import unittest
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
{mark}("skipped for reasons")
|
||||||
|
class MyTestCase(unittest.TestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
pytest.track_pdb_teardown_skipped.append("setUp:" + self.id())
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
pytest.track_pdb_teardown_skipped.append("tearDown:" + self.id())
|
||||||
|
|
||||||
|
def test_1(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
""".format(
|
||||||
|
mark=mark
|
||||||
|
)
|
||||||
|
)
|
||||||
|
result = pytester.runpytest_inprocess("--pdb")
|
||||||
|
result.stdout.fnmatch_lines("* 1 skipped in *")
|
||||||
|
assert tracked == []
|
||||||
|
|
||||||
|
|
||||||
def test_async_support(pytester: Pytester) -> None:
|
def test_async_support(pytester: Pytester) -> None:
|
||||||
pytest.importorskip("unittest.async_case")
|
pytest.importorskip("unittest.async_case")
|
||||||
|
|
||||||
|
|