Merge branch 'main' into deprecate-nose-plugin
commit c3c48ff19c
@@ -1,6 +1,6 @@
 repos:
 -   repo: https://github.com/psf/black
-    rev: 22.3.0
+    rev: 22.6.0
     hooks:
     -   id: black
         args: [--safe, --quiet]
@@ -10,7 +10,7 @@ repos:
     -   id: blacken-docs
         additional_dependencies: [black==20.8b1]
 -   repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
+    rev: v4.3.0
     hooks:
     -   id: trailing-whitespace
     -   id: end-of-file-fixer
@@ -37,17 +37,17 @@ repos:
         - flake8-typing-imports==1.12.0
         - flake8-docstrings==1.5.0
 -   repo: https://github.com/asottile/reorder_python_imports
-    rev: v3.0.1
+    rev: v3.8.2
     hooks:
     -   id: reorder-python-imports
         args: ['--application-directories=.:src', --py37-plus]
 -   repo: https://github.com/asottile/pyupgrade
-    rev: v2.32.0
+    rev: v2.37.2
     hooks:
     -   id: pyupgrade
         args: [--py37-plus]
 -   repo: https://github.com/asottile/setup-cfg-fmt
-    rev: v1.20.1
+    rev: v1.20.2
     hooks:
     -   id: setup-cfg-fmt
         args: [--max-py-version=3.10]
@@ -56,7 +56,7 @@ repos:
     hooks:
     -   id: python-use-type-annotations
 -   repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.942
+    rev: v0.971
     hooks:
     -   id: mypy
         files: ^(src/|testing/)
@@ -67,7 +67,6 @@ repos:
       - attrs>=19.2.0
       - packaging
       - tomli
-      - types-atomicwrites
       - types-pkg_resources
 -   repo: local
     hooks:
@@ -101,7 +100,7 @@ repos:
         types: [python]
     -   id: py-path-deprecated
         name: py.path usage is deprecated
-        exclude: docs|src/_pytest/deprecated.py|testing/deprecated_test.py
+        exclude: docs|src/_pytest/deprecated.py|testing/deprecated_test.py|src/_pytest/legacypath.py
        language: pygrep
        entry: \bpy\.path\.local
        types: [python]
AUTHORS (15 lines changed)

@@ -15,6 +15,7 @@ Alan Velasco
Alexander Johnson
Alexander King
Alexei Kozlenok
Alice Purcell
Allan Feldman
Aly Sivji
Amir Elkess
@@ -44,6 +45,7 @@ Aron Coyle
Aron Curzon
Aviral Verma
Aviv Palivoda
Babak Keyvani
Barney Gale
Ben Gartner
Ben Webb
@@ -62,9 +64,11 @@ Ceridwen
Charles Cloud
Charles Machalow
Charnjit SiNGH (CCSJ)
Cheuk Ting Ho
Chris Lamb
Chris NeJame
Chris Rose
Chris Wheeler
Christian Boelsen
Christian Fetzer
Christian Neumüller
@@ -83,6 +87,7 @@ Damian Skrzypczak
Daniel Grana
Daniel Hahler
Daniel Nuri
Daniel Sánchez Castelló
Daniel Wandschneider
Daniele Procida
Danielle Jenkins
@@ -164,6 +169,7 @@ Jeff Widman
Jenni Rinker
John Eddie Ayson
John Towler
Jon Parise
Jon Sonesen
Jonas Obrist
Jordan Guymon
@@ -183,7 +189,9 @@ Katarzyna Jachim
Katarzyna Król
Katerina Koukiou
Keri Volans
Kevin C
Kevin Cox
Kevin Hierro Carrasco
Kevin J. Foley
Kian Eliasi
Kian-Meng Ang
@@ -246,6 +254,7 @@ Nicholas Murphy
Niclas Olofsson
Nicolas Delaby
Nikolay Kondratyev
Nipunn Koorapati
Olga Matoula
Oleg Pidsadnyi
Oleg Sushchenko
@@ -257,6 +266,7 @@ Oscar Benjamin
Parth Patel
Patrick Hayes
Paul Müller
Paul Reece
Pauli Virtanen
Pavel Karateev
Paweł Adamczak
@@ -320,6 +330,7 @@ Taneli Hukkinen
Tanvi Mehta
Tarcisio Fischer
Tareq Alayan
Tatiana Ovary
Ted Xiao
Terje Runde
Thomas Grainger
@@ -331,12 +342,14 @@ Tom Dalton
Tom Viner
Tomáš Gavenčiak
Tomer Keren
Tony Narlock
Tor Colvin
Trevor Bekolay
Tyler Goodlet
Tzu-ping Chung
Vasily Kuznetsov
Victor Maryama
Victor Rodriguez
Victor Uriarte
Vidar T. Fauske
Virgil Dupras
@@ -357,5 +370,7 @@ Yoav Caspi
Yuval Shimon
Zac Hatfield-Dodds
Zachary Kneupper
Zachary OBrien
Zhouxin Qiu
Zoltán Máté
Zsolt Cserna
@@ -0,0 +1 @@
+When running with ``--pdb``, ``TestCase.tearDown`` is no longer called for tests when the *class* has been skipped via ``unittest.skip`` or ``pytest.mark.skip``.
@@ -0,0 +1 @@
+Replace the `atomicwrites <https://github.com/untitaker/python-atomicwrites>`__ dependency on Windows with ``os.replace``.
@@ -0,0 +1 @@
+:data:`sys.stdin` now contains all expected methods of a file-like object when capture is enabled.
@@ -0,0 +1 @@
+Doctests now respect the ``--import-mode`` flag.
@@ -0,0 +1 @@
+A warning is now emitted if a test function returns something other than ``None``. This prevents a common mistake among beginners who expect that returning a ``bool`` (for example ``return foo(a, b) == result``) would cause a test to pass or fail, instead of using ``assert``.
@@ -0,0 +1,2 @@
+Improve :py:func:`pytest.raises`. Previously passing an empty tuple would give a confusing
+error. We now raise immediately with a more helpful message.
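For illustration only (not part of the commit), a minimal sketch of the usage this check protects; the test body is hypothetical:

.. code-block:: python

    import pytest

    # A non-empty tuple of exception types is accepted as usual;
    # pytest.raises(()) now fails fast with a clear error message.
    with pytest.raises((ValueError, KeyError)):
        raise ValueError("either exception type passes the check")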
@@ -0,0 +1 @@
+Type-annotate ``FixtureRequest.param`` as ``Any`` as a stopgap measure until :issue:`8073` is fixed.
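For context, a minimal sketch of where ``request.param`` (the attribute now annotated as ``Any``) appears; the fixture and test names are hypothetical:

.. code-block:: python

    import pytest


    @pytest.fixture(params=["a", "b"])
    def letter(request):
        # request.param is typed as Any, so its value carries no static type info.
        return request.param


    def test_letter(letter):
        assert letter in {"a", "b"}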
@@ -0,0 +1 @@
+Fixed path-handling code in ``rewrite.py`` that appeared to work but was incorrect and failed on some systems.
@@ -0,0 +1 @@
+Some coloring has been added to the short test summary.
@@ -0,0 +1 @@
+Ensure ``caplog.get_records(when)`` returns current/correct data after invoking ``caplog.clear()``.
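A minimal editorial sketch of the behaviour this entry guarantees (test name hypothetical):

.. code-block:: python

    import logging


    def test_get_records_after_clear(caplog):
        logging.getLogger().warning("before clear")
        assert [r.getMessage() for r in caplog.get_records("call")] == ["before clear"]
        caplog.clear()
        # After clear(), get_records() must reflect the cleared state.
        assert caplog.get_records("call") == []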
@@ -0,0 +1 @@
+Normalize the help description of all command-line options.
@@ -0,0 +1 @@
+Added shell-style wildcard support to ``testpaths``.
@@ -0,0 +1 @@
+Fix default encoding warning (``EncodingWarning``) in ``cacheprovider``.
@@ -0,0 +1 @@
+Fixed string representation for :func:`pytest.approx` when used to compare tuples.
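For illustration, a minimal sketch of comparing tuples with ``pytest.approx`` (the values are arbitrary):

.. code-block:: python

    import pytest

    # Element-wise approximate comparison of tuples; the fixed repr is what
    # gets shown for the expected side when such an assertion fails.
    assert (0.1 + 0.2, 1.0) == pytest.approx((0.3, 1.0))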
@@ -0,0 +1 @@
+Display full crash messages in ``short test summary info`` when running in a CI environment.
@@ -0,0 +1 @@
+Explicit note that the :fixture:`tmpdir` fixture is discouraged in favour of :fixture:`tmp_path`.
@@ -0,0 +1,4 @@
+Improve the error message when we attempt to access a fixture that has been
+torn down.
+Add an additional sentence to the docstring explaining when it's not a good
+idea to call ``getfixturevalue``.
@@ -0,0 +1 @@
+Added support for a hidden configuration file by allowing ``.pytest.ini`` as an alternative to ``pytest.ini``.
@@ -17,7 +17,6 @@
     <li><a href="{{ pathto('changelog') }}">Changelog</a></li>
     <li><a href="{{ pathto('contributing') }}">Contributing</a></li>
     <li><a href="{{ pathto('backwards-compatibility') }}">Backwards Compatibility</a></li>
-    <li><a href="{{ pathto('py27-py34-deprecation') }}">Python 2.7 and 3.4 Support</a></li>
     <li><a href="{{ pathto('sponsor') }}">Sponsor</a></li>
     <li><a href="{{ pathto('tidelift') }}">pytest for Enterprise</a></li>
     <li><a href="{{ pathto('license') }}">License</a></li>
@@ -30,5 +29,3 @@
 {%- endif %}
 
 <hr>
-<a href="{{ pathto('genindex') }}">Index</a>
-<hr>
@@ -77,3 +77,18 @@ Deprecation Roadmap
 Features currently deprecated and removed in previous releases can be found in :ref:`deprecations`.
 
 We track future deprecation and removal of features using milestones and the `deprecation <https://github.com/pytest-dev/pytest/issues?q=label%3A%22type%3A+deprecation%22>`_ and `removal <https://github.com/pytest-dev/pytest/labels/type%3A%20removal>`_ labels on GitHub.
+
+
+Python version support
+======================
+
+Released pytest versions support all Python versions that are actively maintained at the time of the release:
+
+==============  ===================
+pytest version  min. Python version
+==============  ===================
+7.1+            3.7+
+6.2 - 7.0       3.6+
+5.0 - 6.1       3.5+
+3.3 - 4.6       2.7, 3.4+
+==============  ===================
@@ -2618,7 +2618,8 @@ Important
 
 This release is a Python3.5+ only release.
 
-For more details, see our :std:doc:`Python 2.7 and 3.4 support plan <py27-py34-deprecation>`.
+For more details, see our `Python 2.7 and 3.4 support plan
+<https://docs.pytest.org/en/7.0.x/py27-py34-deprecation.html>`_.
 
 Removals
 --------
 
@@ -2842,7 +2843,11 @@ Features
 
 - :issue:`6870`: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.
 
-  Remark: while this is technically a new feature and according to our :ref:`policy <what goes into 4.6.x releases>` it should not have been backported, we have opened an exception in this particular case because it fixes a serious interaction with ``pytest-xdist``, so it can also be considered a bugfix.
+  Remark: while this is technically a new feature and according to our
+  `policy <https://docs.pytest.org/en/7.0.x/py27-py34-deprecation.html#what-goes-into-4-6-x-releases>`_
+  it should not have been backported, we have opened an exception in this
+  particular case because it fixes a serious interaction with ``pytest-xdist``,
+  so it can also be considered a bugfix.
 
 Trivial/Internal Changes
 ------------------------
 
@@ -3014,7 +3019,8 @@ Important
 
 The ``4.6.X`` series will be the last series to support **Python 2 and Python 3.4**.
 
-For more details, see our :std:doc:`Python 2.7 and 3.4 support plan <py27-py34-deprecation>`.
+For more details, see our `Python 2.7 and 3.4 support plan
+<https://docs.pytest.org/en/7.0.x/py27-py34-deprecation.html>`_.
 
 
 Features
@@ -247,7 +247,7 @@ html_sidebars = {
 html_domain_indices = True
 
 # If false, no index is generated.
-html_use_index = True
+html_use_index = False
 
 # If true, the index is split into individual pages for each letter.
 # html_split_index = False
 
@@ -320,7 +320,9 @@ latex_domain_indices = False
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [("usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)]
+man_pages = [
+    ("how-to/usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)
+]
 
 
 # -- Options for Epub output ---------------------------------------------------
@@ -85,7 +85,6 @@ Further topics
 
    backwards-compatibility
    deprecations
-   py27-py34-deprecation
 
    contributing
    development_guide
@@ -260,6 +260,47 @@ or ``pytest.warns(Warning)``.
 
 See :ref:`warns use cases` for examples.
 
 
+Returning non-None value in test functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. deprecated:: 7.2
+
+A :class:`pytest.PytestReturnNotNoneWarning` is now emitted if a test function returns something other than ``None``.
+
+This prevents a common mistake among beginners who expect that returning a ``bool`` would cause a test to pass or fail, for example:
+
+.. code-block:: python
+
+    @pytest.mark.parametrize(
+        ["a", "b", "result"],
+        [
+            [1, 2, 5],
+            [2, 3, 8],
+            [5, 3, 18],
+        ],
+    )
+    def test_foo(a, b, result):
+        return foo(a, b) == result
+
+Since pytest ignores the return value, it might be surprising that this test will never fail.
+
+The proper fix is to change the ``return`` to an ``assert``:
+
+.. code-block:: python
+
+    @pytest.mark.parametrize(
+        ["a", "b", "result"],
+        [
+            [1, 2, 5],
+            [2, 3, 8],
+            [5, 3, 18],
+        ],
+    )
+    def test_foo(a, b, result):
+        assert foo(a, b) == result
+
+
 The ``--strict`` command-line option
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -346,7 +346,7 @@ Custom marker and command line option to control test runs
 Plugins can provide custom markers and implement specific behaviour
 based on it. This is a self-contained example which adds a command
 line option and a parametrized test function marker to run tests
-specifies via named environments:
+specified via named environments:
 
 .. code-block:: python
 
@@ -657,21 +657,17 @@ Use :func:`pytest.raises` with the
 :ref:`pytest.mark.parametrize ref` decorator to write parametrized tests
 in which some tests raise exceptions and others do not.
 
-It is helpful to define a no-op context manager ``does_not_raise`` to serve
-as a complement to ``raises``. For example:
+It may be helpful to use ``nullcontext`` as a complement to ``raises``.
+
+For example:
 
 .. code-block:: python
 
-    from contextlib import contextmanager
+    from contextlib import nullcontext as does_not_raise
 
     import pytest
 
 
-    @contextmanager
-    def does_not_raise():
-        yield
-
-
     @pytest.mark.parametrize(
         "example_input,expectation",
         [
@@ -688,22 +684,3 @@ as a complement to ``raises``. For example:
 
 In the example above, the first three test cases should run unexceptionally,
 while the fourth should raise ``ZeroDivisionError``.
-
-If you're only supporting Python 3.7+, you can simply use ``nullcontext``
-to define ``does_not_raise``:
-
-.. code-block:: python
-
-    from contextlib import nullcontext as does_not_raise
-
-Or, if you're supporting Python 3.3+ you can use:
-
-.. code-block:: python
-
-    from contextlib import ExitStack as does_not_raise
-
-Or, if desired, you can ``pip install contextlib2`` and use:
-
-.. code-block:: python
-
-    from contextlib2 import nullcontext as does_not_raise
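The parametrized example above is only partially visible in this diff. A compact, self-contained editorial sketch of the same pattern (inputs assumed) looks like:

.. code-block:: python

    from contextlib import nullcontext as does_not_raise

    import pytest


    @pytest.mark.parametrize(
        "example_input,expectation",
        [
            (3, does_not_raise()),
            (1, does_not_raise()),
            (0, pytest.raises(ZeroDivisionError)),
        ],
    )
    def test_division(example_input, expectation):
        # Each case runs under its own context manager: a no-op for valid
        # inputs, pytest.raises for the failing one.
        with expectation:
            assert (6 / example_input) is not None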
@@ -173,10 +173,9 @@ This layout prevents a lot of common pitfalls and has many benefits, which are b
 `blog post by Ionel Cristian Mărieș <https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure>`_.
 
 .. note::
-    The new ``--import-mode=importlib`` (see :ref:`import-modes`) doesn't have
+    The ``--import-mode=importlib`` option (see :ref:`import-modes`) does not have
     any of the drawbacks above because ``sys.path`` is not changed when importing
-    test modules, so users that run
-    into this issue are strongly encouraged to try it and report if the new option works well for them.
+    test modules, so users that run into this issue are strongly encouraged to try it.
 
     The ``src`` directory layout is still strongly recommended however.
@@ -45,10 +45,19 @@ these values:
 
 * ``importlib``: new in pytest-6.0, this mode uses :mod:`importlib` to import test modules. This gives full control over the import process, and doesn't require changing :py:data:`sys.path`.
 
-  For this reason this doesn't require test module names to be unique, but also makes test
-  modules non-importable by each other.
+  For this reason this doesn't require test module names to be unique.
+
+  One drawback however is that test modules are non-importable by each other. Also, utility
+  modules in the tests directories are not automatically importable because the tests directory is no longer
+  added to :py:data:`sys.path`.
+
+  Initially we intended to make ``importlib`` the default in future releases, however it is clear now that
+  it has its own set of drawbacks so the default will remain ``prepend`` for the foreseeable future.
+
+.. seealso::
+
+    The :confval:`pythonpath` configuration variable.
 
-We intend to make ``importlib`` the default in future releases, depending on feedback.
 
 ``prepend`` and ``append`` import modes scenarios
 -------------------------------------------------
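As an editorial aside, a minimal sketch of selecting the mode programmatically (the test path is an assumption):

.. code-block:: python

    import pytest

    # Equivalent to `pytest --import-mode=importlib tests/` on the command line.
    exit_code = pytest.main(["--import-mode=importlib", "tests/"])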
@@ -126,14 +126,17 @@ pytest also introduces new options:
   in expected doctest output.
 
 * ``NUMBER``: when enabled, floating-point numbers only need to match as far as
-  the precision you have written in the expected doctest output. For example,
-  the following output would only need to match to 2 decimal places::
+  the precision you have written in the expected doctest output. The numbers are
+  compared using :func:`pytest.approx` with relative tolerance equal to the
+  precision. For example, the following output would only need to match to 2
+  decimal places when comparing ``3.14`` to
+  ``pytest.approx(math.pi, rel=10**-2)``::
 
       >>> math.pi
       3.14
 
-  If you wrote ``3.1416`` then the actual output would need to match to 4
-  decimal places; and so on.
+  If you wrote ``3.1416`` then the actual output would need to match to
+  approximately 4 decimal places; and so on.
 
   This avoids false positives caused by limited floating-point precision, like
   this::
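The relative-tolerance rule described above can be sanity-checked directly with ``pytest.approx``; a minimal sketch:

.. code-block:: python

    import math

    import pytest

    # Two written digits after the point -> relative tolerance of 10**-2 ...
    assert 3.14 == pytest.approx(math.pi, rel=10**-2)
    # ... four digits -> 10**-4, and so on.
    assert 3.1416 == pytest.approx(math.pi, rel=10**-4)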
@@ -631,6 +631,7 @@ Here's what that might look like:
     def receiving_user(mail_admin):
         user = mail_admin.create_user()
         yield user
+        user.clear_mailbox()
         mail_admin.delete_user(user)
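For context: in a yield fixture, everything after ``yield`` runs as teardown, top to bottom, once the test finishes. A minimal editorial sketch of that shape (names hypothetical):

.. code-block:: python

    import pytest


    @pytest.fixture
    def resource():
        handle = object()  # stand-in for mail_admin.create_user()
        yield handle
        # Teardown steps execute in order after the test is done,
        # mirroring clear_mailbox() followed by delete_user() above.
        del handle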
@@ -176,8 +176,8 @@ logging records as they are emitted directly into the console.
 
 You can specify the logging level for which log records with equal or higher
 level are printed to the console by passing ``--log-cli-level``. This setting
-accepts the logging level names as seen in python's documentation or an integer
-as the logging level num.
+accepts the logging level names or numeric values as seen in
+:ref:`logging's documentation <python:levels>`.
 
 Additionally, you can also specify ``--log-cli-format`` and
 ``--log-cli-date-format`` which mirror and default to ``--log-format`` and
 
@@ -198,9 +198,8 @@ Note that relative paths for the log-file location, whether passed on the CLI or
 config file, are always resolved relative to the current working directory.
 
 You can also specify the logging level for the log file by passing
-``--log-file-level``. This setting accepts the logging level names as seen in
-python's documentation(ie, uppercased level names) or an integer as the logging
-level num.
+``--log-file-level``. This setting accepts the logging level names or numeric
+values as seen in :ref:`logging's documentation <python:levels>`.
 
 Additionally, you can also specify ``--log-file-format`` and
 ``--log-file-date-format`` which are equal to ``--log-format`` and
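Level names and their numeric values are interchangeable here; a quick sketch of the stdlib mapping:

.. code-block:: python

    import logging

    # "DEBUG" and 10 refer to the same level, so either form works for
    # --log-cli-level / --log-file-level.
    assert logging.DEBUG == 10
    assert logging.getLevelName(10) == "DEBUG"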
@@ -3,7 +3,7 @@
 How to monkeypatch/mock modules and environments
 ================================================================
 
-.. currentmodule:: _pytest.monkeypatch
+.. currentmodule:: pytest
 
 Sometimes tests need to invoke functionality which depends
 on global settings or which invokes code which cannot be easily
 
@@ -25,6 +25,7 @@ functionality in tests:
     monkeypatch.delenv(name, raising=True)
     monkeypatch.syspath_prepend(path)
     monkeypatch.chdir(path)
+    monkeypatch.context()
 
 All modifications will be undone after the requesting
 test function or fixture has finished. The ``raising``
 
@@ -55,6 +56,9 @@ during a test.
 5. Use :py:meth:`monkeypatch.syspath_prepend <MonkeyPatch.syspath_prepend>` to modify ``sys.path`` which will also
    call ``pkg_resources.fixup_namespace_packages`` and :py:func:`importlib.invalidate_caches`.
 
+6. Use :py:meth:`monkeypatch.context <MonkeyPatch.context>` to apply patches only in a specific scope, which can help
+   control teardown of complex fixtures or patches to the stdlib.
+
 See the `monkeypatch blog post`_ for some introduction material
 and a discussion of its motivation.
 
@@ -436,7 +440,7 @@ separate fixtures for each potential mock and reference them in the needed tests
     _ = app.create_connection_string()
 
 
-.. currentmodule:: _pytest.monkeypatch
+.. currentmodule:: pytest
 
 API Reference
 -------------
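A minimal sketch of ``monkeypatch.context()`` from item 6 above:

.. code-block:: python

    import functools


    def test_patch_in_scope(monkeypatch):
        with monkeypatch.context() as m:
            m.setattr(functools, "partial", 3)
            assert functools.partial == 3
        # Leaving the block undoes the patch immediately, before test teardown.
        assert callable(functools.partial)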
@@ -104,8 +104,10 @@ The ``tmpdir`` and ``tmpdir_factory`` fixtures
 
 The ``tmpdir`` and ``tmpdir_factory`` fixtures are similar to ``tmp_path``
 and ``tmp_path_factory``, but use/return legacy `py.path.local`_ objects
-rather than standard :class:`pathlib.Path` objects. These days, prefer to
-use ``tmp_path`` and ``tmp_path_factory``.
+rather than standard :class:`pathlib.Path` objects.
+
+.. note::
+    These days, it is preferred to use ``tmp_path`` and ``tmp_path_factory``.
 
 See :fixture:`tmpdir <tmpdir>` and :fixture:`tmpdir_factory <tmpdir_factory>`
 API for details.
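A minimal sketch of the preferred ``tmp_path`` fixture, which hands the test a :class:`pathlib.Path`:

.. code-block:: python

    def test_write_and_read(tmp_path):
        target = tmp_path / "hello.txt"
        target.write_text("hi")
        assert target.read_text() == "hi"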
@@ -27,12 +27,15 @@ Almost all ``unittest`` features are supported:
 * ``setUpClass/tearDownClass``;
 * ``setUpModule/tearDownModule``;
 
+.. _`pytest-subtests`: https://github.com/pytest-dev/pytest-subtests
 .. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol
 
+Additionally, :ref:`subtests <python:subtests>` are supported by the
+`pytest-subtests`_ plugin.
+
 Up to this point pytest does not have support for the following features:
 
 * `load_tests protocol`_;
-* :ref:`subtests <python:subtests>`;
 
 Benefits out of the box
 -----------------------
@@ -158,18 +158,20 @@ it in your setuptools-invocation:
     # sample ./setup.py file
     from setuptools import setup
 
+    name_of_plugin = "myproject"  # register plugin with this name
     setup(
         name="myproject",
         packages=["myproject"],
         # the following makes a plugin available to pytest
-        entry_points={"pytest11": ["name_of_plugin = myproject.pluginmodule"]},
+        entry_points={"pytest11": [f"{name_of_plugin} = myproject.pluginmodule"]},
         # custom PyPI classifier for pytest plugins
         classifiers=["Framework :: Pytest"],
     )
 
 If a package is installed this way, ``pytest`` will load
 ``myproject.pluginmodule`` as a plugin which can define
-:ref:`hooks <hook-reference>`.
+:ref:`hooks <hook-reference>`. Confirm registration with ``pytest --trace-config``.
 
 .. note::
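A minimal sketch of what ``myproject/pluginmodule.py`` could contain so the entry point above resolves to real hooks (the option name is hypothetical):

.. code-block:: python

    # myproject/pluginmodule.py
    def pytest_addoption(parser):
        # Any `pytest_*` hook defined here is picked up once the entry
        # point registers this module as a plugin.
        parser.addoption("--hello-world", action="store_true", help="example flag")


    def pytest_configure(config):
        if config.getoption("hello_world"):
            print("hello from myproject.pluginmodule")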
@@ -2,16 +2,11 @@
 
 .. sidebar:: Next Open Trainings
 
-  - `PyConDE <https://2022.pycon.de/program/W93DBJ/>`__, April 11th 2022 (3h), Berlin, Germany
-  - `PyConIT <https://pycon.it/en/talk/pytest-simple-rapid-and-fun-testing-with-python>`__, June 3rd 2022 (4h), Florence, Italy
+  - `CH Open Workshop-Tage <https://workshoptage.ch/workshops/2022/pytest-professionelles-testen-nicht-nur-fuer-python/>`__ (German), September 8th 2022, Bern, Switzerland
+  - `Professional Testing with Python <https://python-academy.com/courses/python_course_testing.html>`_, via `Python Academy <https://www.python-academy.com/>`_, March 7th to 9th 2023 (3 day in-depth training), Remote and Leipzig, Germany
 
   Also see :doc:`previous talks and blogposts <talks>`.
 
-..
-  - `Europython <https://ep2022.europython.eu/>`__, July 11th to 17th (3h), Dublin, Ireland
-  - `CH Open Workshoptage <https://workshoptage.ch/>`__ (German), September 6th to 8th (1 day), Bern, Switzerland
-
 .. _features:
 
 pytest: helps you write better programs
 
@@ -27,8 +22,6 @@ scale to support complex functional testing for applications and libraries.
 
 **PyPI package name**: :pypi:`pytest`
 
-**Documentation as PDF**: `download latest <https://media.readthedocs.org/pdf/pytest/latest/pytest.pdf>`_
-
 
 A quick example
 ---------------
 
@@ -104,11 +97,6 @@ Bugs/Requests
 Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.
 
 
-Changelog
----------
-
-Consult the :ref:`Changelog <changelog>` page for fixes and enhancements of each version.
-
 Support pytest
 --------------
 
@@ -141,13 +129,3 @@ Security
 pytest has never been associated with a security vulnerability, but in any case, to report a
 security vulnerability please use the `Tidelift security contact <https://tidelift.com/security>`_.
 Tidelift will coordinate the fix and disclosure.
-
-
-License
--------
-
-Copyright Holger Krekel and others, 2004.
-
-Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
-
-.. _`MIT`: https://github.com/pytest-dev/pytest/blob/main/LICENSE
@@ -1,99 +0,0 @@
-Python 2.7 and 3.4 support
-==========================
-
-It is demanding on the maintainers of an open source project to support many Python versions, as
-there's extra cost of keeping code compatible between all versions, while holding back on
-features only made possible on newer Python versions.
-
-In case of Python 2 and 3, the difference between the languages makes it even more prominent,
-because many new Python 3 features cannot be used in a Python 2/3 compatible code base.
-
-Python 2.7 EOL has been reached :pep:`in 2020 <0373#maintenance-releases>`, with
-the last release made in April, 2020.
-
-Python 3.4 EOL has been reached :pep:`in 2019 <0429#release-schedule>`, with the last release made in March, 2019.
-
-For those reasons, in Jun 2019 it was decided that **pytest 4.6** series will be the last to support Python 2.7 and 3.4.
-
-What this means for general users
----------------------------------
-
-Thanks to the `python_requires`_ setuptools option,
-Python 2.7 and Python 3.4 users using a modern pip version
-will install the last pytest 4.6.X version automatically even if 5.0 or later versions
-are available on PyPI.
-
-Users should ensure they are using the latest pip and setuptools versions for this to work.
-
-Maintenance of 4.6.X versions
------------------------------
-
-Until January 2020, the pytest core team ported many bug-fixes from the main release into the
-``4.6.x`` branch, with several 4.6.X releases being made along the year.
-
-From now on, the core team will **no longer actively backport patches**, but the ``4.6.x``
-branch will continue to exist so the community itself can contribute patches.
-
-The core team will be happy to accept those patches, and make new 4.6.X releases **until mid-2020**
-(but consider that date as a ballpark, after that date the team might still decide to make new releases
-for critical bugs).
-
-.. _`python_requires`: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
-
-Technical aspects
-~~~~~~~~~~~~~~~~~
-
-(This section is a transcript from :issue:`5275`).
-
-In this section we describe the technical aspects of the Python 2.7 and 3.4 support plan.
-
-.. _what goes into 4.6.x releases:
-
-What goes into 4.6.X releases
-+++++++++++++++++++++++++++++
-
-New 4.6.X releases will contain bug fixes only.
-
-When will 4.6.X releases happen
-+++++++++++++++++++++++++++++++
-
-New 4.6.X releases will happen after we have a few bugs in place to release, or if a few weeks have
-passed (say a single bug has been fixed a month after the latest 4.6.X release).
-
-No hard rules here, just ballpark.
-
-Who will handle applying bug fixes
-++++++++++++++++++++++++++++++++++
-
-We core maintainers expect that people still using Python 2.7/3.4 and being affected by
-bugs to step up and provide patches and/or port bug fixes from the active branches.
-
-We will be happy to guide users interested in doing so, so please don't hesitate to ask.
-
-**Backporting changes into 4.6**
-
-Please follow these instructions:
-
-#. ``git fetch --all --prune``
-
-#. ``git checkout origin/4.6.x -b backport-XXXX`` # use the PR number here
-
-#. Locate the merge commit on the PR, in the *merged* message, for example:
-
-    nicoddemus merged commit 0f8b462 into pytest-dev:features
-
-#. ``git cherry-pick -m1 REVISION`` # use the revision you found above (``0f8b462``).
-
-#. Open a PR targeting ``4.6.x``:
-
-   * Prefix the message with ``[4.6]`` so it is an obvious backport
-   * Delete the PR body, it usually contains a duplicate commit message.
-
-**Providing new PRs to 4.6**
-
-Fresh pull requests to ``4.6.x`` will be accepted provided that
-the equivalent code in the active branches does not contain that bug (for example, a bug is specific
-to Python 2 only).
-
-Bug fixes that also happen in the mainstream version should be first fixed
-there, and then backported as per instructions above.
@@ -29,9 +29,11 @@ pytest.ini
 
 ``pytest.ini`` files take precedence over other files, even when empty.
 
+Alternatively, the hidden version ``.pytest.ini`` can be used.
+
 .. code-block:: ini
 
-    # pytest.ini
+    # pytest.ini or .pytest.ini
     [pytest]
     minversion = 6.0
     addopts = -ra -q
@@ -8,8 +8,8 @@ Reference guides
 .. toctree::
    :maxdepth: 1
 
-   fixtures
-   plugin_list
-   customize
    reference
+   fixtures
+   customize
+   exit-codes
+   plugin_list
(Diff of one file suppressed because it is too large.)
@ -92,7 +92,7 @@ pytest.param
|
|||
pytest.raises
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`assertraises`.
|
||||
**Tutorial**: :ref:`assertraises`
|
||||
|
||||
.. autofunction:: pytest.raises(expected_exception: Exception [, *, match])
|
||||
:with: excinfo
|
||||
|
@ -100,7 +100,7 @@ pytest.raises
|
|||
pytest.deprecated_call
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`ensuring_function_triggers`.
|
||||
**Tutorial**: :ref:`ensuring_function_triggers`
|
||||
|
||||
.. autofunction:: pytest.deprecated_call()
|
||||
:with:
|
||||
|
@ -108,7 +108,7 @@ pytest.deprecated_call
|
|||
pytest.register_assert_rewrite
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`assertion-rewriting`.
|
||||
**Tutorial**: :ref:`assertion-rewriting`
|
||||
|
||||
.. autofunction:: pytest.register_assert_rewrite
|
||||
|
||||
|
@ -123,7 +123,7 @@ pytest.warns
|
|||
pytest.freeze_includes
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`freezing-pytest`.
|
||||
**Tutorial**: :ref:`freezing-pytest`
|
||||
|
||||
.. autofunction:: pytest.freeze_includes
|
||||
|
||||
|
@ -143,7 +143,7 @@ fixtures or plugins.
|
|||
pytest.mark.filterwarnings
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`filterwarnings`.
|
||||
**Tutorial**: :ref:`filterwarnings`
|
||||
|
||||
Add warning filters to marked test items.
|
||||
|
||||
|
@ -169,7 +169,7 @@ Add warning filters to marked test items.
|
|||
pytest.mark.parametrize
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
:ref:`parametrize`.
|
||||
**Tutorial**: :ref:`parametrize`
|
||||
|
||||
This mark has the same signature as :py:meth:`pytest.Metafunc.parametrize`; see there.
|
||||
|
||||
|
@ -179,7 +179,7 @@ This mark has the same signature as :py:meth:`pytest.Metafunc.parametrize`; see
|
|||
pytest.mark.skip
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
:ref:`skip`.
|
||||
**Tutorial**: :ref:`skip`
|
||||
|
||||
Unconditionally skip a test function.
|
||||
|
||||
|
@ -193,7 +193,7 @@ Unconditionally skip a test function.
|
|||
pytest.mark.skipif
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
:ref:`skipif`.
|
||||
**Tutorial**: :ref:`skipif`
|
||||
|
||||
Skip a test function if a condition is ``True``.
|
||||
|
||||
|
@ -209,7 +209,7 @@ Skip a test function if a condition is ``True``.
|
|||
pytest.mark.usefixtures
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`usefixtures`.
|
||||
**Tutorial**: :ref:`usefixtures`
|
||||
|
||||
Mark a test function as using the given fixture names.
|
||||
|
||||
|
@ -231,7 +231,7 @@ Mark a test function as using the given fixture names.
|
|||
pytest.mark.xfail
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`xfail`.
|
||||
**Tutorial**: :ref:`xfail`
|
||||
|
||||
Marks a test function as *expected to fail*.
|
||||
|
||||
|
@ -245,7 +245,7 @@ Marks a test function as *expected to fail*.
|
|||
:keyword str reason:
|
||||
Reason why the test function is marked as xfail.
|
||||
:keyword Type[Exception] raises:
|
||||
Exception subclass expected to be raised by the test function; other exceptions will fail the test.
|
||||
Exception subclass (or tuple of subclasses) expected to be raised by the test function; other exceptions will fail the test.
|
||||
:keyword bool run:
|
||||
If the test function should actually be executed. If ``False``, the function will always xfail and will
|
||||
not be executed (useful if a function is segfaulting).
|
||||
|
@ -290,14 +290,14 @@ Example for using multiple custom markers:
|
|||
def test_function():
|
||||
...
|
||||
|
||||
When :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>` or :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers_with_node>` is used with multiple markers, the marker closest to the function will be iterated over first. The above example will result in ``@pytest.mark.slow`` followed by ``@pytest.mark.timeout(...)``.
|
||||
When :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>` or :meth:`Node.iter_markers_with_node <_pytest.nodes.Node.iter_markers_with_node>` is used with multiple markers, the marker closest to the function will be iterated over first. The above example will result in ``@pytest.mark.slow`` followed by ``@pytest.mark.timeout(...)``.
|
||||
|
||||
.. _`fixtures-api`:
|
||||
|
||||
Fixtures
|
||||
--------
|
||||
|
||||
**Tutorial**: :ref:`fixture`.
|
||||
**Tutorial**: :ref:`fixture`
|
||||
|
||||
Fixtures are requested by test functions or other fixtures by declaring them as argument names.
|
||||
|
||||
|
@ -338,7 +338,7 @@ For more details, consult the full :ref:`fixtures docs <fixture>`.
|
|||
config.cache
|
||||
~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`cache`.
|
||||
**Tutorial**: :ref:`cache`
|
||||
|
||||
The ``config.cache`` object allows other plugins and fixtures
|
||||
to store and retrieve values across test runs. To access it from fixtures
|
||||
|
@ -358,22 +358,11 @@ Under the hood, the cache plugin uses the simple
|
|||
capsys
|
||||
~~~~~~
|
||||
|
||||
:ref:`captures`.
|
||||
**Tutorial**: :ref:`captures`
|
||||
|
||||
.. autofunction:: _pytest.capture.capsys()
|
||||
:no-auto-options:
|
||||
|
||||
Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_output(capsys):
|
||||
print("hello")
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "hello\n"
|
||||
|
||||
.. autoclass:: pytest.CaptureFixture()
|
||||
:members:
|
||||
|
||||
|
@ -383,93 +372,48 @@ capsys
|
|||
capsysbinary
|
||||
~~~~~~~~~~~~
|
||||
|
||||
:ref:`captures`.
|
||||
**Tutorial**: :ref:`captures`
|
||||
|
||||
.. autofunction:: _pytest.capture.capsysbinary()
|
||||
:no-auto-options:
|
||||
|
||||
Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_output(capsysbinary):
|
||||
print("hello")
|
||||
captured = capsysbinary.readouterr()
|
||||
assert captured.out == b"hello\n"
|
||||
|
||||
|
||||
.. fixture:: capfd
|
||||
|
||||
capfd
|
||||
~~~~~~
|
||||
|
||||
:ref:`captures`.
|
||||
**Tutorial**: :ref:`captures`
|
||||
|
||||
.. autofunction:: _pytest.capture.capfd()
|
||||
:no-auto-options:
|
||||
|
||||
Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_system_echo(capfd):
|
||||
os.system('echo "hello"')
|
||||
captured = capfd.readouterr()
|
||||
assert captured.out == "hello\n"
|
||||
|
||||
|
||||
.. fixture:: capfdbinary
|
||||
|
||||
capfdbinary
|
||||
~~~~~~~~~~~~
|
||||
|
||||
:ref:`captures`.
|
||||
**Tutorial**: :ref:`captures`
|
||||
|
||||
.. autofunction:: _pytest.capture.capfdbinary()
|
||||
:no-auto-options:
|
||||
|
||||
Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_system_echo(capfdbinary):
|
||||
os.system('echo "hello"')
|
||||
captured = capfdbinary.readouterr()
|
||||
assert captured.out == b"hello\n"
|
||||
|
||||
|
||||
.. fixture:: doctest_namespace
|
||||
|
||||
doctest_namespace
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
:ref:`doctest`.
|
||||
**Tutorial**: :ref:`doctest`
|
||||
|
||||
.. autofunction:: _pytest.doctest.doctest_namespace()
|
||||
|
||||
Usually this fixture is used in conjunction with another ``autouse`` fixture:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def add_np(doctest_namespace):
|
||||
doctest_namespace["np"] = numpy
|
||||
|
||||
For more details: :ref:`doctest_namespace`.
|
||||
|
||||
|
||||
.. fixture:: request
|
||||
|
||||
request
|
||||
~~~~~~~
|
||||
|
||||
:ref:`request example`.
|
||||
**Example**: :ref:`request example`
|
||||
|
||||
The ``request`` fixture is a special fixture providing information of the requesting test function.
|
||||
|
||||
|
@ -490,7 +434,7 @@ pytestconfig
|
|||
record_property
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`record_property example`.
|
||||
**Tutorial**: :ref:`record_property example`
|
||||
|
||||
.. autofunction:: _pytest.junitxml.record_property()
|
||||
|
||||
|
@ -500,7 +444,7 @@ record_property
|
|||
record_testsuite_property
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`record_testsuite_property example`.
|
||||
**Tutorial**: :ref:`record_testsuite_property example`
|
||||
|
||||
.. autofunction:: _pytest.junitxml.record_testsuite_property()
|
||||
|
||||
|
@ -510,7 +454,7 @@ record_testsuite_property
|
|||
caplog
|
||||
~~~~~~
|
||||
|
||||
:ref:`logging`.
|
||||
**Tutorial**: :ref:`logging`
|
||||
|
||||
.. autofunction:: _pytest.logging.caplog()
|
||||
:no-auto-options:
|
||||
|
@ -526,7 +470,7 @@ caplog
|
|||
monkeypatch
|
||||
~~~~~~~~~~~
|
||||
|
||||
:ref:`monkeypatching`.
|
||||
**Tutorial**: :ref:`monkeypatching`
|
||||
|
||||
.. autofunction:: _pytest.monkeypatch.monkeypatch()
|
||||
:no-auto-options:
|
||||
|
@ -600,19 +544,13 @@ recwarn
|
|||
.. autoclass:: pytest.WarningsRecorder()
|
||||
:members:
|
||||
|
||||
Each recorded warning is an instance of :class:`warnings.WarningMessage`.
|
||||
|
||||
.. note::
|
||||
``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
|
||||
differently; see :ref:`ensuring_function_triggers`.
|
||||
|
||||
|
||||
.. fixture:: tmp_path
|
||||
|
||||
tmp_path
|
||||
~~~~~~~~
|
||||
|
||||
:ref:`tmp_path`
|
||||
**Tutorial**: :ref:`tmp_path`
|
||||
|
||||
.. autofunction:: _pytest.tmpdir.tmp_path()
|
||||
:no-auto-options:
|
||||
|
@ -623,7 +561,7 @@ tmp_path
|
|||
tmp_path_factory
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
:ref:`tmp_path_factory example`
|
||||
**Tutorial**: :ref:`tmp_path_factory example`
|
||||
|
||||
.. _`tmp_path_factory factory api`:
|
||||
|
||||
|
@ -638,7 +576,7 @@ tmp_path_factory
|
|||
tmpdir
|
||||
~~~~~~
|
||||
|
||||
:ref:`tmpdir and tmpdir_factory`
|
||||
**Tutorial**: :ref:`tmpdir and tmpdir_factory`
|
||||
|
||||
.. autofunction:: _pytest.legacypath.LegacyTmpdirPlugin.tmpdir()
|
||||
:no-auto-options:
|
||||
|
@ -649,7 +587,7 @@ tmpdir
|
|||
tmpdir_factory
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
:ref:`tmpdir and tmpdir_factory`
|
||||
**Tutorial**: :ref:`tmpdir and tmpdir_factory`
|
||||
|
||||
``tmpdir_factory`` is an instance of :class:`~pytest.TempdirFactory`:
|
||||
|
||||
|
@ -662,7 +600,7 @@ tmpdir_factory
|
|||
Hooks
|
||||
-----
|
||||
|
||||
:ref:`writing-plugins`.
|
||||
**Tutorial**: :ref:`writing-plugins`
|
||||
|
||||
.. currentmodule:: _pytest.hookspec
|
||||
|
||||
|
@ -1192,6 +1130,9 @@ Custom warnings generated in some situations such as improper usage or deprecate
|
|||
.. autoclass:: pytest.PytestExperimentalApiWarning
|
||||
:show-inheritance:
|
||||
|
||||
.. autoclass:: pytest.PytestReturnNotNoneWarning
|
||||
:show-inheritance:
|
||||
|
||||
.. autoclass:: pytest.PytestUnhandledCoroutineWarning
|
||||
:show-inheritance:
|
||||
|
||||
|
@ -1213,9 +1154,10 @@ Consult the :ref:`internal-warnings` section in the documentation for more infor
|
|||
Configuration Options
|
||||
---------------------
|
||||
|
||||
Here is a list of builtin configuration options that may be written in a ``pytest.ini``, ``pyproject.toml``, ``tox.ini`` or ``setup.cfg``
|
||||
file, usually located at the root of your repository. To see each file format in details, see
|
||||
:ref:`config file formats`.
|
||||
Here is a list of builtin configuration options that may be written in a ``pytest.ini`` (or ``.pytest.ini``),
|
||||
``pyproject.toml``, ``tox.ini``, or ``setup.cfg`` file, usually located at the root of your repository.
|
||||
|
||||
To see each file format in details, see :ref:`config file formats`.
|
||||
|
||||
.. warning::
|
||||
Usage of ``setup.cfg`` is not recommended except for very simple use cases. ``.cfg``
|
||||
|
@ -1761,6 +1703,8 @@ passed multiple times. The expected format is ``name=value``. For example::
|
|||
Sets list of directories that should be searched for tests when
|
||||
no specific directories, files or test ids are given in the command line when
|
||||
executing pytest from the :ref:`rootdir <rootdir>` directory.
|
||||
File system paths may use shell-style wildcards, including the recursive
|
||||
``**`` pattern.
|
||||
Useful when all project tests are in a known location to speed up
|
||||
test collection and to avoid picking up undesired tests by accident.
|
||||
|
||||
|
@ -1809,11 +1753,11 @@ All the command-line flags can be obtained by running ``pytest --help``::
|
|||
$ pytest --help
|
||||
usage: pytest [options] [file_or_dir] [file_or_dir] [...]
|
||||
|
||||
positional arguments:
|
||||
Positional arguments:
|
||||
file_or_dir
|
||||
|
||||
general:
|
||||
-k EXPRESSION only run tests which match the given substring
|
||||
General:
|
||||
-k EXPRESSION Only run tests which match the given substring
|
||||
expression. An expression is a python evaluatable
|
||||
expression where all names are substring-matched
|
||||
against test names and their parent classes.
|
||||
|
@ -1828,217 +1772,217 @@ All the command-line flags can be obtained by running ``pytest --help``::
|
|||
'extra_keyword_matches' set, as well as functions
|
||||
which have names assigned directly to them. The
|
||||
matching is case-insensitive.
|
||||
-m MARKEXPR only run tests matching given mark expression.
|
||||
-m MARKEXPR Only run tests matching given mark expression.
|
||||
For example: -m 'mark1 and not mark2'.
|
||||
--markers show markers (builtin, plugin and per-project ones).
|
||||
-x, --exitfirst exit instantly on first error or failed test.
|
||||
--markers Show markers (builtin, plugin and per-project ones)
|
||||
-x, --exitfirst Exit instantly on first error or failed test
|
||||
--fixtures, --funcargs
|
||||
show available fixtures, sorted by plugin appearance
|
||||
Show available fixtures, sorted by plugin appearance
|
||||
(fixtures with leading '_' are only shown with '-v')
|
||||
--fixtures-per-test show fixtures per test
|
||||
--pdb start the interactive Python debugger on errors or
|
||||
KeyboardInterrupt.
|
||||
--fixtures-per-test Show fixtures per test
|
||||
--pdb Start the interactive Python debugger on errors or
|
||||
KeyboardInterrupt
|
||||
--pdbcls=modulename:classname
|
||||
specify a custom interactive Python debugger for use
|
||||
with --pdb.For example:
|
||||
Specify a custom interactive Python debugger for use
|
||||
with --pdb. For example:
|
||||
--pdbcls=IPython.terminal.debugger:TerminalPdb
|
||||
--trace Immediately break when running each test.
|
||||
--capture=method per-test capturing method: one of fd|sys|no|tee-sys.
|
||||
-s shortcut for --capture=no.
|
||||
--runxfail report the results of xfail tests as if they were
|
||||
--trace Immediately break when running each test
|
||||
--capture=method Per-test capturing method: one of fd|sys|no|tee-sys.
|
||||
-s Shortcut for --capture=no.
|
||||
--runxfail Report the results of xfail tests as if they were
|
||||
not marked
|
||||
--lf, --last-failed rerun only the tests that failed at the last run (or
|
||||
--lf, --last-failed Rerun only the tests that failed at the last run (or
|
||||
all if none failed)
|
||||
--ff, --failed-first run all tests, but run the last failures first.
|
||||
--ff, --failed-first Run all tests, but run the last failures first
|
||||
This may re-order tests and thus lead to repeated
|
||||
fixture setup/teardown.
|
||||
--nf, --new-first run tests from new files first, then the rest of the
|
||||
fixture setup/teardown
|
||||
--nf, --new-first Run tests from new files first, then the rest of the
|
||||
tests sorted by file mtime
|
||||
--cache-show=[CACHESHOW]
|
||||
show cache contents, don't perform collection or
|
||||
Show cache contents, don't perform collection or
|
||||
tests. Optional argument: glob (default: '*').
|
||||
--cache-clear remove all cache contents at start of test run.
|
||||
--cache-clear Remove all cache contents at start of test run
|
||||
--lfnf={all,none}, --last-failed-no-failures={all,none}
|
||||
which tests to run with no previously (known)
|
||||
failures.
|
||||
--sw, --stepwise exit on test failure and continue from last failing
|
||||
Which tests to run with no previously (known)
|
||||
failures
|
||||
--sw, --stepwise Exit on test failure and continue from last failing
|
||||
test next time
|
||||
--sw-skip, --stepwise-skip
|
||||
ignore the first failing test but stop on the next
|
||||
Ignore the first failing test but stop on the next
|
||||
failing test.
|
||||
implicitly enables --stepwise.
|
||||
|
||||
reporting:
|
||||
--durations=N show N slowest setup/test durations (N=0 for all).
|
||||
Reporting:
|
||||
--durations=N show N slowest setup/test durations (N=0 for all)
|
||||
--durations-min=N Minimal duration in seconds for inclusion in slowest
|
||||
list. Default 0.005
|
||||
-v, --verbose increase verbosity.
|
||||
--no-header disable header
|
||||
--no-summary disable summary
|
||||
-q, --quiet decrease verbosity.
|
||||
--verbosity=VERBOSE set verbosity. Default is 0.
|
||||
-r chars show extra test summary info as specified by chars:
|
||||
list. Default: 0.005.
|
||||
-v, --verbose Increase verbosity
|
||||
--no-header Disable header
|
||||
--no-summary Disable summary
|
||||
-q, --quiet Decrease verbosity
|
||||
--verbosity=VERBOSE Set verbosity. Default: 0.
|
||||
-r chars Show extra test summary info as specified by chars:
|
||||
(f)ailed, (E)rror, (s)kipped, (x)failed, (X)passed,
|
||||
(p)assed, (P)assed with output, (a)ll except passed
|
||||
(p/P), or (A)ll. (w)arnings are enabled by default
|
||||
(see --disable-warnings), 'N' can be used to reset
|
||||
the list. (default: 'fE').
|
||||
--disable-warnings, --disable-pytest-warnings
|
||||
disable warnings summary
|
||||
-l, --showlocals show locals in tracebacks (disabled by default).
|
||||
--tb=style traceback print mode
|
||||
Disable warnings summary
|
||||
-l, --showlocals Show locals in tracebacks (disabled by default)
|
||||
--tb=style Traceback print mode
|
||||
(auto/long/short/line/native/no).
|
||||
--show-capture={no,stdout,stderr,log,all}
|
||||
Controls how captured stdout/stderr/log is shown on
|
||||
failed tests. Default is 'all'.
|
||||
--full-trace don't cut any tracebacks (default is to cut).
|
||||
--color=color color terminal output (yes/no/auto).
|
||||
failed tests. Default: all.
|
||||
--full-trace Don't cut any tracebacks (default is to cut)
|
||||
--color=color Color terminal output (yes/no/auto)
|
||||
--code-highlight={yes,no}
|
||||
Whether code should be highlighted (only if --color
|
||||
is also enabled)
|
||||
--pastebin=mode send failed|all info to bpaste.net pastebin service.
|
||||
--junit-xml=path create junit-xml style report file at given path.
|
||||
--junit-prefix=str prepend prefix to classnames in junit-xml output
|
||||
is also enabled). Default: yes.
|
||||
--pastebin=mode Send failed|all info to bpaste.net pastebin service
|
||||
--junit-xml=path Create junit-xml style report file at given path
|
||||
--junit-prefix=str Prepend prefix to classnames in junit-xml output
|
||||
|
||||
pytest-warnings:
|
||||
-W PYTHONWARNINGS, --pythonwarnings=PYTHONWARNINGS
|
||||
set which warnings to report, see -W option of
|
||||
python itself.
|
||||
--maxfail=num exit after first num failures or errors.
|
||||
--strict-config any warnings encountered while parsing the `pytest`
|
||||
section of the configuration file raise errors.
|
||||
--strict-markers markers not registered in the `markers` section of
|
||||
the configuration file raise errors.
|
||||
--strict (deprecated) alias to --strict-markers.
|
||||
-c file load configuration from `file` instead of trying to
|
||||
locate one of the implicit configuration files.
|
||||
Set which warnings to report, see -W option of
|
||||
Python itself
|
||||
--maxfail=num Exit after first num failures or errors
|
||||
--strict-config Any warnings encountered while parsing the `pytest`
|
||||
section of the configuration file raise errors
|
||||
--strict-markers Markers not registered in the `markers` section of
|
||||
the configuration file raise errors
|
||||
--strict (Deprecated) alias to --strict-markers
|
||||
-c file Load configuration from `file` instead of trying to
|
||||
locate one of the implicit configuration files
|
||||
--continue-on-collection-errors
|
||||
Force test execution even if collection errors
|
||||
occur.
|
||||
occur
|
||||
--rootdir=ROOTDIR Define root directory for tests. Can be relative
|
||||
path: 'root_dir', './root_dir',
|
||||
'root_dir/another_dir/'; absolute path:
|
||||
'/home/user/root_dir'; path with variables:
|
||||
'$HOME/root_dir'.
|
||||
|
||||
collection:
|
||||
--collect-only, --co only collect tests, don't execute them.
|
||||
--pyargs try to interpret all arguments as python packages.
|
||||
--ignore=path ignore path during collection (multi-allowed).
|
||||
--ignore-glob=path ignore path pattern during collection (multi-
|
||||
allowed).
|
||||
Collection:
|
||||
--collect-only, --co Only collect tests, don't execute them
|
||||
--pyargs Try to interpret all arguments as Python packages
|
||||
--ignore=path Ignore path during collection (multi-allowed)
|
||||
--ignore-glob=path Ignore path pattern during collection (multi-
|
||||
allowed)
|
||||
--deselect=nodeid_prefix
|
||||
deselect item (via node id prefix) during collection
|
||||
(multi-allowed).
|
||||
--confcutdir=dir only load conftest.py's relative to specified dir.
|
||||
--noconftest Don't load any conftest.py files.
|
||||
--keep-duplicates Keep duplicate tests.
|
||||
Deselect item (via node id prefix) during collection
|
||||
(multi-allowed)
|
||||
--confcutdir=dir Only load conftest.py's relative to specified dir
|
||||
--noconftest Don't load any conftest.py files
|
||||
--keep-duplicates Keep duplicate tests
|
||||
--collect-in-virtualenv
|
||||
Don't ignore tests in a local virtualenv directory
|
||||
--import-mode={prepend,append,importlib}
|
||||
prepend/append to sys.path when importing test
|
||||
modules and conftest files, default is to prepend.
|
||||
--doctest-modules run doctests in all .py modules
|
||||
Prepend/append to sys.path when importing test
|
||||
modules and conftest files. Default: prepend.
|
||||
--doctest-modules Run doctests in all .py modules
|
||||
--doctest-report={none,cdiff,ndiff,udiff,only_first_failure}
|
||||
- choose another output format for diffs on doctest
+ Choose another output format for diffs on doctest
  failure
- --doctest-glob=pat doctests file matching pattern, default: test*.txt
+ --doctest-glob=pat Doctests file matching pattern, default: test*.txt
  --doctest-ignore-import-errors
- ignore doctest ImportErrors
+ Ignore doctest ImportErrors
  --doctest-continue-on-failure
- for a given doctest, continue to run after the first
+ For a given doctest, continue to run after the first
  failure

- test session debugging and configuration:
- --basetemp=dir base temporary directory for this test run.(warning:
- this directory is removed if it exists)
- -V, --version display pytest version and information about
+ Test session debugging and configuration:
+ --basetemp=dir Base temporary directory for this test run. (Warning:
+ this directory is removed if it exists.)
+ -V, --version Display pytest version and information about
  plugins. When given twice, also display information
  about plugins.
- -h, --help show help message and configuration info
- -p name early-load given plugin module name or entry point
- (multi-allowed).
+ -h, --help Show help message and configuration info
+ -p name Early-load given plugin module name or entry point
+ (multi-allowed)
  To avoid loading of plugins, use the `no:` prefix,
- e.g. `no:doctest`.
- --trace-config trace considerations of conftest.py files.
+ e.g. `no:doctest`
+ --trace-config Trace considerations of conftest.py files
  --debug=[DEBUG_FILE_NAME]
- store internal tracing debug information in this log
+ Store internal tracing debug information in this log
  file.
  This file is opened with 'w' and truncated as a
  result, care advised.
- Defaults to 'pytestdebug.log'.
+ Default: pytestdebug.log.
  -o OVERRIDE_INI, --override-ini=OVERRIDE_INI
- override ini option with "option=value" style, e.g.
- `-o xfail_strict=True -o cache_dir=cache`.
+ Override ini option with "option=value" style, e.g.
+ `-o xfail_strict=True -o cache_dir=cache`
  --assert=MODE Control assertion debugging tools.
  'plain' performs no assertion debugging.
  'rewrite' (the default) rewrites assert statements
  in test modules on import to provide assert
  expression information.
- --setup-only only setup fixtures, do not execute tests.
- --setup-show show setup of fixtures while executing tests.
- --setup-plan show what fixtures and tests would be executed but
- don't execute anything.
+ --setup-only Only setup fixtures, do not execute tests
+ --setup-show Show setup of fixtures while executing tests
+ --setup-plan Show what fixtures and tests would be executed but
+ don't execute anything

- logging:
- --log-level=LEVEL level of messages to catch/display.
+ Logging:
+ --log-level=LEVEL Level of messages to catch/display.
  Not set by default, so it depends on the root/parent
  log handler's effective level, where it is "WARNING"
  by default.
  --log-format=LOG_FORMAT
- log format as used by the logging module.
+ Log format used by the logging module
  --log-date-format=LOG_DATE_FORMAT
- log date format as used by the logging module.
+ Log date format used by the logging module
  --log-cli-level=LOG_CLI_LEVEL
- cli logging level.
+ CLI logging level
  --log-cli-format=LOG_CLI_FORMAT
- log format as used by the logging module.
+ Log format used by the logging module
  --log-cli-date-format=LOG_CLI_DATE_FORMAT
- log date format as used by the logging module.
- --log-file=LOG_FILE path to a file when logging will be written to.
+ Log date format used by the logging module
+ --log-file=LOG_FILE Path to a file where logging will be written to
  --log-file-level=LOG_FILE_LEVEL
- log file logging level.
+ Log file logging level
  --log-file-format=LOG_FILE_FORMAT
- log format as used by the logging module.
+ Log format used by the logging module
  --log-file-date-format=LOG_FILE_DATE_FORMAT
- log date format as used by the logging module.
+ Log date format used by the logging module
  --log-auto-indent=LOG_AUTO_INDENT
  Auto-indent multiline messages passed to the logging
  module. Accepts true|on, false|off or an integer.

  [pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:

- markers (linelist): markers for test functions
+ markers (linelist): Markers for test functions
  empty_parameter_set_mark (string):
- default marker for empty parametersets
- norecursedirs (args): directory patterns to avoid for recursion
- testpaths (args): directories to search for tests when no files or
- directories are given in the command line.
+ Default marker for empty parametersets
+ norecursedirs (args): Directory patterns to avoid for recursion
+ testpaths (args): Directories to search for tests when no files or
+ directories are given in the command line
  filterwarnings (linelist):
  Each line specifies a pattern for
  warnings.filterwarnings. Processed after
  -W/--pythonwarnings.
- usefixtures (args): list of default fixtures to be used with this
+ usefixtures (args): List of default fixtures to be used with this
  project
- python_files (args): glob-style file patterns for Python test module
+ python_files (args): Glob-style file patterns for Python test module
  discovery
  python_classes (args):
- prefixes or glob names for Python test class
+ Prefixes or glob names for Python test class
  discovery
  python_functions (args):
- prefixes or glob names for Python test function and
+ Prefixes or glob names for Python test function and
  method discovery
  disable_test_id_escaping_and_forfeit_all_rights_to_community_support (bool):
- disable string escape non-ascii characters, might
+ Disable string escape non-ASCII characters, might
  cause unwanted side effects (use at your own risk)
  console_output_style (string):
- console output: "classic", or with additional
+ Console output: "classic", or with additional
  progress information ("progress" (percentage) |
- "count").
- xfail_strict (bool): default for the strict parameter of xfail markers
+ "count")
+ xfail_strict (bool): Default for the strict parameter of xfail markers
  when not given explicitly (default: False)
  enable_assertion_pass_hook (bool):
- Enables the pytest_assertion_pass hook.Make sure to
+ Enables the pytest_assertion_pass hook. Make sure to
  delete any previously generated pyc cache files.
  junit_suite_name (string):
  Test suite name for JUnit report

@@ -2053,45 +1997,45 @@ All the command-line flags can be obtained by running ``pytest --help``::
  junit_family (string):
  Emit XML for schema: one of legacy|xunit1|xunit2
  doctest_optionflags (args):
- option flags for doctests
+ Option flags for doctests
  doctest_encoding (string):
- encoding used for doctest files
- cache_dir (string): cache directory path.
- log_level (string): default value for --log-level
- log_format (string): default value for --log-format
+ Encoding used for doctest files
+ cache_dir (string): Cache directory path
+ log_level (string): Default value for --log-level
+ log_format (string): Default value for --log-format
  log_date_format (string):
- default value for --log-date-format
- log_cli (bool): enable log display during test run (also known as
- "live logging").
+ Default value for --log-date-format
+ log_cli (bool): Enable log display during test run (also known as
+ "live logging")
  log_cli_level (string):
- default value for --log-cli-level
+ Default value for --log-cli-level
  log_cli_format (string):
- default value for --log-cli-format
+ Default value for --log-cli-format
  log_cli_date_format (string):
- default value for --log-cli-date-format
- log_file (string): default value for --log-file
+ Default value for --log-cli-date-format
+ log_file (string): Default value for --log-file
  log_file_level (string):
- default value for --log-file-level
+ Default value for --log-file-level
  log_file_format (string):
- default value for --log-file-format
+ Default value for --log-file-format
  log_file_date_format (string):
- default value for --log-file-date-format
+ Default value for --log-file-date-format
  log_auto_indent (string):
- default value for --log-auto-indent
+ Default value for --log-auto-indent
  pythonpath (paths): Add paths to sys.path
  faulthandler_timeout (string):
  Dump the traceback of all threads if a test takes
- more than TIMEOUT seconds to finish.
- addopts (args): extra command line options
- minversion (string): minimally required pytest version
+ more than TIMEOUT seconds to finish
+ addopts (args): Extra command line options
+ minversion (string): Minimally required pytest version
  required_plugins (args):
- plugins that must be present for pytest to run
+ Plugins that must be present for pytest to run

- environment variables:
- PYTEST_ADDOPTS extra command line options
- PYTEST_PLUGINS comma-separated plugins to load during startup
- PYTEST_DISABLE_PLUGIN_AUTOLOAD set to disable plugin auto-loading
- PYTEST_DEBUG set to enable debug tracing of pytest's internals
+ Environment variables:
+ PYTEST_ADDOPTS Extra command line options
+ PYTEST_PLUGINS Comma-separated plugins to load during startup
+ PYTEST_DISABLE_PLUGIN_AUTOLOAD Set to disable plugin auto-loading
+ PYTEST_DEBUG Set to enable debug tracing of pytest's internals


  to see available markers type: pytest --markers

@@ -17,6 +17,8 @@ Books
 Talks and blog postings
 ---------------------------------------------
 
+- Training: `pytest - simple, rapid and fun testing with Python <https://www.youtube.com/watch?v=ofPHJrAOaTE>`_, Florian Bruhin, PyConDE 2022
+
 - `pytest: Simple, rapid and fun testing with Python, <https://youtu.be/cSJ-X3TbQ1c?t=15752>`_ (@ 4:22:32), Florian Bruhin, WeAreDevelopers World Congress 2021
 
 - Webinar: `pytest: Test Driven Development für Python (German) <https://bruhin.software/ins-pytest/>`_, Florian Bruhin, via mylearning.ch, 2020

@@ -46,7 +46,6 @@ install_requires =
     packaging
     pluggy>=0.12,<2.0
     py>=1.8.2
-    atomicwrites>=1.0;sys_platform=="win32"
     colorama;sys_platform=="win32"
     importlib-metadata>=0.12;python_version<"3.8"
     tomli>=1.0.0;python_version<"3.11"

@@ -39,7 +39,7 @@ def pytest_addoption(parser: Parser) -> None:
         "enable_assertion_pass_hook",
         type="bool",
         default=False,
-        help="Enables the pytest_assertion_pass hook."
+        help="Enables the pytest_assertion_pass hook. "
         "Make sure to delete any previously generated pyc cache files.",
     )
 
@@ -190,7 +190,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
             return False
 
         # For matching the name it must be as if it was a filename.
-        path = PurePath(os.path.sep.join(parts) + ".py")
+        path = PurePath(*parts).with_suffix(".py")
 
         for pat in self.fnpats:
             # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based

@@ -281,7 +281,9 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
         else:
             from importlib.resources.readers import FileReader
 
-            return FileReader(types.SimpleNamespace(path=self._rewritten_names[name]))
+            return FileReader(  # type:ignore[no-any-return]
+                types.SimpleNamespace(path=self._rewritten_names[name])
+            )
 
 
 def _write_pyc_fp(
@@ -302,53 +304,29 @@ def _write_pyc_fp(
     fp.write(marshal.dumps(co))
 
 
-if sys.platform == "win32":
-    from atomicwrites import atomic_write
-
-    def _write_pyc(
-        state: "AssertionState",
-        co: types.CodeType,
-        source_stat: os.stat_result,
-        pyc: Path,
-    ) -> bool:
-        try:
-            with atomic_write(os.fspath(pyc), mode="wb", overwrite=True) as fp:
-                _write_pyc_fp(fp, source_stat, co)
-        except OSError as e:
-            state.trace(f"error writing pyc file at {pyc}: {e}")
-            # we ignore any failure to write the cache file
-            # there are many reasons, permission-denied, pycache dir being a
-            # file etc.
-            return False
-        return True
-
-else:
-
-    def _write_pyc(
-        state: "AssertionState",
-        co: types.CodeType,
-        source_stat: os.stat_result,
-        pyc: Path,
-    ) -> bool:
-        proc_pyc = f"{pyc}.{os.getpid()}"
-        try:
-            fp = open(proc_pyc, "wb")
-        except OSError as e:
-            state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
-            return False
-
-        try:
-            _write_pyc_fp(fp, source_stat, co)
-            os.rename(proc_pyc, pyc)
-        except OSError as e:
-            state.trace(f"error writing pyc file at {pyc}: {e}")
-            # we ignore any failure to write the cache file
-            # there are many reasons, permission-denied, pycache dir being a
-            # file etc.
-            return False
-        finally:
-            fp.close()
-        return True
+def _write_pyc(
+    state: "AssertionState",
+    co: types.CodeType,
+    source_stat: os.stat_result,
+    pyc: Path,
+) -> bool:
+    proc_pyc = f"{pyc}.{os.getpid()}"
+    try:
+        with open(proc_pyc, "wb") as fp:
+            _write_pyc_fp(fp, source_stat, co)
+    except OSError as e:
+        state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
+        return False
+
+    try:
+        os.replace(proc_pyc, pyc)
+    except OSError as e:
+        state.trace(f"error writing pyc file at {pyc}: {e}")
+        # we ignore any failure to write the cache file
+        # there are many reasons, permission-denied, pycache dir being a
+        # file etc.
+        return False
+    return True
 
 
 def _rewrite_test(fn: Path, config: Config) -> Tuple[os.stat_result, types.CodeType]:
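The rewritten `_write_pyc` above drops the Windows-only atomicwrites dependency in favor of one cross-platform scheme: write the bytecode to a process-private sibling file, then move it into place with `os.replace`, which overwrites the destination atomically on both POSIX and Windows (plain `os.rename` fails on Windows when the target exists). A minimal standalone sketch of the same pattern, with illustrative names only:

.. code-block:: python

    import os


    def atomic_write_bytes(path: str, data: bytes) -> bool:
        # Write to a sibling file unique to this process first, so a
        # concurrent reader never observes a half-written file.
        tmp = f"{path}.{os.getpid()}"
        try:
            with open(tmp, "wb") as fp:
                fp.write(data)
            # os.replace overwrites an existing destination atomically.
            os.replace(tmp, path)
        except OSError:
            # Best effort: treat a failed cache write as non-fatal.
            return False
        return True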
@@ -157,7 +157,7 @@ class Cache:
         """
         path = self._getvaluepath(key)
         try:
-            with path.open("r") as f:
+            with path.open("r", encoding="UTF-8") as f:
                 return json.load(f)
         except (ValueError, OSError):
             return default

@@ -184,9 +184,9 @@ class Cache:
             return
         if not cache_dir_exists_already:
             self._ensure_supporting_files()
-        data = json.dumps(value, indent=2)
+        data = json.dumps(value, ensure_ascii=False, indent=2)
         try:
-            f = path.open("w")
+            f = path.open("w", encoding="UTF-8")
         except OSError:
             self.warn("cache could not write path {path}", path=path, _ispytest=True)
         else:

@@ -196,7 +196,7 @@ class Cache:
     def _ensure_supporting_files(self) -> None:
         """Create supporting files in the cache dir that are not really part of the cache."""
         readme_path = self._cachedir / "README.md"
-        readme_path.write_text(README_CONTENT)
+        readme_path.write_text(README_CONTENT, encoding="UTF-8")
 
         gitignore_path = self._cachedir.joinpath(".gitignore")
         msg = "# Created by pytest automatically.\n*\n"
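With `ensure_ascii=False`, cached values keep non-ASCII text verbatim, which is why both `open()` calls now pin the encoding to UTF-8 instead of relying on the locale. A small round-trip sketch under those assumptions (the file name is illustrative):

.. code-block:: python

    import json


    def roundtrip(value: object) -> object:
        # ensure_ascii=False writes "münchen" as-is rather than "m\u00fcnchen",
        # so reads and writes must agree on UTF-8 explicitly.
        with open("cache.json", "w", encoding="UTF-8") as f:
            f.write(json.dumps(value, ensure_ascii=False, indent=2))
        with open("cache.json", "r", encoding="UTF-8") as f:
            return json.load(f)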
@@ -440,7 +440,7 @@ def pytest_addoption(parser: Parser) -> None:
         "--last-failed",
         action="store_true",
         dest="lf",
-        help="rerun only the tests that failed "
+        help="Rerun only the tests that failed "
         "at the last run (or all if none failed)",
     )
     group.addoption(

@@ -448,7 +448,7 @@ def pytest_addoption(parser: Parser) -> None:
         "--failed-first",
         action="store_true",
         dest="failedfirst",
-        help="run all tests, but run the last failures first.\n"
+        help="Run all tests, but run the last failures first. "
         "This may re-order tests and thus lead to "
         "repeated fixture setup/teardown.",
     )

@@ -457,7 +457,7 @@ def pytest_addoption(parser: Parser) -> None:
         "--new-first",
         action="store_true",
         dest="newfirst",
-        help="run tests from new files first, then the rest of the tests "
+        help="Run tests from new files first, then the rest of the tests "
         "sorted by file mtime",
     )
     group.addoption(

@@ -466,7 +466,7 @@ def pytest_addoption(parser: Parser) -> None:
         nargs="?",
         dest="cacheshow",
         help=(
-            "show cache contents, don't perform collection or tests. "
+            "Show cache contents, don't perform collection or tests. "
             "Optional argument: glob (default: '*')."
         ),
     )

@@ -474,12 +474,12 @@ def pytest_addoption(parser: Parser) -> None:
         "--cache-clear",
         action="store_true",
         dest="cacheclear",
-        help="remove all cache contents at start of test run.",
+        help="Remove all cache contents at start of test run",
     )
     cache_dir_default = ".pytest_cache"
     if "TOX_ENV_DIR" in os.environ:
         cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
-    parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.")
+    parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path")
     group.addoption(
         "--lfnf",
         "--last-failed-no-failures",

@@ -487,7 +487,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="last_failed_no_failures",
         choices=("all", "none"),
         default="all",
-        help="which tests to run with no previously (known) failures.",
+        help="Which tests to run with no previously (known) failures",
     )
 
@@ -42,14 +42,14 @@ def pytest_addoption(parser: Parser) -> None:
         default="fd",
         metavar="method",
         choices=["fd", "sys", "no", "tee-sys"],
-        help="per-test capturing method: one of fd|sys|no|tee-sys.",
+        help="Per-test capturing method: one of fd|sys|no|tee-sys",
     )
     group._addoption(
         "-s",
         action="store_const",
         const="no",
         dest="capture",
-        help="shortcut for --capture=no.",
+        help="Shortcut for --capture=no",
     )
 

@@ -203,12 +203,39 @@ class DontReadFromInput:
     def fileno(self) -> int:
         raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()")
 
+    def flush(self) -> None:
+        raise UnsupportedOperation("redirected stdin is pseudofile, has no flush()")
+
     def isatty(self) -> bool:
         return False
 
     def close(self) -> None:
         pass
 
+    def readable(self) -> bool:
+        return False
+
+    def seek(self, offset: int) -> int:
+        raise UnsupportedOperation("redirected stdin is pseudofile, has no seek(int)")
+
+    def seekable(self) -> bool:
+        return False
+
+    def tell(self) -> int:
+        raise UnsupportedOperation("redirected stdin is pseudofile, has no tell()")
+
+    def truncate(self, size: int) -> None:
+        raise UnsupportedOperation("cannot truncate stdin")
+
+    def write(self, *args) -> None:
+        raise UnsupportedOperation("cannot write to stdin")
+
+    def writelines(self, *args) -> None:
+        raise UnsupportedOperation("Cannot write to stdin")
+
+    def writable(self) -> bool:
+        return False
+
     @property
     def buffer(self):
         return self
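The added methods make the stdin stand-in behave like a proper io object: the query methods report False and the operations raise immediately instead of blocking a test on a read. A hypothetical test showing the observable behavior (assumes pytest 7.2+ and default capture, i.e. not run with `-s`):

.. code-block:: python

    import io
    import sys

    import pytest


    def test_stdin_is_a_pseudofile() -> None:
        # Under capture, sys.stdin is replaced by the non-readable,
        # non-seekable stand-in described above.
        assert not sys.stdin.readable()
        assert not sys.stdin.seekable()
        with pytest.raises(io.UnsupportedOperation):
            sys.stdin.fileno()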
@@ -876,11 +903,22 @@ class CaptureFixture(Generic[AnyStr]):
 
 @fixture
 def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
-    """Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
+    r"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
 
     The captured output is made available via ``capsys.readouterr()`` method
     calls, which return a ``(out, err)`` namedtuple.
     ``out`` and ``err`` will be ``text`` objects.
 
     Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_output(capsys):
+            print("hello")
+            captured = capsys.readouterr()
+            assert captured.out == "hello\n"
     """
     capman = request.config.pluginmanager.getplugin("capturemanager")
     capture_fixture = CaptureFixture[str](SysCapture, request, _ispytest=True)

@@ -893,11 +931,22 @@ def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
 
 @fixture
 def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
-    """Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
+    r"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
 
     The captured output is made available via ``capsysbinary.readouterr()``
     method calls, which return a ``(out, err)`` namedtuple.
     ``out`` and ``err`` will be ``bytes`` objects.
 
     Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_output(capsysbinary):
+            print("hello")
+            captured = capsysbinary.readouterr()
+            assert captured.out == b"hello\n"
     """
     capman = request.config.pluginmanager.getplugin("capturemanager")
     capture_fixture = CaptureFixture[bytes](SysCaptureBinary, request, _ispytest=True)

@@ -910,11 +959,22 @@ def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None,
 
 @fixture
 def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
-    """Enable text capturing of writes to file descriptors ``1`` and ``2``.
+    r"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
 
     The captured output is made available via ``capfd.readouterr()`` method
     calls, which return a ``(out, err)`` namedtuple.
     ``out`` and ``err`` will be ``text`` objects.
 
     Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_system_echo(capfd):
+            os.system('echo "hello"')
+            captured = capfd.readouterr()
+            assert captured.out == "hello\n"
     """
     capman = request.config.pluginmanager.getplugin("capturemanager")
     capture_fixture = CaptureFixture[str](FDCapture, request, _ispytest=True)

@@ -927,11 +987,23 @@ def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
 
 @fixture
 def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
-    """Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
+    r"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
 
     The captured output is made available via ``capfd.readouterr()`` method
     calls, which return a ``(out, err)`` namedtuple.
     ``out`` and ``err`` will be ``bytes`` objects.
 
     Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_system_echo(capfdbinary):
+            os.system('echo "hello"')
+            captured = capfdbinary.readouterr()
+            assert captured.out == b"hello\n"
+
     """
     capman = request.config.pluginmanager.getplugin("capturemanager")
     capture_fixture = CaptureFixture[bytes](FDCaptureBinary, request, _ispytest=True)
@@ -10,6 +10,7 @@ from pathlib import Path
 from typing import Any
 from typing import Callable
 from typing import Generic
+from typing import NoReturn
 from typing import Optional
 from typing import Tuple
 from typing import TYPE_CHECKING

@@ -20,7 +21,6 @@ import attr
 import py
 
 if TYPE_CHECKING:
-    from typing import NoReturn
     from typing_extensions import Final
 

@@ -403,5 +403,5 @@ else:
     # previously.
     #
     # This also works for Enums (if you use `is` to compare) and Literals.
-    def assert_never(value: "NoReturn") -> "NoReturn":
+    def assert_never(value: NoReturn) -> NoReturn:
         assert False, f"Unhandled value: {value} ({type(value).__name__})"
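`assert_never` provides an exhaustiveness check for code that switches over a closed set of values. A short sketch of the intended usage (the `Color` enum is made up for illustration, not part of the commit):

.. code-block:: python

    import enum
    from typing import NoReturn


    def assert_never(value: NoReturn) -> NoReturn:
        assert False, f"Unhandled value: {value} ({type(value).__name__})"


    class Color(enum.Enum):
        RED = enum.auto()
        BLUE = enum.auto()


    def describe(color: Color) -> str:
        # Compare with `is` so type checkers narrow the enum member.
        if color is Color.RED:
            return "warm"
        if color is Color.BLUE:
            return "cool"
        # Adding a member to Color makes type checkers flag this call,
        # and at runtime it fails loudly instead of returning None.
        assert_never(color)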
@@ -3,6 +3,7 @@ import argparse
 import collections.abc
 import copy
 import enum
+import glob
 import inspect
 import os
 import re

@@ -899,6 +900,19 @@ class Config:
         dir: Path
         """The directory from which :func:`pytest.main` was invoked."""
 
+    class ArgsSource(enum.Enum):
+        """Indicates the source of the test arguments.
+
+        .. versionadded:: 7.2
+        """
+
+        #: Command line arguments.
+        ARGS = enum.auto()
+        #: Invocation directory.
+        INVOCATION_DIR = enum.auto()
+        #: 'testpaths' configuration value.
+        TESTPATHS = enum.auto()
+
     def __init__(
         self,
         pluginmanager: PytestPluginManager,

@@ -1101,11 +1115,11 @@ class Config:
         self.inicfg = inicfg
         self._parser.extra_info["rootdir"] = str(self.rootpath)
         self._parser.extra_info["inifile"] = str(self.inipath)
-        self._parser.addini("addopts", "extra command line options", "args")
-        self._parser.addini("minversion", "minimally required pytest version")
+        self._parser.addini("addopts", "Extra command line options", "args")
+        self._parser.addini("minversion", "Minimally required pytest version")
         self._parser.addini(
             "required_plugins",
-            "plugins that must be present for pytest to run",
+            "Plugins that must be present for pytest to run",
             type="args",
             default=[],
         )

@@ -1308,15 +1322,25 @@ class Config:
         self.hook.pytest_cmdline_preparse(config=self, args=args)
         self._parser.after_preparse = True  # type: ignore
         try:
+            source = Config.ArgsSource.ARGS
             args = self._parser.parse_setoption(
                 args, self.option, namespace=self.option
             )
             if not args:
                 if self.invocation_params.dir == self.rootpath:
-                    args = self.getini("testpaths")
+                    source = Config.ArgsSource.TESTPATHS
+                    testpaths: List[str] = self.getini("testpaths")
+                    if self.known_args_namespace.pyargs:
+                        args = testpaths
+                    else:
+                        args = []
+                        for path in testpaths:
+                            args.extend(sorted(glob.iglob(path, recursive=True)))
                 if not args:
+                    source = Config.ArgsSource.INVOCATION_DIR
                     args = [str(self.invocation_params.dir)]
             self.args = args
+            self.args_source = source
         except PrintHelp:
             pass
 
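The new `ArgsSource` enum is recorded on the config as `config.args_source`, so plugins can tell whether the final argument list came from the command line, the `testpaths` ini value, or the invocation directory. A hypothetical conftest.py hook using it (assumes pytest 7.2+):

.. code-block:: python

    import pytest


    def pytest_collection_modifyitems(config: pytest.Config, items) -> None:
        # Surface in the output when no paths were passed explicitly and
        # collection fell back to the 'testpaths' ini option.
        if config.args_source is config.ArgsSource.TESTPATHS:
            print("collected from testpaths:", config.args)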
@@ -9,6 +9,7 @@ from typing import cast
 from typing import Dict
 from typing import List
 from typing import Mapping
+from typing import NoReturn
 from typing import Optional
 from typing import Sequence
 from typing import Tuple

@@ -24,7 +25,6 @@ from _pytest.deprecated import ARGUMENT_TYPE_STR_CHOICE
 from _pytest.deprecated import check_ispytest
 
 if TYPE_CHECKING:
-    from typing import NoReturn
     from typing_extensions import Literal
 
 FILE_OR_DIR = "file_or_dir"

@@ -48,7 +48,7 @@ class Parser:
         _ispytest: bool = False,
     ) -> None:
         check_ispytest(_ispytest)
-        self._anonymous = OptionGroup("custom options", parser=self, _ispytest=True)
+        self._anonymous = OptionGroup("Custom options", parser=self, _ispytest=True)
         self._groups: List[OptionGroup] = []
         self._processopt = processopt
         self._usage = usage

@@ -227,7 +227,7 @@ class Argument:
     _typ_map = {"int": int, "string": str, "float": float, "complex": complex}
 
     def __init__(self, *names: str, **attrs: Any) -> None:
-        """Store parms in private vars for use in add_argument."""
+        """Store params in private vars for use in add_argument."""
         self._attrs = attrs
         self._short_opts: List[str] = []
         self._long_opts: List[str] = []

@@ -403,7 +403,7 @@ class MyOptionParser(argparse.ArgumentParser):
         # a usage error to provide more contextual information to the user.
         self.extra_info = extra_info if extra_info else {}
 
-    def error(self, message: str) -> "NoReturn":
+    def error(self, message: str) -> NoReturn:
         """Transform argparse error message into UsageError."""
         msg = f"{self.prog}: error: {message}"
 

@@ -96,6 +96,7 @@ def locate_config(
     and return a tuple of (rootdir, inifile, cfg-dict)."""
     config_names = [
         "pytest.ini",
+        ".pytest.ini",
         "pyproject.toml",
         "tox.ini",
         "setup.cfg",
@@ -46,21 +46,21 @@ def pytest_addoption(parser: Parser) -> None:
         "--pdb",
         dest="usepdb",
         action="store_true",
-        help="start the interactive Python debugger on errors or KeyboardInterrupt.",
+        help="Start the interactive Python debugger on errors or KeyboardInterrupt",
     )
     group._addoption(
         "--pdbcls",
         dest="usepdb_cls",
         metavar="modulename:classname",
         type=_validate_usepdb_cls,
-        help="specify a custom interactive Python debugger for use with --pdb."
+        help="Specify a custom interactive Python debugger for use with --pdb."
         "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
     )
     group._addoption(
         "--trace",
         dest="trace",
         action="store_true",
-        help="Immediately break when running each test.",
+        help="Immediately break when running each test",
     )
 
@@ -66,26 +66,26 @@ CHECKER_CLASS: Optional[Type["doctest.OutputChecker"]] = None
 def pytest_addoption(parser: Parser) -> None:
     parser.addini(
         "doctest_optionflags",
-        "option flags for doctests",
+        "Option flags for doctests",
         type="args",
         default=["ELLIPSIS"],
     )
     parser.addini(
-        "doctest_encoding", "encoding used for doctest files", default="utf-8"
+        "doctest_encoding", "Encoding used for doctest files", default="utf-8"
     )
     group = parser.getgroup("collect")
     group.addoption(
         "--doctest-modules",
         action="store_true",
         default=False,
-        help="run doctests in all .py modules",
+        help="Run doctests in all .py modules",
         dest="doctestmodules",
     )
     group.addoption(
         "--doctest-report",
         type=str.lower,
         default="udiff",
-        help="choose another output format for diffs on doctest failure",
+        help="Choose another output format for diffs on doctest failure",
         choices=DOCTEST_REPORT_CHOICES,
         dest="doctestreport",
     )

@@ -94,21 +94,21 @@ def pytest_addoption(parser: Parser) -> None:
         action="append",
         default=[],
         metavar="pat",
-        help="doctests file matching pattern, default: test*.txt",
+        help="Doctests file matching pattern, default: test*.txt",
         dest="doctestglob",
     )
     group.addoption(
         "--doctest-ignore-import-errors",
         action="store_true",
         default=False,
-        help="ignore doctest ImportErrors",
+        help="Ignore doctest ImportErrors",
         dest="doctest_ignore_import_errors",
     )
     group.addoption(
         "--doctest-continue-on-failure",
         action="store_true",
         default=False,
-        help="for a given doctest, continue to run after the first failure",
+        help="For a given doctest, continue to run after the first failure",
         dest="doctest_continue_on_failure",
     )
 

@@ -542,7 +542,11 @@ class DoctestModule(pytest.Module):
             )
         else:
             try:
-                module = import_path(self.path, root=self.config.rootpath)
+                module = import_path(
+                    self.path,
+                    root=self.config.rootpath,
+                    mode=self.config.getoption("importmode"),
+                )
             except ImportError:
                 if self.config.getvalue("doctest_ignore_import_errors"):
                     pytest.skip("unable to import module %r" % self.path)

@@ -730,5 +734,16 @@ def _get_report_choice(key: str) -> int:
 @pytest.fixture(scope="session")
 def doctest_namespace() -> Dict[str, Any]:
     """Fixture that returns a :py:class:`dict` that will be injected into the
-    namespace of doctests."""
+    namespace of doctests.
+
+    Usually this fixture is used in conjunction with another ``autouse`` fixture:
+
+    .. code-block:: python
+
+        @pytest.fixture(autouse=True)
+        def add_np(doctest_namespace):
+            doctest_namespace["np"] = numpy
+
+    For more details: :ref:`doctest_namespace`.
+    """
     return dict()
@@ -18,7 +18,7 @@ fault_handler_originally_enabled_key = StashKey[bool]()
 
 def pytest_addoption(parser: Parser) -> None:
     help = (
         "Dump the traceback of all threads if a test takes "
-        "more than TIMEOUT seconds to finish."
+        "more than TIMEOUT seconds to finish"
     )
     parser.addini("faulthandler_timeout", help, default=0.0)
 
@@ -18,6 +18,7 @@ from typing import Iterable
 from typing import Iterator
 from typing import List
 from typing import MutableMapping
+from typing import NoReturn
 from typing import Optional
 from typing import overload
 from typing import Sequence

@@ -67,7 +68,6 @@ from _pytest.stash import StashKey
 
 if TYPE_CHECKING:
     from typing import Deque
-    from typing import NoReturn
 
     from _pytest.scope import _ScopeName
     from _pytest.main import Session

@@ -223,15 +223,10 @@ def add_funcarg_pseudo_fixture_def(
 def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]:
     """Return fixturemarker or None if it doesn't exist or raised
     exceptions."""
-    try:
-        fixturemarker: Optional[FixtureFunctionMarker] = getattr(
-            obj, "_pytestfixturefunction", None
-        )
-    except TEST_OUTCOME:
-        # some objects raise errors like request (from flask import request)
-        # we don't expect them to be fixture functions
-        return None
-    return fixturemarker
+    return cast(
+        Optional[FixtureFunctionMarker],
+        safe_getattr(obj, "_pytestfixturefunction", None),
+    )
 
 
 # Parametrized fixture key, helper alias for code below.

@@ -350,7 +345,7 @@ def reorder_items_atscope(
     return items_done
 
 
-def get_direct_param_fixture_func(request):
+def get_direct_param_fixture_func(request: "FixtureRequest") -> Any:
     return request.param
 
 

@@ -412,6 +407,15 @@ class FixtureRequest:
         self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
         self._arg2index: Dict[str, int] = {}
         self._fixturemanager: FixtureManager = pyfuncitem.session._fixturemanager
+        # Notes on the type of `param`:
+        # - `request.param` is only defined in parametrized fixtures, and will raise
+        #   AttributeError otherwise. Python typing has no notion of "undefined", so
+        #   this cannot be reflected in the type.
+        # - Technically `param` is only (possibly) defined on SubRequest, not
+        #   FixtureRequest, but the typing of that is still in flux so this cheats.
+        # - In the future we might consider using a generic for the param type, but
+        #   for now just using Any.
+        self.param: Any
 
     @property
     def scope(self) -> "_ScopeName":

@@ -491,6 +495,7 @@ class FixtureRequest:
 
     @property
     def path(self) -> Path:
+        """Path where the test function was collected."""
         if self.scope not in ("function", "class", "module", "package"):
             raise AttributeError(f"path not available in {self.scope}-scoped context")
         # TODO: Remove ignore once _pyfuncitem is properly typed.

@@ -529,7 +534,7 @@ class FixtureRequest:
         """
         self.node.add_marker(marker)
 
-    def raiseerror(self, msg: Optional[str]) -> "NoReturn":
+    def raiseerror(self, msg: Optional[str]) -> NoReturn:
         """Raise a FixtureLookupError with the given message."""
         raise self._fixturemanager.FixtureLookupError(None, self, msg)
 

@@ -548,11 +553,18 @@ class FixtureRequest:
         setup time, you may use this function to retrieve it inside a fixture
         or test function body.
 
+        This method can be used during the test setup phase or the test run
+        phase, but during the test teardown phase a fixture's value may not
+        be available.
+
         :raises pytest.FixtureLookupError:
             If the given fixture could not be found.
         """
         fixturedef = self._get_active_fixturedef(argname)
-        assert fixturedef.cached_result is not None
+        assert fixturedef.cached_result is not None, (
+            f'The fixture value for "{argname}" is not available. '
+            "This can happen when the fixture has already been torn down."
+        )
         return fixturedef.cached_result[0]
 
     def _get_active_fixturedef(
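`getfixturevalue` resolves a fixture by name at run time; the new assertion message documents that the cached value can already be gone during teardown. A hypothetical usage sketch (fixture and values are illustrative):

.. code-block:: python

    import pytest


    @pytest.fixture
    def database_url() -> str:
        return "sqlite://"


    def test_lookup_by_name(request: pytest.FixtureRequest) -> None:
        # Resolve a fixture whose name is only known at run time.
        url = request.getfixturevalue("database_url")
        assert url == "sqlite://"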
@@ -864,7 +876,7 @@ class FixtureLookupErrorRepr(TerminalRepr):
             tw.line("%s:%d" % (os.fspath(self.filename), self.firstlineno + 1))
 
 
-def fail_fixturefunc(fixturefunc, msg: str) -> "NoReturn":
+def fail_fixturefunc(fixturefunc, msg: str) -> NoReturn:
     fs, lineno = getfslineno(fixturefunc)
     location = f"{fs}:{lineno + 1}"
     source = _pytest._code.Source(fixturefunc)

@@ -1350,7 +1362,7 @@ def pytest_addoption(parser: Parser) -> None:
         "usefixtures",
         type="args",
         default=[],
-        help="list of default fixtures to be used with this project",
+        help="List of default fixtures to be used with this project",
     )
 
@@ -49,7 +49,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="count",
         default=0,
         dest="version",
-        help="display pytest version and information about plugins. "
+        help="Display pytest version and information about plugins. "
         "When given twice, also display information about plugins.",
     )
     group._addoption(

@@ -57,7 +57,7 @@ def pytest_addoption(parser: Parser) -> None:
         "--help",
         action=HelpAction,
         dest="help",
-        help="show help message and configuration info",
+        help="Show help message and configuration info",
     )
     group._addoption(
         "-p",

@@ -65,7 +65,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="plugins",
         default=[],
         metavar="name",
-        help="early-load given plugin module name or entry point (multi-allowed).\n"
+        help="Early-load given plugin module name or entry point (multi-allowed). "
         "To avoid loading of plugins, use the `no:` prefix, e.g. "
         "`no:doctest`.",
     )

@@ -74,7 +74,7 @@ def pytest_addoption(parser: Parser) -> None:
         "--trace-config",
         action="store_true",
         default=False,
-        help="trace considerations of conftest.py files.",
+        help="Trace considerations of conftest.py files",
     )
     group.addoption(
         "--debug",

@@ -83,16 +83,17 @@ def pytest_addoption(parser: Parser) -> None:
         const="pytestdebug.log",
         dest="debug",
         metavar="DEBUG_FILE_NAME",
-        help="store internal tracing debug information in this log file.\n"
-        "This file is opened with 'w' and truncated as a result, care advised.\n"
-        "Defaults to 'pytestdebug.log'.",
+        help="Store internal tracing debug information in this log file. "
+        "This file is opened with 'w' and truncated as a result, care advised. "
+        "Default: pytestdebug.log.",
     )
     group._addoption(
         "-o",
         "--override-ini",
         dest="override_ini",
         action="append",
-        help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.',
+        help='Override ini option with "option=value" style, '
+        "e.g. `-o xfail_strict=True -o cache_dir=cache`.",
     )
 

@@ -203,12 +204,12 @@ def showhelp(config: Config) -> None:
         tw.line(indent + line)
 
     tw.line()
-    tw.line("environment variables:")
+    tw.line("Environment variables:")
     vars = [
-        ("PYTEST_ADDOPTS", "extra command line options"),
-        ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
-        ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"),
-        ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"),
+        ("PYTEST_ADDOPTS", "Extra command line options"),
+        ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"),
+        ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"),
+        ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"),
     ]
     for name, help in vars:
         tw.line(f"  {name:<24} {help}")
@@ -386,7 +386,7 @@ def pytest_addoption(parser: Parser) -> None:
         metavar="path",
         type=functools.partial(filename_arg, optname="--junitxml"),
         default=None,
-        help="create junit-xml style report file at given path.",
+        help="Create junit-xml style report file at given path",
     )
     group.addoption(
         "--junitprefix",

@@ -394,7 +394,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store",
         metavar="str",
         default=None,
-        help="prepend prefix to classnames in junit-xml output",
+        help="Prepend prefix to classnames in junit-xml output",
     )
     parser.addini(
         "junit_suite_name", "Test suite name for JUnit report", default="pytest"
@@ -270,8 +270,15 @@ class LegacyTestdirPlugin:
 @final
 @attr.s(init=False, auto_attribs=True)
 class TempdirFactory:
-    """Backward compatibility wrapper that implements :class:``_pytest.compat.LEGACY_PATH``
-    for :class:``TempPathFactory``."""
+    """Backward compatibility wrapper that implements :class:`py.path.local`
+    for :class:`TempPathFactory`.
+
+    .. note::
+        These days, it is preferred to use ``tmp_path_factory``.
+
+        :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
+    """
 
     _tmppath_factory: TempPathFactory
 

@@ -282,11 +289,11 @@ class TempdirFactory:
         self._tmppath_factory = tmppath_factory
 
     def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
-        """Same as :meth:`TempPathFactory.mktemp`, but returns a ``_pytest.compat.LEGACY_PATH`` object."""
+        """Same as :meth:`TempPathFactory.mktemp`, but returns a :class:`py.path.local` object."""
         return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
 
     def getbasetemp(self) -> LEGACY_PATH:
-        """Backward compat wrapper for ``_tmppath_factory.getbasetemp``."""
+        """Same as :meth:`TempPathFactory.getbasetemp`, but returns a :class:`py.path.local` object."""
         return legacy_path(self._tmppath_factory.getbasetemp().resolve())
 
 

@@ -312,6 +319,11 @@ class LegacyTmpdirPlugin:
 
     The returned object is a `legacy_path`_ object.
 
+    .. note::
+        These days, it is preferred to use ``tmp_path``.
+
+        :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
     .. _legacy_path: https://py.readthedocs.io/en/latest/path.html
     """
     return legacy_path(tmp_path)
@@ -40,7 +40,6 @@ if TYPE_CHECKING:
 else:
     logging_StreamHandler = logging.StreamHandler
 
-
 DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
 DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
 _ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")

@@ -218,7 +217,7 @@ def pytest_addoption(parser: Parser) -> None:
 
     def add_option_ini(option, dest, default=None, type=None, **kwargs):
         parser.addini(
-            dest, default=default, type=type, help="default value for " + option
+            dest, default=default, type=type, help="Default value for " + option
         )
         group.addoption(option, dest=dest, **kwargs)
 

@@ -228,8 +227,8 @@ def pytest_addoption(parser: Parser) -> None:
         default=None,
         metavar="LEVEL",
         help=(
-            "level of messages to catch/display.\n"
-            "Not set by default, so it depends on the root/parent log handler's"
+            "Level of messages to catch/display."
+            " Not set by default, so it depends on the root/parent log handler's"
             ' effective level, where it is "WARNING" by default.'
         ),
     )

@@ -237,58 +236,58 @@ def pytest_addoption(parser: Parser) -> None:
         "--log-format",
         dest="log_format",
         default=DEFAULT_LOG_FORMAT,
-        help="log format as used by the logging module.",
+        help="Log format used by the logging module",
     )
     add_option_ini(
         "--log-date-format",
         dest="log_date_format",
         default=DEFAULT_LOG_DATE_FORMAT,
-        help="log date format as used by the logging module.",
+        help="Log date format used by the logging module",
     )
     parser.addini(
         "log_cli",
         default=False,
         type="bool",
-        help='enable log display during test run (also known as "live logging").',
+        help='Enable log display during test run (also known as "live logging")',
     )
     add_option_ini(
-        "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
+        "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level"
     )
     add_option_ini(
         "--log-cli-format",
         dest="log_cli_format",
         default=None,
-        help="log format as used by the logging module.",
+        help="Log format used by the logging module",
     )
     add_option_ini(
         "--log-cli-date-format",
         dest="log_cli_date_format",
         default=None,
-        help="log date format as used by the logging module.",
+        help="Log date format used by the logging module",
     )
     add_option_ini(
         "--log-file",
         dest="log_file",
         default=None,
-        help="path to a file when logging will be written to.",
+        help="Path to a file where logging will be written to",
     )
     add_option_ini(
         "--log-file-level",
         dest="log_file_level",
         default=None,
-        help="log file logging level.",
+        help="Log file logging level",
    )
     add_option_ini(
         "--log-file-format",
         dest="log_file_format",
         default=DEFAULT_LOG_FORMAT,
-        help="log format as used by the logging module.",
+        help="Log format used by the logging module",
     )
     add_option_ini(
         "--log-file-date-format",
         dest="log_file_date_format",
         default=DEFAULT_LOG_DATE_FORMAT,
-        help="log date format as used by the logging module.",
+        help="Log date format used by the logging module",
     )
     add_option_ini(
         "--log-auto-indent",

@@ -345,6 +344,10 @@ class LogCaptureHandler(logging_StreamHandler):
         self.records = []
         self.stream = StringIO()
 
+    def clear(self) -> None:
+        self.records.clear()
+        self.stream = StringIO()
+
     def handleError(self, record: logging.LogRecord) -> None:
         if logging.raiseExceptions:
             # Fail the test if the log message is bad (emit failed).

@@ -440,7 +443,7 @@ class LogCaptureFixture:
 
     def clear(self) -> None:
         """Reset the list of log records and the captured log text."""
-        self.handler.reset()
+        self.handler.clear()
 
     def set_level(self, level: Union[int, str], logger: Optional[str] = None) -> None:
         """Set the level of a logger for the duration of a test.
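Renaming the handler method from `reset()` to `clear()` lines it up with the public `caplog.clear()`, which drops both the records and the captured text. A hypothetical test using it:

.. code-block:: python

    import logging

    import pytest


    def test_clear_discards_earlier_records(caplog: pytest.LogCaptureFixture) -> None:
        logging.getLogger().warning("setup noise")
        caplog.clear()  # drop the records and text captured so far
        logging.getLogger().warning("the interesting one")
        assert caplog.messages == ["the interesting one"]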
@@ -51,7 +51,7 @@ if TYPE_CHECKING:
 def pytest_addoption(parser: Parser) -> None:
     parser.addini(
         "norecursedirs",
-        "directory patterns to avoid for recursion",
+        "Directory patterns to avoid for recursion",
         type="args",
         default=[
             "*.egg",

@@ -67,26 +67,26 @@ def pytest_addoption(parser: Parser) -> None:
     )
     parser.addini(
         "testpaths",
-        "directories to search for tests when no files or directories are given in the "
-        "command line.",
+        "Directories to search for tests when no files or directories are given on the "
+        "command line",
         type="args",
         default=[],
     )
-    group = parser.getgroup("general", "running and selection options")
+    group = parser.getgroup("general", "Running and selection options")
     group._addoption(
         "-x",
         "--exitfirst",
         action="store_const",
         dest="maxfail",
         const=1,
-        help="exit instantly on first error or failed test.",
+        help="Exit instantly on first error or failed test",
     )
     group = parser.getgroup("pytest-warnings")
     group.addoption(
         "-W",
         "--pythonwarnings",
         action="append",
-        help="set which warnings to report, see -W option of python itself.",
+        help="Set which warnings to report, see -W option of Python itself",
     )
     parser.addini(
         "filterwarnings",

@@ -102,37 +102,39 @@ def pytest_addoption(parser: Parser) -> None:
         type=int,
         dest="maxfail",
         default=0,
-        help="exit after first num failures or errors.",
+        help="Exit after first num failures or errors",
     )
     group._addoption(
         "--strict-config",
         action="store_true",
-        help="any warnings encountered while parsing the `pytest` section of the configuration file raise errors.",
+        help="Any warnings encountered while parsing the `pytest` section of the "
+        "configuration file raise errors",
     )
     group._addoption(
         "--strict-markers",
         action="store_true",
-        help="markers not registered in the `markers` section of the configuration file raise errors.",
+        help="Markers not registered in the `markers` section of the configuration "
+        "file raise errors",
     )
     group._addoption(
         "--strict",
         action="store_true",
-        help="(deprecated) alias to --strict-markers.",
+        help="(Deprecated) alias to --strict-markers",
     )
     group._addoption(
         "-c",
         metavar="file",
         type=str,
         dest="inifilename",
-        help="load configuration from `file` instead of trying to locate one of the implicit "
-        "configuration files.",
+        help="Load configuration from `file` instead of trying to locate one of the "
+        "implicit configuration files",
     )
     group._addoption(
         "--continue-on-collection-errors",
         action="store_true",
         default=False,
         dest="continue_on_collection_errors",
-        help="Force test execution even if collection errors occur.",
+        help="Force test execution even if collection errors occur",
     )
     group._addoption(
         "--rootdir",

@@ -149,30 +151,30 @@ def pytest_addoption(parser: Parser) -> None:
         "--collect-only",
         "--co",
         action="store_true",
-        help="only collect tests, don't execute them.",
+        help="Only collect tests, don't execute them",
     )
     group.addoption(
         "--pyargs",
         action="store_true",
-        help="try to interpret all arguments as python packages.",
+        help="Try to interpret all arguments as Python packages",
     )
     group.addoption(
         "--ignore",
         action="append",
         metavar="path",
-        help="ignore path during collection (multi-allowed).",
+        help="Ignore path during collection (multi-allowed)",
     )
     group.addoption(
         "--ignore-glob",
         action="append",
         metavar="path",
-        help="ignore path pattern during collection (multi-allowed).",
+        help="Ignore path pattern during collection (multi-allowed)",
     )
     group.addoption(
         "--deselect",
         action="append",
         metavar="nodeid_prefix",
-        help="deselect item (via node id prefix) during collection (multi-allowed).",
+        help="Deselect item (via node id prefix) during collection (multi-allowed)",
     )
     group.addoption(
         "--confcutdir",

@@ -180,14 +182,14 @@ def pytest_addoption(parser: Parser) -> None:
         default=None,
         metavar="dir",
         type=functools.partial(directory_arg, optname="--confcutdir"),
-        help="only load conftest.py's relative to specified dir.",
+        help="Only load conftest.py's relative to specified dir",
     )
     group.addoption(
         "--noconftest",
         action="store_true",
         dest="noconftest",
         default=False,
-        help="Don't load any conftest.py files.",
+        help="Don't load any conftest.py files",
     )
     group.addoption(
         "--keepduplicates",

@@ -195,7 +197,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="keepduplicates",
         default=False,
-        help="Keep duplicate tests.",
+        help="Keep duplicate tests",
     )
     group.addoption(
         "--collect-in-virtualenv",

@@ -209,8 +211,8 @@ def pytest_addoption(parser: Parser) -> None:
         default="prepend",
         choices=["prepend", "append", "importlib"],
         dest="importmode",
-        help="prepend/append to sys.path when importing test modules and conftest files, "
-        "default is to prepend.",
+        help="Prepend/append to sys.path when importing test modules and conftest "
+        "files. Default: prepend.",
     )
 
     group = parser.getgroup("debugconfig", "test session debugging and configuration")

@@ -221,8 +223,8 @@ def pytest_addoption(parser: Parser) -> None:
         type=validate_basetemp,
         metavar="dir",
         help=(
-            "base temporary directory for this test run."
-            "(warning: this directory is removed if it exists)"
+            "Base temporary directory for this test run. "
+            "(Warning: this directory is removed if it exists.)"
         ),
     )
 
@@ -76,8 +76,8 @@ def pytest_addoption(parser: Parser) -> None:
         dest="keyword",
         default="",
         metavar="EXPRESSION",
-        help="only run tests which match the given substring expression. "
-        "An expression is a python evaluatable expression "
+        help="Only run tests which match the given substring expression. "
+        "An expression is a Python evaluatable expression "
         "where all names are substring-matched against test names "
         "and their parent classes. Example: -k 'test_method or test_"
         "other' matches all test functions and classes whose name "

@@ -96,7 +96,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="markexpr",
         default="",
         metavar="MARKEXPR",
-        help="only run tests matching given mark expression.\n"
+        help="Only run tests matching given mark expression. "
         "For example: -m 'mark1 and not mark2'.",
     )
 

@@ -106,8 +106,8 @@ def pytest_addoption(parser: Parser) -> None:
         help="show markers (builtin, plugin and per-project ones).",
     )
 
-    parser.addini("markers", "markers for test functions", "linelist")
-    parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets")
+    parser.addini("markers", "Markers for test functions", "linelist")
+    parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets")
 
 
 @hookimpl(tryfirst=True)
@@ -21,15 +21,12 @@ import types
 from typing import Callable
 from typing import Iterator
 from typing import Mapping
+from typing import NoReturn
 from typing import Optional
 from typing import Sequence
 from typing import TYPE_CHECKING
 
 import attr
 
-if TYPE_CHECKING:
-    from typing import NoReturn
-
 
 __all__ = [
     "Expression",

@@ -117,7 +114,7 @@ class Scanner:
             self.reject((type,))
         return None
 
-    def reject(self, expected: Sequence[TokenType]) -> "NoReturn":
+    def reject(self, expected: Sequence[TokenType]) -> NoReturn:
         raise ParseError(
             self.current.pos + 1,
             "expected {}; got {}".format(
@@ -5,6 +5,7 @@ import warnings
 from typing import Any
 from typing import Callable
 from typing import cast
+from typing import NoReturn
 from typing import Optional
 from typing import Type
 from typing import TypeVar

@@ -14,7 +15,6 @@ from _pytest.deprecated import KEYWORD_MSG_ARG
 TYPE_CHECKING = False  # Avoid circular import through compat.
 
 if TYPE_CHECKING:
-    from typing import NoReturn
     from typing_extensions import Protocol
 else:
     # typing.Protocol is only available starting from Python 3.8. It is also

@@ -115,7 +115,7 @@ def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _E
 @_with_exception(Exit)
 def exit(
     reason: str = "", returncode: Optional[int] = None, *, msg: Optional[str] = None
-) -> "NoReturn":
+) -> NoReturn:
     """Exit testing process.
 
     :param reason:

@@ -146,7 +146,7 @@ def exit(
 @_with_exception(Skipped)
 def skip(
     reason: str = "", *, allow_module_level: bool = False, msg: Optional[str] = None
-) -> "NoReturn":
+) -> NoReturn:
     """Skip an executing test with the given message.
 
     This function should be called only during testing (setup, call or teardown) or

@@ -176,9 +176,7 @@ def skip(
 
 
 @_with_exception(Failed)
-def fail(
-    reason: str = "", pytrace: bool = True, msg: Optional[str] = None
-) -> "NoReturn":
+def fail(reason: str = "", pytrace: bool = True, msg: Optional[str] = None) -> NoReturn:
     """Explicitly fail an executing test with the given message.
 
     :param reason:

@@ -238,7 +236,7 @@ class XFailed(Failed):
 
 
 @_with_exception(XFailed)
-def xfail(reason: str = "") -> "NoReturn":
+def xfail(reason: str = "") -> NoReturn:
     """Imperatively xfail an executing test or setup function with the given reason.
 
     This function should be called only during testing (setup, call or teardown).
@@ -24,7 +24,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="pastebin",
         default=None,
         choices=["failed", "all"],
-        help="send failed|all info to bpaste.net pastebin service.",
+        help="Send failed|all info to bpaste.net pastebin service",
     )
 
@@ -89,7 +89,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="lsof",
         default=False,
-        help="run FD checks if lsof is available",
+        help="Run FD checks if lsof is available",
     )

     parser.addoption(

@@ -98,13 +98,13 @@ def pytest_addoption(parser: Parser) -> None:
         dest="runpytest",
         choices=("inprocess", "subprocess"),
         help=(
-            "run pytest sub runs in tests using an 'inprocess' "
+            "Run pytest sub runs in tests using an 'inprocess' "
             "or 'subprocess' (python -m main) method"
         ),
     )

     parser.addini(
-        "pytester_example_dir", help="directory to take the pytester example files from"
+        "pytester_example_dir", help="Directory to take the pytester example files from"
     )

@@ -904,13 +904,13 @@ class Pytester:

         self._monkeypatch.syspath_prepend(str(path))

-    def mkdir(self, name: str) -> Path:
+    def mkdir(self, name: Union[str, "os.PathLike[str]"]) -> Path:
         """Create a new (sub)directory."""
         p = self.path / name
         p.mkdir()
         return p

-    def mkpydir(self, name: str) -> Path:
+    def mkpydir(self, name: Union[str, "os.PathLike[str]"]) -> Path:
         """Create a new python package.

         This creates a (sub)directory with an empty ``__init__.py`` file so it

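With the widened signatures, `Pytester.mkdir` and `Pytester.mkpydir` accept any `os.PathLike[str]`, so a `pathlib.Path` works as well as a plain string. A small usage sketch under that assumption:

    from pathlib import Path

    def test_layout(pytester):
        # str and Path arguments are now equally valid:
        pkg = pytester.mkpydir(Path("mypkg"))
        data = pytester.mkdir("data")
        assert pkg.joinpath("__init__.py").exists()
        assert data.is_dir()
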
@@ -77,10 +77,12 @@ from _pytest.pathlib import parts
 from _pytest.pathlib import visit
 from _pytest.scope import Scope
 from _pytest.warning_types import PytestCollectionWarning
+from _pytest.warning_types import PytestReturnNotNoneWarning
 from _pytest.warning_types import PytestUnhandledCoroutineWarning

 if TYPE_CHECKING:
     from typing_extensions import Literal

     from _pytest.scope import _ScopeName

@@ -95,7 +97,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="showfixtures",
         default=False,
-        help="show available fixtures, sorted by plugin appearance "
+        help="Show available fixtures, sorted by plugin appearance "
         "(fixtures with leading '_' are only shown with '-v')",
     )
     group.addoption(

@@ -103,32 +105,32 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="show_fixtures_per_test",
         default=False,
-        help="show fixtures per test",
+        help="Show fixtures per test",
     )
     parser.addini(
         "python_files",
         type="args",
         # NOTE: default is also used in AssertionRewritingHook.
         default=["test_*.py", "*_test.py"],
-        help="glob-style file patterns for Python test module discovery",
+        help="Glob-style file patterns for Python test module discovery",
     )
     parser.addini(
         "python_classes",
         type="args",
         default=["Test"],
-        help="prefixes or glob names for Python test class discovery",
+        help="Prefixes or glob names for Python test class discovery",
     )
     parser.addini(
         "python_functions",
         type="args",
         default=["test"],
-        help="prefixes or glob names for Python test function and method discovery",
+        help="Prefixes or glob names for Python test function and method discovery",
     )
     parser.addini(
         "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
         type="bool",
         default=False,
-        help="disable string escape non-ascii characters, might cause unwanted "
+        help="Disable string escape non-ASCII characters, might cause unwanted "
         "side effects(use at your own risk)",
     )

@@ -192,6 +194,13 @@ def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]:
         result = testfunction(**testargs)
         if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
             async_warn_and_skip(pyfuncitem.nodeid)
+        elif result is not None:
+            warnings.warn(
+                PytestReturnNotNoneWarning(
+                    f"Expected None, but {pyfuncitem.nodeid} returned {result!r}, which will be an error in a "
+                    "future version of pytest.  Did you mean to use `assert` instead of `return`?"
+                )
+            )
         return True

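The new branch in `pytest_pyfunc_call` emits `PytestReturnNotNoneWarning` whenever a test function returns a value, which usually means the author wrote `return` where they meant `assert`. A minimal reproducer:

    # test_return_warning.py -- running `pytest` on this file emits the warning,
    # because the test returns its condition instead of asserting it.
    def test_addition():
        return 1 + 1 == 2  # BUG: always "passes"; should be `assert 1 + 1 == 2`
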
@@ -133,9 +133,11 @@ class ApproxBase:
     # raise if there are any non-numeric elements in the sequence.


-def _recursive_list_map(f, x):
-    if isinstance(x, list):
-        return [_recursive_list_map(f, xi) for xi in x]
+def _recursive_sequence_map(f, x):
+    """Recursively map a function over a sequence of arbitrary depth"""
+    if isinstance(x, (list, tuple)):
+        seq_type = type(x)
+        return seq_type(_recursive_sequence_map(f, xi) for xi in x)
     else:
         return f(x)
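The replacement helper preserves the container type while recursing, so tuples stay tuples; the old `_recursive_list_map` only handled lists. A quick sketch of the behavior:

    # _recursive_sequence_map applies f to every scalar, rebuilding each
    # nested list/tuple with its original type:
    _recursive_sequence_map(abs, [-1, (-2, [-3])])  # -> [1, (2, [3])]
    _recursive_sequence_map(abs, -5)                # -> 5 (scalars map directly)

Note that `seq_type(...)` over a generator works because both `list` and `tuple` accept any iterable.
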
@@ -144,7 +146,9 @@ class ApproxNumpy(ApproxBase):
     """Perform approximate comparisons where the expected value is numpy array."""

     def __repr__(self) -> str:
-        list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
+        list_scalars = _recursive_sequence_map(
+            self._approx_scalar, self.expected.tolist()
+        )
         return f"approx({list_scalars!r})"

     def _repr_compare(self, other_side: "ndarray") -> List[str]:

@@ -164,7 +168,7 @@ class ApproxNumpy(ApproxBase):
             return value

         np_array_shape = self.expected.shape
-        approx_side_as_list = _recursive_list_map(
+        approx_side_as_seq = _recursive_sequence_map(
             self._approx_scalar, self.expected.tolist()
         )

@@ -179,7 +183,7 @@ class ApproxNumpy(ApproxBase):
         max_rel_diff = -math.inf
         different_ids = []
         for index in itertools.product(*(range(i) for i in np_array_shape)):
-            approx_value = get_value_from_nested_list(approx_side_as_list, index)
+            approx_value = get_value_from_nested_list(approx_side_as_seq, index)
             other_value = get_value_from_nested_list(other_side, index)
             if approx_value != other_value:
                 abs_diff = abs(approx_value.expected - other_value)

@@ -194,7 +198,7 @@ class ApproxNumpy(ApproxBase):
             (
                 str(index),
                 str(get_value_from_nested_list(other_side, index)),
-                str(get_value_from_nested_list(approx_side_as_list, index)),
+                str(get_value_from_nested_list(approx_side_as_seq, index)),
             )
             for index in different_ids
         ]

@@ -326,7 +330,7 @@ class ApproxSequenceLike(ApproxBase):
             f"Lengths: {len(self.expected)} and {len(other_side)}",
         ]

-        approx_side_as_map = _recursive_list_map(self._approx_scalar, self.expected)
+        approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected)

         number_of_elements = len(approx_side_as_map)
         max_abs_diff = -math.inf

@@ -666,6 +670,11 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
       specialised test helpers in :std:doc:`numpy:reference/routines.testing`
       if you need support for comparisons, NaNs, or ULP-based tolerances.

+    To match strings using regex, you can use
+    `Matches <https://github.com/asottile/re-assert#re_assertmatchespattern-str-args-kwargs>`_
+    from the
+    `re_assert package <https://github.com/asottile/re-assert>`_.
+
     .. warning::

        .. versionchanged:: 3.2

@@ -899,6 +908,12 @@ def raises(
     """
     __tracebackhide__ = True

+    if not expected_exception:
+        raise ValueError(
+            f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. "
+            f"Raising exceptions is already understood as failing the test, so you don't need "
+            f"any special code to say 'this should never raise an exception'."
+        )
     if isinstance(expected_exception, type):
         excepted_exceptions: Tuple[Type[E], ...] = (expected_exception,)
     else:

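With the new guard, `pytest.raises(())` and `pytest.raises(None)` fail fast with a `ValueError` instead of silently accepting an expectation that can never match. The correct way to express "may raise either of these" remains a non-empty tuple:

    import pytest

    def test_conversion_error():
        # A non-empty tuple means "any of these exception types".
        with pytest.raises((ValueError, TypeError)):
            int("not a number")
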
@@ -160,7 +160,14 @@ def warns(
 class WarningsRecorder(warnings.catch_warnings):
     """A context manager to record raised warnings.

+    Each recorded warning is an instance of :class:`warnings.WarningMessage`.
+
     Adapted from `warnings.catch_warnings`.
+
+    .. note::
+        ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
+        differently; see :ref:`ensuring_function_triggers`.
+
     """

     def __init__(self, *, _ispytest: bool = False) -> None:

@@ -8,6 +8,7 @@ from typing import Iterable
 from typing import Iterator
 from typing import List
 from typing import Mapping
+from typing import NoReturn
 from typing import Optional
 from typing import Tuple
 from typing import Type

@@ -36,7 +37,6 @@ from _pytest.nodes import Item
 from _pytest.outcomes import skip

 if TYPE_CHECKING:
-    from typing import NoReturn
     from typing_extensions import Literal

     from _pytest.runner import CallInfo

@@ -229,7 +229,7 @@ class BaseReport:

 def _report_unserialization_failure(
     type_name: str, report_class: Type[BaseReport], reportdict
-) -> "NoReturn":
+) -> NoReturn:
     url = "https://github.com/pytest-dev/pytest/issues"
     stream = StringIO()
     pprint("-" * 100, stream=stream)

@@ -46,14 +46,14 @@ if TYPE_CHECKING:


 def pytest_addoption(parser: Parser) -> None:
-    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group = parser.getgroup("terminal reporting", "Reporting", after="general")
     group.addoption(
         "--durations",
         action="store",
         type=int,
         default=None,
         metavar="N",
-        help="show N slowest setup/test durations (N=0 for all).",
+        help="Show N slowest setup/test durations (N=0 for all)",
     )
     group.addoption(
         "--durations-min",

@@ -61,7 +61,8 @@ def pytest_addoption(parser: Parser) -> None:
         type=float,
         default=0.005,
         metavar="N",
-        help="Minimal duration in seconds for inclusion in slowest list. Default 0.005",
+        help="Minimal duration in seconds for inclusion in slowest list. "
+        "Default: 0.005.",
     )

@@ -18,13 +18,13 @@ def pytest_addoption(parser: Parser) -> None:
         "--setuponly",
         "--setup-only",
         action="store_true",
-        help="only setup fixtures, do not execute tests.",
+        help="Only setup fixtures, do not execute tests",
     )
     group.addoption(
         "--setupshow",
         "--setup-show",
         action="store_true",
-        help="show setup of fixtures while executing tests.",
+        help="Show setup of fixtures while executing tests",
     )

@@ -15,8 +15,8 @@ def pytest_addoption(parser: Parser) -> None:
         "--setupplan",
         "--setup-plan",
         action="store_true",
-        help="show what fixtures and tests would be executed but "
-        "don't execute anything.",
+        help="Show what fixtures and tests would be executed but "
+        "don't execute anything",
     )

@@ -31,12 +31,12 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="runxfail",
         default=False,
-        help="report the results of xfail tests as if they were not marked",
+        help="Report the results of xfail tests as if they were not marked",
     )

     parser.addini(
         "xfail_strict",
-        "default for the strict parameter of xfail "
+        "Default for the strict parameter of xfail "
         "markers when not given explicitly (default: False)",
         default=False,
         type="bool",

@@ -23,7 +23,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         default=False,
         dest="stepwise",
-        help="exit on test failure and continue from last failing test next time",
+        help="Exit on test failure and continue from last failing test next time",
     )
     group.addoption(
         "--sw-skip",

@@ -31,8 +31,8 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         default=False,
         dest="stepwise_skip",
-        help="ignore the first failing test but stop on the next failing test.\n"
-        "implicitly enables --stepwise.",
+        help="Ignore the first failing test but stop on the next failing test. "
+        "Implicitly enables --stepwise.",
     )

@@ -35,7 +35,9 @@ from _pytest import nodes
 from _pytest import timing
 from _pytest._code import ExceptionInfo
 from _pytest._code.code import ExceptionRepr
+from _pytest._io import TerminalWriter
 from _pytest._io.wcwidth import wcswidth
+from _pytest.assertion.util import running_on_ci
 from _pytest.compat import final
 from _pytest.config import _PluggyPlugin
 from _pytest.config import Config

@@ -110,28 +112,28 @@ class MoreQuietAction(argparse.Action):


 def pytest_addoption(parser: Parser) -> None:
-    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group = parser.getgroup("terminal reporting", "Reporting", after="general")
     group._addoption(
         "-v",
         "--verbose",
         action="count",
         default=0,
         dest="verbose",
-        help="increase verbosity.",
+        help="Increase verbosity",
     )
     group._addoption(
         "--no-header",
         action="store_true",
         default=False,
         dest="no_header",
-        help="disable header",
+        help="Disable header",
     )
     group._addoption(
         "--no-summary",
         action="store_true",
         default=False,
         dest="no_summary",
-        help="disable summary",
+        help="Disable summary",
     )
     group._addoption(
         "-q",

@@ -139,14 +141,14 @@ def pytest_addoption(parser: Parser) -> None:
         action=MoreQuietAction,
         default=0,
         dest="verbose",
-        help="decrease verbosity.",
+        help="Decrease verbosity",
     )
     group._addoption(
         "--verbosity",
         dest="verbose",
         type=int,
         default=0,
-        help="set verbosity. Default is 0.",
+        help="Set verbosity. Default: 0.",
     )
     group._addoption(
         "-r",

@@ -154,7 +156,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="reportchars",
         default=_REPORTCHARS_DEFAULT,
         metavar="chars",
-        help="show extra test summary info as specified by chars: (f)ailed, "
+        help="Show extra test summary info as specified by chars: (f)ailed, "
         "(E)rror, (s)kipped, (x)failed, (X)passed, "
         "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. "
         "(w)arnings are enabled by default (see --disable-warnings), "

@@ -166,7 +168,7 @@ def pytest_addoption(parser: Parser) -> None:
         default=False,
         dest="disable_warnings",
         action="store_true",
-        help="disable warnings summary",
+        help="Disable warnings summary",
     )
     group._addoption(
         "-l",

@@ -174,7 +176,7 @@ def pytest_addoption(parser: Parser) -> None:
         action="store_true",
         dest="showlocals",
         default=False,
-        help="show locals in tracebacks (disabled by default).",
+        help="Show locals in tracebacks (disabled by default)",
     )
     group._addoption(
         "--tb",

@@ -183,7 +185,7 @@ def pytest_addoption(parser: Parser) -> None:
         dest="tbstyle",
         default="auto",
         choices=["auto", "long", "short", "no", "line", "native"],
-        help="traceback print mode (auto/long/short/line/native/no).",
+        help="Traceback print mode (auto/long/short/line/native/no)",
     )
     group._addoption(
         "--show-capture",

@@ -192,14 +194,14 @@ def pytest_addoption(parser: Parser) -> None:
         choices=["no", "stdout", "stderr", "log", "all"],
         default="all",
         help="Controls how captured stdout/stderr/log is shown on failed tests. "
-        "Default is 'all'.",
+        "Default: all.",
     )
     group._addoption(
         "--fulltrace",
         "--full-trace",
         action="store_true",
         default=False,
-        help="don't cut any tracebacks (default is to cut).",
+        help="Don't cut any tracebacks (default is to cut)",
     )
     group._addoption(
         "--color",

@@ -208,18 +210,20 @@ def pytest_addoption(parser: Parser) -> None:
         dest="color",
         default="auto",
         choices=["yes", "no", "auto"],
-        help="color terminal output (yes/no/auto).",
+        help="Color terminal output (yes/no/auto)",
     )
     group._addoption(
         "--code-highlight",
         default="yes",
         choices=["yes", "no"],
-        help="Whether code should be highlighted (only if --color is also enabled)",
+        help="Whether code should be highlighted (only if --color is also enabled). "
+        "Default: yes.",
     )

     parser.addini(
         "console_output_style",
-        help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").',
+        help='Console output: "classic", or with additional progress information '
+        '("progress" (percentage) | "count")',
         default="progress",
     )

@@ -728,8 +732,8 @@ class TerminalReporter:
         if config.inipath:
             line += ", configfile: " + bestrelpath(config.rootpath, config.inipath)

-        testpaths: List[str] = config.getini("testpaths")
-        if config.invocation_params.dir == config.rootpath and config.args == testpaths:
+        if config.args_source == Config.ArgsSource.TESTPATHS:
+            testpaths: List[str] = config.getini("testpaths")
             line += ", testpaths: {}".format(", ".join(testpaths))

         result = [line]

@@ -1074,33 +1078,43 @@ class TerminalReporter:
         if not self.reportchars:
             return

-        def show_simple(stat, lines: List[str]) -> None:
+        def show_simple(lines: List[str], *, stat: str) -> None:
             failed = self.stats.get(stat, [])
             if not failed:
                 return
-            termwidth = self._tw.fullwidth
             config = self.config
             for rep in failed:
-                line = _get_line_with_reprcrash_message(config, rep, termwidth)
+                color = _color_for_type.get(stat, _color_for_type_default)
+                line = _get_line_with_reprcrash_message(
+                    config, rep, self._tw, {color: True}
+                )
                 lines.append(line)

         def show_xfailed(lines: List[str]) -> None:
             xfailed = self.stats.get("xfailed", [])
             for rep in xfailed:
                 verbose_word = rep._get_verbose_word(self.config)
-                pos = _get_pos(self.config, rep)
-                lines.append(f"{verbose_word} {pos}")
+                markup_word = self._tw.markup(
+                    verbose_word, **{_color_for_type["warnings"]: True}
+                )
+                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
+                line = f"{markup_word} {nodeid}"
                 reason = rep.wasxfail
                 if reason:
-                    lines.append("  " + str(reason))
+                    line += " - " + str(reason)
+
+                lines.append(line)

         def show_xpassed(lines: List[str]) -> None:
             xpassed = self.stats.get("xpassed", [])
             for rep in xpassed:
                 verbose_word = rep._get_verbose_word(self.config)
-                pos = _get_pos(self.config, rep)
+                markup_word = self._tw.markup(
+                    verbose_word, **{_color_for_type["warnings"]: True}
+                )
+                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
                 reason = rep.wasxfail
-                lines.append(f"{verbose_word} {pos} {reason}")
+                lines.append(f"{markup_word} {nodeid} {reason}")

         def show_skipped(lines: List[str]) -> None:
             skipped: List[CollectReport] = self.stats.get("skipped", [])

@@ -1108,24 +1122,27 @@ class TerminalReporter:
             if not fskips:
                 return
             verbose_word = skipped[0]._get_verbose_word(self.config)
+            markup_word = self._tw.markup(
+                verbose_word, **{_color_for_type["warnings"]: True}
+            )
+            prefix = "Skipped: "
             for num, fspath, lineno, reason in fskips:
-                if reason.startswith("Skipped: "):
-                    reason = reason[9:]
+                if reason.startswith(prefix):
+                    reason = reason[len(prefix) :]
                 if lineno is not None:
                     lines.append(
-                        "%s [%d] %s:%d: %s"
-                        % (verbose_word, num, fspath, lineno, reason)
+                        "%s [%d] %s:%d: %s" % (markup_word, num, fspath, lineno, reason)
                     )
                 else:
-                    lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))
+                    lines.append("%s [%d] %s: %s" % (markup_word, num, fspath, reason))

         REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = {
             "x": show_xfailed,
             "X": show_xpassed,
-            "f": partial(show_simple, "failed"),
+            "f": partial(show_simple, stat="failed"),
             "s": show_skipped,
-            "p": partial(show_simple, "passed"),
-            "E": partial(show_simple, "error"),
+            "p": partial(show_simple, stat="passed"),
+            "E": partial(show_simple, stat="error"),
         }

         lines: List[str] = []

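Making `stat` keyword-only lets the `partial` bindings read unambiguously while the remaining positional slot stays free for `lines`. The same pattern in isolation:

    from functools import partial

    def show(lines, *, stat):
        lines.append(f"showing {stat}")

    show_failed = partial(show, stat="failed")  # binds the keyword, not a position
    out = []
    show_failed(out)  # out == ["showing failed"]
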
@@ -1135,7 +1152,7 @@ class TerminalReporter:
             action(lines)

         if lines:
-            self.write_sep("=", "short test summary info")
+            self.write_sep("=", "short test summary info", cyan=True, bold=True)
             for line in lines:
                 self.write_line(line)

@@ -1249,9 +1266,14 @@ class TerminalReporter:
     return parts, main_color


-def _get_pos(config: Config, rep: BaseReport):
+def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport):
     nodeid = config.cwd_relative_nodeid(rep.nodeid)
-    return nodeid
+    path, *parts = nodeid.split("::")
+    if parts:
+        parts_markup = tw.markup("::".join(parts), bold=True)
+        return path + "::" + parts_markup
+    else:
+        return path


 def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]:

@@ -1280,13 +1302,14 @@ def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str


 def _get_line_with_reprcrash_message(
-    config: Config, rep: BaseReport, termwidth: int
+    config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: Dict[str, bool]
 ) -> str:
     """Get summary line for a report, trying to add reprcrash message."""
     verbose_word = rep._get_verbose_word(config)
-    pos = _get_pos(config, rep)
+    word = tw.markup(verbose_word, **word_markup)
+    node = _get_node_id_with_markup(tw, config, rep)

-    line = f"{verbose_word} {pos}"
+    line = f"{word} {node}"
     line_width = wcswidth(line)

     try:

@@ -1295,8 +1318,11 @@ def _get_line_with_reprcrash_message(
     except AttributeError:
         pass
     else:
-        available_width = termwidth - line_width
-        msg = _format_trimmed(" - {}", msg, available_width)
+        if not running_on_ci():
+            available_width = tw.fullwidth - line_width
+            msg = _format_trimmed(" - {}", msg, available_width)
+        else:
+            msg = f" - {msg}"
     if msg is not None:
         line += msg

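The effect of this hunk: summary crash messages are only trimmed to the terminal width for interactive runs; on CI the full message is kept, since CI logs have no meaningful terminal width. For reference, `running_on_ci` is a simple environment check, roughly as follows (a sketch, not the verbatim source):

    import os

    def running_on_ci() -> bool:
        # Most CI providers set CI=...; older Jenkins setups set BUILD_NUMBER.
        return any(var in os.environ for var in ("CI", "BUILD_NUMBER"))
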
@@ -316,7 +316,10 @@ class TestCaseFunction(Function):
         # Arguably we could always postpone tearDown(), but this changes the moment where the
         # TestCase instance interacts with the results object, so better to only do it
         # when absolutely needed.
-        if self.config.getoption("usepdb") and not _is_skipped(self.obj):
+        # We need to consider if the test itself is skipped, or the whole class.
+        assert isinstance(self.parent, UnitTestCase)
+        skipped = _is_skipped(self.obj) or _is_skipped(self.parent.obj)
+        if self.config.getoption("usepdb") and not skipped:
             self._explicit_tearDown = self._testcase.tearDown
             setattr(self._testcase, "tearDown", lambda *args: None)

@@ -55,6 +55,13 @@ class PytestRemovedIn8Warning(PytestDeprecationWarning):
     __module__ = "pytest"


+@final
+class PytestReturnNotNoneWarning(PytestDeprecationWarning):
+    """Warning emitted when a test function is returning value other than None."""
+
+    __module__ = "pytest"
+
+
 @final
 class PytestExperimentalApiWarning(PytestWarning, FutureWarning):
     """Warning category used to denote experiments in pytest.

@@ -69,6 +69,7 @@ from _pytest.warning_types import PytestConfigWarning
 from _pytest.warning_types import PytestDeprecationWarning
 from _pytest.warning_types import PytestExperimentalApiWarning
 from _pytest.warning_types import PytestRemovedIn8Warning
+from _pytest.warning_types import PytestReturnNotNoneWarning
 from _pytest.warning_types import PytestUnhandledCoroutineWarning
 from _pytest.warning_types import PytestUnhandledThreadExceptionWarning
 from _pytest.warning_types import PytestUnknownMarkWarning

@@ -127,6 +128,7 @@ __all__ = [
     "PytestDeprecationWarning",
     "PytestExperimentalApiWarning",
     "PytestRemovedIn8Warning",
+    "PytestReturnNotNoneWarning",
     "Pytester",
     "PytestPluginManager",
     "PytestUnhandledCoroutineWarning",

@@ -1292,3 +1292,14 @@ def test_no_brokenpipeerror_message(pytester: Pytester) -> None:

     # Cleanup.
     popen.stderr.close()
+
+
+def test_function_return_non_none_warning(testdir) -> None:
+    testdir.makepyfile(
+        """
+        def test_stuff():
+            return "something"
+        """
+    )
+    res = testdir.runpytest()
+    res.stdout.fnmatch_lines(["*Did you mean to use `assert` instead of `return`?*"])

@@ -172,6 +172,24 @@ def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardow
     assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"}


+def test_clear_for_call_stage(caplog, logging_during_setup_and_teardown):
+    logger.info("a_call_log")
+    assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]
+    assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
+    assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"}
+
+    caplog.clear()
+
+    assert caplog.get_records("call") == []
+    assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
+    assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"}
+
+    logging.info("a_call_log_after_clear")
+    assert [x.message for x in caplog.get_records("call")] == ["a_call_log_after_clear"]
+    assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
+    assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"}
+
+
 def test_ini_controls_global_log_level(pytester: Pytester) -> None:
     pytester.makepyfile(
         """

@@ -1,14 +1,14 @@
-anyio[curio,trio]==3.5.0
-django==4.0.4
+anyio[curio,trio]==3.6.1
+django==4.0.6
 pytest-asyncio==0.18.3
-pytest-bdd==5.0.0
+pytest-bdd==6.0.1
 pytest-cov==3.0.0
 pytest-django==4.5.2
 pytest-flakes==4.0.5
 pytest-html==3.1.1
-pytest-mock==3.7.0
+pytest-mock==3.8.2
 pytest-rerunfailures==10.2
-pytest-sugar==0.9.4
+pytest-sugar==0.9.5
 pytest-trio==0.7.0
 pytest-twisted==1.13.4
 twisted==22.4.0

@@ -2,12 +2,14 @@ import operator
 from contextlib import contextmanager
 from decimal import Decimal
 from fractions import Fraction
+from math import sqrt
 from operator import eq
 from operator import ne
 from typing import Optional

 import pytest
 from _pytest.pytester import Pytester
+from _pytest.python_api import _recursive_sequence_map
 from pytest import approx

 inf, nan = float("inf"), float("nan")

@@ -133,6 +135,18 @@ class TestApprox:
             ],
         )

+        assert_approx_raises_regex(
+            (1, 2.2, 4),
+            (1, 3.2, 4),
+            [
+                r"  comparison failed. Mismatched elements: 1 / 3:",
+                rf"  Max absolute difference: {SOME_FLOAT}",
+                rf"  Max relative difference: {SOME_FLOAT}",
+                r"  Index \| Obtained\s+\| Expected ",
+                rf"  1     \| {SOME_FLOAT} \| {SOME_FLOAT} ± {SOME_FLOAT}",
+            ],
+        )
+
         # Specific test for comparison with 0.0 (relative diff will be 'inf')
         assert_approx_raises_regex(
             [0.0],

@@ -878,3 +892,31 @@ class TestApprox:
         """pytest.approx() should raise an error on unordered sequences (#9692)."""
         with pytest.raises(TypeError, match="only supports ordered sequences"):
             assert {1, 2, 3} == approx({1, 2, 3})
+
+
+class TestRecursiveSequenceMap:
+    def test_map_over_scalar(self):
+        assert _recursive_sequence_map(sqrt, 16) == 4
+
+    def test_map_over_empty_list(self):
+        assert _recursive_sequence_map(sqrt, []) == []
+
+    def test_map_over_list(self):
+        assert _recursive_sequence_map(sqrt, [4, 16, 25, 676]) == [2, 4, 5, 26]
+
+    def test_map_over_tuple(self):
+        assert _recursive_sequence_map(sqrt, (4, 16, 25, 676)) == (2, 4, 5, 26)
+
+    def test_map_over_nested_lists(self):
+        assert _recursive_sequence_map(sqrt, [4, [25, 64], [[49]]]) == [
+            2,
+            [5, 8],
+            [[7]],
+        ]
+
+    def test_map_over_mixed_sequence(self):
+        assert _recursive_sequence_map(sqrt, [4, (25, 64), [(49)]]) == [
+            2,
+            (5, 8),
+            [(7)],
+        ]

@@ -19,6 +19,16 @@ class TestRaises:
         excinfo = pytest.raises(ValueError, int, "hello")
         assert "invalid literal" in str(excinfo.value)

+    def test_raises_does_not_allow_none(self):
+        with pytest.raises(ValueError, match="Expected an exception type or"):
+            # We're testing that this invalid usage gives a helpful error,
+            # so we can ignore Mypy telling us that None is invalid.
+            pytest.raises(expected_exception=None)  # type: ignore
+
+    def test_raises_does_not_allow_empty_tuple(self):
+        with pytest.raises(ValueError, match="Expected an exception type or"):
+            pytest.raises(expected_exception=())
+
     def test_raises_callable_no_exception(self) -> None:
         class A:
             def __call__(self):

@@ -82,13 +92,9 @@ class TestRaises:
     def test_does_not_raise(self, pytester: Pytester) -> None:
         pytester.makepyfile(
             """
-            from contextlib import contextmanager
+            from contextlib import nullcontext as does_not_raise
             import pytest

-            @contextmanager
-            def does_not_raise():
-                yield
-
             @pytest.mark.parametrize('example_input,expectation', [
                 (3, does_not_raise()),
                 (2, does_not_raise()),

@@ -107,13 +113,9 @@ class TestRaises:
     def test_does_not_raise_does_raise(self, pytester: Pytester) -> None:
         pytester.makepyfile(
             """
-            from contextlib import contextmanager
+            from contextlib import nullcontext as does_not_raise
             import pytest

-            @contextmanager
-            def does_not_raise():
-                yield
-
             @pytest.mark.parametrize('example_input,expectation', [
                 (0, does_not_raise()),
                 (1, pytest.raises(ZeroDivisionError)),

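`contextlib.nullcontext` (available since Python 3.7) replaces the hand-rolled `does_not_raise` context manager: it enters and exits without doing anything, which is exactly the "no exception expected" case in a parametrized raises test. A standalone sketch of the idiom:

    from contextlib import nullcontext as does_not_raise
    import pytest

    @pytest.mark.parametrize("denominator, expectation", [
        (2, does_not_raise()),                  # normal division, no error
        (0, pytest.raises(ZeroDivisionError)),  # division by zero must raise
    ])
    def test_division(denominator, expectation):
        with expectation:
            assert (6 / denominator) is not None
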
@@ -1009,7 +1009,7 @@ class TestAssertionRewriteHookDetails:
         )
         assert pytester.runpytest().ret == 0

-    def test_write_pyc(self, pytester: Pytester, tmp_path, monkeypatch) -> None:
+    def test_write_pyc(self, pytester: Pytester, tmp_path) -> None:
         from _pytest.assertion.rewrite import _write_pyc
         from _pytest.assertion import AssertionState

@@ -1021,27 +1021,8 @@ class TestAssertionRewriteHookDetails:
         co = compile("1", "f.py", "single")
         assert _write_pyc(state, co, os.stat(source_path), pycpath)

-        if sys.platform == "win32":
-            from contextlib import contextmanager
-
-            @contextmanager
-            def atomic_write_failed(fn, mode="r", overwrite=False):
-                e = OSError()
-                e.errno = 10
-                raise e
-                yield  # type:ignore[unreachable]
-
-            monkeypatch.setattr(
-                _pytest.assertion.rewrite, "atomic_write", atomic_write_failed
-            )
-        else:
-
-            def raise_oserror(*args):
-                raise OSError()
-
-            monkeypatch.setattr("os.rename", raise_oserror)
-
-        assert not _write_pyc(state, co, os.stat(source_path), pycpath)
+        with mock.patch.object(os, "replace", side_effect=OSError):
+            assert not _write_pyc(state, co, os.stat(source_path), pycpath)

     def test_resources_provider_for_loader(self, pytester: Pytester) -> None:
         """

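The platform-specific monkeypatching collapses into one `mock.patch.object` call because the write path now goes through `os.replace` on every platform; `side_effect=OSError` makes the patched function raise instead of returning. The same testing pattern in isolation, with a hypothetical `save` helper that is not part of this diff:

    import os
    from unittest import mock

    def save(path: str, data: bytes) -> bool:
        tmp = path + ".tmp"
        with open(tmp, "wb") as f:
            f.write(data)
        try:
            os.replace(tmp, path)  # atomic rename on both POSIX and Windows
        except OSError:
            return False
        return True

    def test_save_reports_failure(tmp_path):
        target = str(tmp_path / "out.bin")
        # Force the failure branch without touching the filesystem's behavior:
        with mock.patch.object(os, "replace", side_effect=OSError):
            assert save(target, b"payload") is False
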
@@ -897,6 +897,15 @@ def test_dontreadfrominput() -> None:
     iter_f = iter(f)
     pytest.raises(OSError, next, iter_f)
     pytest.raises(UnsupportedOperation, f.fileno)
+    pytest.raises(UnsupportedOperation, f.flush)
+    assert not f.readable()
+    pytest.raises(UnsupportedOperation, f.seek, 0)
+    assert not f.seekable()
+    pytest.raises(UnsupportedOperation, f.tell)
+    pytest.raises(UnsupportedOperation, f.truncate, 0)
+    pytest.raises(UnsupportedOperation, f.write, b"")
+    pytest.raises(UnsupportedOperation, f.writelines, [])
+    assert not f.writable()
     f.close()  # just for completeness

@@ -244,28 +244,32 @@ class TestCollectFS:
         pytester.makeini(
             """
             [pytest]
-            testpaths = gui uts
+            testpaths = */tests
             """
         )
         tmp_path = pytester.path
-        ensure_file(tmp_path / "env" / "test_1.py").write_text("def test_env(): pass")
-        ensure_file(tmp_path / "gui" / "test_2.py").write_text("def test_gui(): pass")
-        ensure_file(tmp_path / "uts" / "test_3.py").write_text("def test_uts(): pass")
+        ensure_file(tmp_path / "a" / "test_1.py").write_text("def test_a(): pass")
+        ensure_file(tmp_path / "b" / "tests" / "test_2.py").write_text(
+            "def test_b(): pass"
+        )
+        ensure_file(tmp_path / "c" / "tests" / "test_3.py").write_text(
+            "def test_c(): pass"
+        )

         # executing from rootdir only tests from `testpaths` directories
         # are collected
         items, reprec = pytester.inline_genitems("-v")
-        assert [x.name for x in items] == ["test_gui", "test_uts"]
+        assert [x.name for x in items] == ["test_b", "test_c"]

         # check that explicitly passing directories in the command-line
         # collects the tests
-        for dirname in ("env", "gui", "uts"):
+        for dirname in ("a", "b", "c"):
             items, reprec = pytester.inline_genitems(tmp_path.joinpath(dirname))
             assert [x.name for x in items] == ["test_%s" % dirname]

         # changing cwd to each subdirectory and running pytest without
         # arguments collects the tests in that directory normally
-        for dirname in ("env", "gui", "uts"):
+        for dirname in ("a", "b", "c"):
             monkeypatch.chdir(pytester.path.joinpath(dirname))
             items, reprec = pytester.inline_genitems()
             assert [x.name for x in items] == ["test_%s" % dirname]

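This test now exercises glob support in `testpaths`: the ini value is expanded with glob semantics, so one pattern can select every matching directory. A typical configuration under that assumption, for a project with per-package test directories:

    # pytest.ini
    [pytest]
    testpaths = */tests

Running plain `pytest` from the rootdir then collects `b/tests` and `c/tests` while ignoring test files that sit outside any matching `tests/` directory.
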
@@ -112,21 +112,26 @@ class TestParseIni:

     @pytest.mark.parametrize(
         "section, name",
-        [("tool:pytest", "setup.cfg"), ("pytest", "tox.ini"), ("pytest", "pytest.ini")],
+        [
+            ("tool:pytest", "setup.cfg"),
+            ("pytest", "tox.ini"),
+            ("pytest", "pytest.ini"),
+            ("pytest", ".pytest.ini"),
+        ],
     )
     def test_ini_names(self, pytester: Pytester, name, section) -> None:
         pytester.path.joinpath(name).write_text(
             textwrap.dedent(
                 """
                 [{section}]
-                minversion = 1.0
+                minversion = 3.36
                 """.format(
                     section=section
                 )
             )
         )
         config = pytester.parseconfig()
-        assert config.getini("minversion") == "1.0"
+        assert config.getini("minversion") == "3.36"

     def test_pyproject_toml(self, pytester: Pytester) -> None:
         pytester.makepyprojecttoml(

@@ -2117,8 +2122,8 @@ class TestDebugOptions:
         result = pytester.runpytest("-h")
         result.stdout.fnmatch_lines(
             [
-                "*store internal tracing debug information in this log*",
-                "*This file is opened with 'w' and truncated as a result*",
-                "*Defaults to 'pytestdebug.log'.",
+                "*Store internal tracing debug information in this log*",
+                "*file. This file is opened with 'w' and truncated as a*",
+                "*Default: pytestdebug.log.",
             ]
         )

@@ -553,7 +553,7 @@ class TestConftestVisibility:
                 )
             )
             print("created directory structure:")
-            for x in pytester.path.rglob(""):
+            for x in pytester.path.glob("**/"):
                 print("   " + str(x.relative_to(pytester.path)))

             return {"runner": runner, "package": package, "swc": swc, "snc": snc}

@@ -244,7 +244,7 @@ class TestPDB:
             """
             def test_1():
                 import logging
-                logging.warn("get " + "rekt")
+                logging.warning("get " + "rekt")
                 assert False
             """
         )

@@ -263,7 +263,7 @@ class TestPDB:
             """
             def test_1():
                 import logging
-                logging.warn("get " + "rekt")
+                logging.warning("get " + "rekt")
                 assert False
             """
         )

@@ -353,6 +353,7 @@ class TestPDB:
         result = pytester.runpytest_subprocess("--pdb", ".")
         result.stdout.fnmatch_lines(["-> import unknown"])

+    @pytest.mark.xfail(reason="#10042")
     def test_pdb_interaction_capturing_simple(self, pytester: Pytester) -> None:
         p1 = pytester.makepyfile(
             """

@@ -521,6 +522,7 @@ class TestPDB:
         assert "BdbQuit" not in rest
         assert "UNEXPECTED EXCEPTION" not in rest

+    @pytest.mark.xfail(reason="#10042")
     def test_pdb_interaction_capturing_twice(self, pytester: Pytester) -> None:
         p1 = pytester.makepyfile(
             """

@@ -556,6 +558,7 @@ class TestPDB:
         assert "1 failed" in rest
         self.flush(child)

+    @pytest.mark.xfail(reason="#10042")
     def test_pdb_with_injected_do_debug(self, pytester: Pytester) -> None:
         """Simulates pdbpp, which injects Pdb into do_debug, and uses
         self.__class__ in do_continue.

@@ -1000,6 +1003,7 @@ class TestDebuggingBreakpoints:
         assert "reading from stdin while output" not in rest
         TestPDB.flush(child)

+    @pytest.mark.xfail(reason="#10042")
     def test_pdb_not_altered(self, pytester: Pytester) -> None:
         p1 = pytester.makepyfile(
             """

@@ -1159,6 +1163,7 @@ def test_quit_with_swallowed_SystemExit(pytester: Pytester) -> None:


 @pytest.mark.parametrize("fixture", ("capfd", "capsys"))
+@pytest.mark.xfail(reason="#10042")
 def test_pdb_suspends_fixture_capturing(pytester: Pytester, fixture: str) -> None:
     """Using "-s" with pytest should suspend/resume fixture capturing."""
     p1 = pytester.makepyfile(

@@ -113,6 +113,28 @@ class TestDoctests:
         reprec = pytester.inline_run(p)
         reprec.assertoutcome(failed=1)

+    def test_importmode(self, pytester: Pytester):
+        p = pytester.makepyfile(
+            **{
+                "namespacepkg/innerpkg/__init__.py": "",
+                "namespacepkg/innerpkg/a.py": """
+                  def some_func():
+                      return 42
+                """,
+                "namespacepkg/innerpkg/b.py": """
+                  from namespacepkg.innerpkg.a import some_func
+                  def my_func():
+                      '''
+                      >>> my_func()
+                      42
+                      '''
+                      return some_func()
+                """,
+            }
+        )
+        reprec = pytester.inline_run(p, "--doctest-modules", "--import-mode=importlib")
+        reprec.assertoutcome(passed=1)
+
     def test_new_pattern(self, pytester: Pytester):
         p = pytester.maketxtfile(
             xdoc="""

@@ -201,7 +223,11 @@ class TestDoctests:
             "Traceback (most recent call last):",
             '  File "*/doctest.py", line *, in __run',
             "  *",
-            *(("   *^^^^*",) if sys.version_info >= (3, 11) else ()),
+            *(
+                ("   *^^^^*",)
+                if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
+                else ()
+            ),
             '  File "<doctest test_doctest_unexpected_exception.txt[1]>", line 1, in <module>',
             "ZeroDivisionError: division by zero",
             "*/test_doctest_unexpected_exception.txt:2: UnexpectedException",

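The `(3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)` guard (which recurs in several test files below) pins an expectation to early 3.11 pre-releases only. It relies on `sys.version_info` comparing element-wise as a (major, minor, micro, releaselevel, serial) tuple:

    import sys

    # True on 3.11.0b3; False on 3.11.0b4, release candidates, and final 3.11.x,
    # because ("beta", 3) < ("beta", 4) < ("candidate", 1) < ("final", 0)
    # under Python's lexicographic tuple comparison.
    early_311_prerelease = (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
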
@@ -30,11 +30,11 @@ def test_help(pytester: Pytester) -> None:
     assert result.ret == 0
     result.stdout.fnmatch_lines(
         """
-          -m MARKEXPR           only run tests matching given mark expression.
-                                For example: -m 'mark1 and not mark2'.
-        reporting:
+          -m MARKEXPR           Only run tests matching given mark expression. For
+                                example: -m 'mark1 and not mark2'.
+        Reporting:
           --durations=N *
-          -V, --version         display pytest version and information about plugins.
+          -V, --version         Display pytest version and information about plugins.
                                 When given twice, also display information about
                                 plugins.
         *setup.cfg*

@@ -71,9 +71,9 @@ def test_empty_help_param(pytester: Pytester) -> None:
     assert result.ret == 0
     lines = [
         "  required_plugins (args):",
-        "    plugins that must be present for pytest to run*",
+        "    Plugins that must be present for pytest to run*",
         "  test_ini (bool):*",
-        "environment variables:",
+        "Environment variables:",
     ]
     result.stdout.fnmatch_lines(lines, consecutive=True)

@@ -47,7 +47,7 @@ def test_wrap_session_notify_exception(ret_exc, pytester: Pytester) -> None:

     end_lines = (
         result.stdout.lines[-4:]
-        if sys.version_info >= (3, 11)
+        if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
         else result.stdout.lines[-3:]
     )

@@ -57,7 +57,7 @@ def test_wrap_session_notify_exception(ret_exc, pytester: Pytester) -> None:
             'INTERNALERROR> raise SystemExit("boom")',
             *(
                 ("INTERNALERROR>     ^^^^^^^^^^^^^^^^^^^^^^^^",)
-                if sys.version_info >= (3, 11)
+                if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
                 else ()
             ),
             "INTERNALERROR> SystemExit: boom",

@@ -68,7 +68,7 @@ def test_wrap_session_notify_exception(ret_exc, pytester: Pytester) -> None:
             'INTERNALERROR> raise ValueError("boom")',
             *(
                 ("INTERNALERROR>     ^^^^^^^^^^^^^^^^^^^^^^^^",)
-                if sys.version_info >= (3, 11)
+                if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
                 else ()
             ),
             "INTERNALERROR> ValueError: boom",

@@ -441,10 +441,8 @@ class TestXFail:
         result = pytester.runpytest(p, "-rx")
         result.stdout.fnmatch_lines(
             [
-                "*test_one*test_this*",
-                "*NOTRUN*noway",
-                "*test_one*test_this_true*",
-                "*NOTRUN*condition:*True*",
+                "*test_one*test_this - reason: *NOTRUN* noway",
+                "*test_one*test_this_true - reason: *NOTRUN* condition: True",
                 "*1 passed*",
             ]
         )

@@ -461,9 +459,7 @@ class TestXFail:
         """
         )
         result = pytester.runpytest(p, "-rx")
-        result.stdout.fnmatch_lines(
-            ["*test_one*test_this*", "*NOTRUN*hello", "*1 xfailed*"]
-        )
+        result.stdout.fnmatch_lines(["*test_one*test_this*NOTRUN*hello", "*1 xfailed*"])

     def test_xfail_xpass(self, pytester: Pytester) -> None:
         p = pytester.makepyfile(

@@ -489,7 +485,7 @@ class TestXFail:
         result = pytester.runpytest(p)
         result.stdout.fnmatch_lines(["*1 xfailed*"])
         result = pytester.runpytest(p, "-rx")
-        result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
+        result.stdout.fnmatch_lines(["*XFAIL*test_this*reason:*hello*"])
         result = pytester.runpytest(p, "--runxfail")
         result.stdout.fnmatch_lines(["*1 pass*"])

@@ -507,7 +503,7 @@ class TestXFail:
         result = pytester.runpytest(p)
         result.stdout.fnmatch_lines(["*1 xfailed*"])
         result = pytester.runpytest(p, "-rx")
-        result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
+        result.stdout.fnmatch_lines(["*XFAIL*test_this*reason:*hello*"])
         result = pytester.runpytest(p, "--runxfail")
         result.stdout.fnmatch_lines(
             """

@@ -543,7 +539,7 @@ class TestXFail:
         """
         )
         result = pytester.runpytest(p, "-rxX")
-        result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*NOTRUN*"])
+        result.stdout.fnmatch_lines(["*XFAIL*test_this*NOTRUN*"])

     def test_dynamic_xfail_set_during_funcarg_setup(self, pytester: Pytester) -> None:
         p = pytester.makepyfile(

@@ -622,7 +618,7 @@ class TestXFail:
         """
         )
         result = pytester.runpytest(p, "-rxX")
-        result.stdout.fnmatch_lines(["*XFAIL*", "*unsupported feature*"])
+        result.stdout.fnmatch_lines(["*XFAIL*unsupported feature*"])
         assert result.ret == 0

     @pytest.mark.parametrize("strict", [True, False])

@@ -1185,7 +1181,7 @@ def test_xfail_skipif_with_globals(pytester: Pytester) -> None:
         """
         )
         result = pytester.runpytest("-rsx")
-        result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"])
+        result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*x == 3*"])


 def test_default_markers(pytester: Pytester) -> None:

@@ -1297,8 +1293,7 @@ class TestBooleanCondition:
         result = pytester.runpytest("-rxs")
         result.stdout.fnmatch_lines(
             """
-            *XFAIL*
-            *True123*
+            *XFAIL*True123*
             *1 xfail*
             """
         )

@@ -277,4 +277,4 @@ def test_stepwise_skip_is_independent(pytester: Pytester) -> None:

 def test_sw_skip_help(pytester: Pytester) -> None:
     result = pytester.runpytest("-h")
-    result.stdout.fnmatch_lines("*implicitly enables --stepwise.")
+    result.stdout.fnmatch_lines("*Implicitly enables --stepwise.")

@@ -1139,7 +1139,21 @@ class TestTerminalFunctional:
         assert result.stdout.lines.count(expected) == 1


-def test_fail_extra_reporting(pytester: Pytester, monkeypatch) -> None:
+@pytest.mark.parametrize(
+    ("use_ci", "expected_message"),
+    (
+        (True, f"- AssertionError: {'this_failed'*100}"),
+        (False, "- AssertionError: this_failedt..."),
+    ),
+    ids=("on CI", "not on CI"),
+)
+def test_fail_extra_reporting(
+    pytester: Pytester, monkeypatch, use_ci: bool, expected_message: str
+) -> None:
+    if use_ci:
+        monkeypatch.setenv("CI", "true")
+    else:
+        monkeypatch.delenv("CI", raising=False)
     monkeypatch.setenv("COLUMNS", "80")
     pytester.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
     result = pytester.runpytest("-rN")

@@ -1148,7 +1162,7 @@ def test_fail_extra_reporting(pytester: Pytester, monkeypatch) -> None:
     result.stdout.fnmatch_lines(
         [
             "*test summary*",
-            "FAILED test_fail_extra_reporting.py::test_this - AssertionError: this_failedt...",
+            f"FAILED test_fail_extra_reporting.py::test_this {expected_message}",
         ]
     )

@@ -2319,7 +2333,7 @@ def test_line_with_reprcrash(monkeypatch: MonkeyPatch) -> None:
     def mock_get_pos(*args):
         return mocked_pos

-    monkeypatch.setattr(_pytest.terminal, "_get_pos", mock_get_pos)
+    monkeypatch.setattr(_pytest.terminal, "_get_node_id_with_markup", mock_get_pos)

     class config:
         pass

@@ -2333,10 +2347,16 @@ def test_line_with_reprcrash(monkeypatch: MonkeyPatch) -> None:
         pass

     def check(msg, width, expected):
+        class DummyTerminalWriter:
+            fullwidth = width
+
+            def markup(self, word: str, **markup: str):
+                return word
+
         __tracebackhide__ = True
         if msg:
             rep.longrepr.reprcrash.message = msg  # type: ignore
-        actual = _get_line_with_reprcrash_message(config, rep(), width)  # type: ignore
+        actual = _get_line_with_reprcrash_message(config, rep(), DummyTerminalWriter(), {})  # type: ignore

         assert actual == expected
         if actual != f"{mocked_verbose_word} {mocked_pos}":

@@ -1241,12 +1241,15 @@ def test_pdb_teardown_called(pytester: Pytester, monkeypatch: MonkeyPatch) -> No


 @pytest.mark.parametrize("mark", ["@unittest.skip", "@pytest.mark.skip"])
-def test_pdb_teardown_skipped(
+def test_pdb_teardown_skipped_for_functions(
     pytester: Pytester, monkeypatch: MonkeyPatch, mark: str
 ) -> None:
-    """With --pdb, setUp and tearDown should not be called for skipped tests."""
+    """
+    With --pdb, setUp and tearDown should not be called for tests skipped
+    via a decorator (#7215).
+    """
     tracked: List[str] = []
-    monkeypatch.setattr(pytest, "test_pdb_teardown_skipped", tracked, raising=False)
+    monkeypatch.setattr(pytest, "track_pdb_teardown_skipped", tracked, raising=False)

     pytester.makepyfile(
         """

@@ -1256,10 +1259,10 @@ def test_pdb_teardown_skipped(
         class MyTestCase(unittest.TestCase):

             def setUp(self):
-                pytest.test_pdb_teardown_skipped.append("setUp:" + self.id())
+                pytest.track_pdb_teardown_skipped.append("setUp:" + self.id())

             def tearDown(self):
-                pytest.test_pdb_teardown_skipped.append("tearDown:" + self.id())
+                pytest.track_pdb_teardown_skipped.append("tearDown:" + self.id())

             {mark}("skipped for reasons")
             def test_1(self):

@@ -1274,6 +1277,43 @@ def test_pdb_teardown_skipped(
     assert tracked == []


+@pytest.mark.parametrize("mark", ["@unittest.skip", "@pytest.mark.skip"])
+def test_pdb_teardown_skipped_for_classes(
+    pytester: Pytester, monkeypatch: MonkeyPatch, mark: str
+) -> None:
+    """
+    With --pdb, setUp and tearDown should not be called for tests skipped
+    via a decorator on the class (#10060).
+    """
+    tracked: List[str] = []
+    monkeypatch.setattr(pytest, "track_pdb_teardown_skipped", tracked, raising=False)
+
+    pytester.makepyfile(
+        """
+        import unittest
+        import pytest
+
+        {mark}("skipped for reasons")
+        class MyTestCase(unittest.TestCase):
+
+            def setUp(self):
+                pytest.track_pdb_teardown_skipped.append("setUp:" + self.id())
+
+            def tearDown(self):
+                pytest.track_pdb_teardown_skipped.append("tearDown:" + self.id())
+
+            def test_1(self):
+                pass
+
+        """.format(
+            mark=mark
+        )
+    )
+    result = pytester.runpytest_inprocess("--pdb")
+    result.stdout.fnmatch_lines("* 1 skipped in *")
+    assert tracked == []
+
+
 def test_async_support(pytester: Pytester) -> None:
     pytest.importorskip("unittest.async_case")
