Merge branch 'main' into Improvement-remove-prune_dependency_tree

This commit is contained in:
Sadra Barikbin 2023-12-04 18:04:56 +03:30
commit 1491a6e4bb
90 changed files with 3529 additions and 858 deletions

View File

@ -22,7 +22,7 @@ jobs:
pull-requests: write pull-requests: write
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: true persist-credentials: true

View File

@ -20,13 +20,13 @@ jobs:
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false persist-credentials: false
- name: Build and Check Package - name: Build and Check Package
uses: hynek/build-and-inspect-python-package@v1.5 uses: hynek/build-and-inspect-python-package@v1.5.4
deploy: deploy:
if: github.repository == 'pytest-dev/pytest' if: github.repository == 'pytest-dev/pytest'
@ -38,7 +38,7 @@ jobs:
id-token: write id-token: write
contents: write contents: write
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: Download Package - name: Download Package
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -47,14 +47,14 @@ jobs:
path: dist path: dist
- name: Publish package to PyPI - name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@v1.8.10 uses: pypa/gh-action-pypi-publish@v1.8.11
- name: Push tag - name: Push tag
run: | run: |
git config user.name "pytest bot" git config user.name "pytest bot"
git config user.email "pytestbot@gmail.com" git config user.email "pytestbot@gmail.com"
git tag --annotate --message=v${{ github.event.inputs.version }} v${{ github.event.inputs.version }} ${{ github.sha }} git tag --annotate --message=v${{ github.event.inputs.version }} ${{ github.event.inputs.version }} ${{ github.sha }}
git push origin v${{ github.event.inputs.version }} git push origin ${{ github.event.inputs.version }}
release-notes: release-notes:
@ -67,7 +67,7 @@ jobs:
permissions: permissions:
contents: write contents: write
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false persist-credentials: false

View File

@ -27,7 +27,7 @@ jobs:
pull-requests: write pull-requests: write
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0

View File

@ -30,12 +30,12 @@ jobs:
package: package:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false persist-credentials: false
- name: Build and Check Package - name: Build and Check Package
uses: hynek/build-and-inspect-python-package@v1.5 uses: hynek/build-and-inspect-python-package@v1.5.4
build: build:
needs: [package] needs: [package]
@ -167,7 +167,7 @@ jobs:
use_coverage: true use_coverage: true
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false persist-credentials: false

View File

@ -20,7 +20,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0

View File

@ -1,6 +1,6 @@
repos: repos:
- repo: https://github.com/psf/black - repo: https://github.com/psf/black
rev: 23.7.0 rev: 23.11.0
hooks: hooks:
- id: black - id: black
args: [--safe, --quiet] args: [--safe, --quiet]
@ -10,7 +10,7 @@ repos:
- id: blacken-docs - id: blacken-docs
additional_dependencies: [black==23.7.0] additional_dependencies: [black==23.7.0]
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0 rev: v4.5.0
hooks: hooks:
- id: trailing-whitespace - id: trailing-whitespace
- id: end-of-file-fixer - id: end-of-file-fixer
@ -37,17 +37,17 @@ repos:
- flake8-typing-imports==1.12.0 - flake8-typing-imports==1.12.0
- flake8-docstrings==1.5.0 - flake8-docstrings==1.5.0
- repo: https://github.com/asottile/reorder-python-imports - repo: https://github.com/asottile/reorder-python-imports
rev: v3.10.0 rev: v3.12.0
hooks: hooks:
- id: reorder-python-imports - id: reorder-python-imports
args: ['--application-directories=.:src', --py38-plus] args: ['--application-directories=.:src', --py38-plus]
- repo: https://github.com/asottile/pyupgrade - repo: https://github.com/asottile/pyupgrade
rev: v3.10.1 rev: v3.15.0
hooks: hooks:
- id: pyupgrade - id: pyupgrade
args: [--py38-plus] args: [--py38-plus]
- repo: https://github.com/asottile/setup-cfg-fmt - repo: https://github.com/asottile/setup-cfg-fmt
rev: v2.4.0 rev: v2.5.0
hooks: hooks:
- id: setup-cfg-fmt - id: setup-cfg-fmt
args: ["--max-py-version=3.12", "--include-version-classifiers"] args: ["--max-py-version=3.12", "--include-version-classifiers"]
@ -56,7 +56,7 @@ repos:
hooks: hooks:
- id: python-use-type-annotations - id: python-use-type-annotations
- repo: https://github.com/pre-commit/mirrors-mypy - repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.5.1 rev: v1.7.1
hooks: hooks:
- id: mypy - id: mypy
files: ^(src/|testing/) files: ^(src/|testing/)

View File

@ -9,6 +9,10 @@ python:
path: . path: .
- requirements: doc/en/requirements.txt - requirements: doc/en/requirements.txt
sphinx:
configuration: doc/en/conf.py
fail_on_warning: true
build: build:
os: ubuntu-20.04 os: ubuntu-20.04
tools: tools:

AUTHORS
View File

@ -56,6 +56,7 @@ Barney Gale
Ben Gartner Ben Gartner
Ben Webb Ben Webb
Benjamin Peterson Benjamin Peterson
Benjamin Schubert
Bernard Pratz Bernard Pratz
Bo Wu Bo Wu
Bob Ippolito Bob Ippolito
@ -187,6 +188,7 @@ Javier Romero
Jeff Rackauckas Jeff Rackauckas
Jeff Widman Jeff Widman
Jenni Rinker Jenni Rinker
Jens Tröger
John Eddie Ayson John Eddie Ayson
John Litborn John Litborn
John Towler John Towler
@ -235,6 +237,7 @@ Maho
Maik Figura Maik Figura
Mandeep Bhutani Mandeep Bhutani
Manuel Krebber Manuel Krebber
Marc Mueller
Marc Schlaich Marc Schlaich
Marcelo Duarte Trevisani Marcelo Duarte Trevisani
Marcin Bachry Marcin Bachry
@ -265,6 +268,7 @@ Michal Wajszczuk
Michał Zięba Michał Zięba
Mickey Pashov Mickey Pashov
Mihai Capotă Mihai Capotă
Mihail Milushev
Mike Hoyle (hoylemd) Mike Hoyle (hoylemd)
Mike Lundy Mike Lundy
Milan Lesnek Milan Lesnek
@ -272,6 +276,7 @@ Miro Hrončok
Nathaniel Compton Nathaniel Compton
Nathaniel Waisbrot Nathaniel Waisbrot
Ned Batchelder Ned Batchelder
Neil Martin
Neven Mundar Neven Mundar
Nicholas Devenish Nicholas Devenish
Nicholas Murphy Nicholas Murphy
@ -289,6 +294,7 @@ Ondřej Súkup
Oscar Benjamin Oscar Benjamin
Parth Patel Parth Patel
Patrick Hayes Patrick Hayes
Patrick Lannigan
Paul Müller Paul Müller
Paul Reece Paul Reece
Pauli Virtanen Pauli Virtanen
@ -328,12 +334,14 @@ Ronny Pfannschmidt
Ross Lawley Ross Lawley
Ruaridh Williamson Ruaridh Williamson
Russel Winder Russel Winder
Ryan Puddephatt
Ryan Wooden Ryan Wooden
Sadra Barikbin Sadra Barikbin
Saiprasad Kale Saiprasad Kale
Samuel Colvin Samuel Colvin
Samuel Dion-Girardeau Samuel Dion-Girardeau
Samuel Searles-Bryant Samuel Searles-Bryant
Samuel Therrien (Avasam)
Samuele Pedroni Samuele Pedroni
Sanket Duthade Sanket Duthade
Sankt Petersbug Sankt Petersbug
@ -343,7 +351,9 @@ Segev Finer
Serhii Mozghovyi Serhii Mozghovyi
Seth Junot Seth Junot
Shantanu Jain Shantanu Jain
Sharad Nair
Shubham Adep Shubham Adep
Simon Blanchard
Simon Gomizelj Simon Gomizelj
Simon Holesch Simon Holesch
Simon Kerr Simon Kerr
@ -363,6 +373,7 @@ Tadek Teleżyński
Takafumi Arakaki Takafumi Arakaki
Taneli Hukkinen Taneli Hukkinen
Tanvi Mehta Tanvi Mehta
Tanya Agarwal
Tarcisio Fischer Tarcisio Fischer
Tareq Alayan Tareq Alayan
Tatiana Ovary Tatiana Ovary

View File

@ -197,8 +197,9 @@ Short version
~~~~~~~~~~~~~ ~~~~~~~~~~~~~
#. Fork the repository. #. Fork the repository.
#. Fetch tags from upstream if necessary (if you cloned only main, run ``git fetch --tags https://github.com/pytest-dev/pytest``).
#. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed. #. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed.
#. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting. #. Follow `PEP-8 <https://www.python.org/dev/peps/pep-0008/>`_ for naming.
#. Tests are run using ``tox``:: #. Tests are run using ``tox``::
tox -e linting,py39 tox -e linting,py39
@ -236,6 +237,7 @@ Here is a simple overview, with pytest-specific bits:
$ git clone git@github.com:YOUR_GITHUB_USERNAME/pytest.git $ git clone git@github.com:YOUR_GITHUB_USERNAME/pytest.git
$ cd pytest $ cd pytest
$ git fetch --tags https://github.com/pytest-dev/pytest
# now, create your own branch off "main": # now, create your own branch off "main":
$ git checkout -b your-bugfix-branch-name main $ git checkout -b your-bugfix-branch-name main
@ -280,7 +282,7 @@ Here is a simple overview, with pytest-specific bits:
This command will run tests via the "tox" tool against Python 3.9 This command will run tests via the "tox" tool against Python 3.9
and also perform "lint" coding-style checks. and also perform "lint" coding-style checks.
#. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming. #. You can now edit your local working copy and run the tests again as necessary. Please follow `PEP-8 <https://www.python.org/dev/peps/pep-0008/>`_ for naming.
You can pass different options to ``tox``. For example, to run tests on Python 3.9 and pass options to pytest You can pass different options to ``tox``. For example, to run tests on Python 3.9 and pass options to pytest
(e.g. enter pdb on failure) to pytest you can do:: (e.g. enter pdb on failure) to pytest you can do::

View File

@ -20,7 +20,7 @@
:target: https://codecov.io/gh/pytest-dev/pytest :target: https://codecov.io/gh/pytest-dev/pytest
:alt: Code coverage Status :alt: Code coverage Status
.. image:: https://github.com/pytest-dev/pytest/workflows/test/badge.svg .. image:: https://github.com/pytest-dev/pytest/actions/workflows/test.yml/badge.svg
:target: https://github.com/pytest-dev/pytest/actions?query=workflow%3Atest :target: https://github.com/pytest-dev/pytest/actions?query=workflow%3Atest
.. image:: https://results.pre-commit.ci/badge/github/pytest-dev/pytest/main.svg .. image:: https://results.pre-commit.ci/badge/github/pytest-dev/pytest/main.svg

View File

@ -0,0 +1,2 @@
Added :func:`ExceptionInfo.group_contains() <pytest.ExceptionInfo.group_contains>`, an assertion
helper that tests if an `ExceptionGroup` contains a matching exception.
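For orientation, a minimal sketch of how the new helper is meant to be used (requires Python 3.11+ for the built-in ``ExceptionGroup``; the test name is illustrative):

.. code-block:: python

    import pytest


    def test_group_contains_sketch():
        with pytest.raises(ExceptionGroup) as excinfo:
            raise ExceptionGroup("group message", [ValueError("bad value")])
        assert excinfo.group_contains(ValueError, match="bad value")
        assert not excinfo.group_contains(TypeError)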

View File

@ -1,2 +0,0 @@
markers are now considered in the reverse mro order to ensure base class markers are considered first
this resolves a regression.

View File

@ -0,0 +1,2 @@
Added more comprehensive set assertion rewrites for comparisons other than equality ``==``, with
the following operations now providing better failure messages: ``!=``, ``<=``, ``>=``, ``<``, and ``>``.
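As an illustration, a small sketch (the test below fails on purpose so that the rewritten message is shown; names and values are made up):

.. code-block:: python

    def test_subset_failure_message():
        # a failing ``<=`` comparison between sets now reports the offending
        # elements instead of only the two full reprs
        assert {"a", "c"} <= {"a", "b"}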

changelog/11065.doc.rst
View File

@ -0,0 +1,3 @@
Use pytestconfig instead of request.config in cache example
to be consistent with the API documentation.

changelog/11091.doc.rst
View File

@ -0,0 +1 @@
Updated documentation and tests to refer to hyphenated options: replaced ``--junitxml`` with ``--junit-xml`` and ``--collectonly`` with ``--collect-only``.

View File

@ -0,0 +1,11 @@
Sanitized the handling of the ``default`` parameter when defining configuration options.
Previously if ``default`` was not supplied for :meth:`parser.addini <pytest.Parser.addini>` and the configuration option value was not defined in a test session, then calls to :func:`config.getini <pytest.Config.getini>` returned an *empty list* or an *empty string* depending on whether ``type`` was supplied or not respectively, which is clearly incorrect. Also, ``None`` was not honored even if ``default=None`` was used explicitly while defining the option.
Now the behavior of :meth:`parser.addini <pytest.Parser.addini>` is as follows:
* If ``default`` is NOT passed but ``type`` is provided, then a type-specific default will be returned. For example ``type=bool`` will return ``False``, ``type=str`` will return ``""``, etc.
* If ``default=None`` is passed and the option is not defined in a test session, then ``None`` will be returned, regardless of the ``type``.
* If neither ``default`` nor ``type`` are provided, assume ``type=str`` and return ``""`` as default (this is as per previous behavior).
The team decided to not introduce a deprecation period for this change, as doing so would be complicated both in terms of communicating this to the community as well as implementing it, and also because the team believes this change should not break existing plugins except in rare cases.
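A minimal sketch of the described behavior for a conftest-defined option (the option names are hypothetical; note that ``addini`` takes its ``type`` as a string such as ``"bool"``):

.. code-block:: python

    # conftest.py
    def pytest_addoption(parser):
        # no default given: the type-specific default (False for "bool") is returned
        parser.addini("my_flag", help="example boolean option", type="bool")
        # an explicit default=None is now honored and returned as-is
        parser.addini("my_label", help="example string option", default=None)


    def pytest_configure(config):
        assert config.getini("my_flag") is False
        assert config.getini("my_label") is None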

View File

@ -0,0 +1,2 @@
Logging to a file using the ``--log-file`` option will use ``--log-level``, ``--log-format`` and ``--log-date-format`` as fallback
if ``--log-file-level``, ``--log-file-format`` and ``--log-file-date-format`` are not provided respectively.

View File

@ -0,0 +1,3 @@
The :fixture:`pytester` fixture now uses the :fixture:`monkeypatch` fixture to manage the current working directory.
If you use ``pytester`` in combination with :func:`monkeypatch.undo() <pytest.MonkeyPatch.undo>`, the CWD might get restored.
Use :func:`monkeypatch.context() <pytest.MonkeyPatch.context>` instead.
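A minimal sketch of the recommended pattern, assuming the ``pytester`` plugin is enabled in the test suite (the environment variable is hypothetical):

.. code-block:: python

    def test_pytester_with_patching(pytester, monkeypatch):
        pytester.makepyfile("def test_ok(): pass")
        # use a context instead of monkeypatch.undo(), so the CWD handling that
        # pytester now delegates to monkeypatch is left untouched
        with monkeypatch.context() as mp:
            mp.setenv("EXAMPLE_VAR", "1")
            result = pytester.runpytest()
        result.assert_outcomes(passed=1)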

View File

@ -0,0 +1,5 @@
Added the new :confval:`verbosity_assertions` configuration option for fine-grained control of failed assertions verbosity.
See :ref:`Fine-grained verbosity <pytest.fine_grained_verbosity>` for more details.
For plugin authors, :attr:`config.get_verbosity <pytest.Config.get_verbosity>` can be used to retrieve the verbosity level for a specific verbosity type.
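For plugin authors, a hedged sketch of querying the assertion verbosity; only ``config.get_verbosity`` is named above, so the ``Config.VERBOSITY_ASSERTIONS`` constant used here is an assumption on my part:

.. code-block:: python

    import pytest


    def pytest_configure(config: pytest.Config) -> None:
        # fall back to the global verbosity if the constant is named differently
        verbosity_type = getattr(pytest.Config, "VERBOSITY_ASSERTIONS", None)
        level = config.get_verbosity(verbosity_type)
        if level >= 2:
            ...  # a plugin could enable its own extra-verbose assertion output here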

View File

@ -0,0 +1 @@
:func:`pytest.deprecated_call` now also considers warnings of type :class:`FutureWarning`.
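A minimal sketch of what this enables (the warning text is arbitrary):

.. code-block:: python

    import warnings

    import pytest


    def test_deprecated_call_accepts_future_warning():
        with pytest.deprecated_call():
            warnings.warn("this behaviour will change", FutureWarning)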

View File

@ -0,0 +1,4 @@
Parametrized tests now *really do* ensure that the ids given to each input are unique - for
example, ``a, a, a0`` now results in ``a1, a2, a0`` instead of the previous (buggy) ``a0, a1, a0``.
This necessarily means changing nodeids where these were previously colliding, and for
readability adds an underscore when non-unique ids end in a number.
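A small sketch of the scenario described above (parameter values are arbitrary):

.. code-block:: python

    import pytest


    @pytest.mark.parametrize("value", [1, 2, 3], ids=["a", "a", "a0"])
    def test_unique_ids(value):
        # the collected node ids now end in a1, a2 and a0
        # instead of the previously colliding a0, a1, a0
        ...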

View File

@ -0,0 +1 @@
Improved very verbose diff output to color it as a diff instead of only red.

View File

@ -0,0 +1 @@
Fixed crash when using an empty string for the same parametrized value more than once.
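A minimal reproducer of the previously crashing scenario, as I read the entry (values are arbitrary):

.. code-block:: python

    import pytest


    @pytest.mark.parametrize("value", ["", ""])
    def test_repeated_empty_string(value):
        assert value == ""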

View File

@ -0,0 +1 @@
Handle an edge case where :data:`sys.stderr` and :data:`sys.__stderr__` might already be closed when :ref:`faulthandler` is tearing down.

View File

@ -0,0 +1 @@
Improved the documentation and type signature for :func:`pytest.mark.xfail <pytest.mark.xfail>`'s ``condition`` param to use ``False`` as the default value.

View File

@ -0,0 +1,2 @@
Added :func:`LogCaptureFixture.filtering() <pytest.LogCaptureFixture.filtering>` context manager that
adds a given :class:`logging.Filter` object to the caplog fixture.
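A minimal sketch of the new context manager, using a standard :class:`logging.Filter` subclass (the filter shown is made up):

.. code-block:: python

    import logging


    class OnlyErrors(logging.Filter):
        def filter(self, record: logging.LogRecord) -> bool:
            return record.levelno >= logging.ERROR


    def test_caplog_filtering_sketch(caplog):
        with caplog.filtering(OnlyErrors()):
            logging.getLogger().warning("dropped")
            logging.getLogger().error("kept")
        assert "kept" in caplog.text
        assert "dropped" not in caplog.text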

View File

@ -0,0 +1 @@
Fixed the selftests to pass correctly if ``FORCE_COLOR``, ``NO_COLOR`` or ``PY_COLORS`` is set in the calling environment.

View File

@ -0,0 +1,4 @@
Improved the very verbose diff for all standard library container types: the indentation is now consistent and the markers are on their own separate lines, which should reduce the diffs shown to users.
Previously, the default Python pretty printer was used to generate the output, which puts opening and closing
markers on the same line as the first/last entry, in addition to not having consistent indentation.

View File

@ -14,7 +14,7 @@ Each file should be named like ``<ISSUE>.<TYPE>.rst``, where
``<ISSUE>`` is an issue number, and ``<TYPE>`` is one of: ``<ISSUE>`` is an issue number, and ``<TYPE>`` is one of:
* ``feature``: new user facing features, like new command-line options and new behavior. * ``feature``: new user facing features, like new command-line options and new behavior.
* ``improvement``: improvement of existing functionality, usually without requiring user intervention (for example, new fields being written in ``--junitxml``, improved colors in terminal, etc). * ``improvement``: improvement of existing functionality, usually without requiring user intervention (for example, new fields being written in ``--junit-xml``, improved colors in terminal, etc).
* ``bugfix``: fixes a bug. * ``bugfix``: fixes a bug.
* ``doc``: documentation improvement, like rewording an entire session or adding missing docs. * ``doc``: documentation improvement, like rewording an entire session or adding missing docs.
* ``deprecation``: feature deprecation. * ``deprecation``: feature deprecation.

View File

@ -6,6 +6,7 @@ Release announcements
:maxdepth: 2 :maxdepth: 2
release-7.4.3
release-7.4.2 release-7.4.2
release-7.4.1 release-7.4.1
release-7.4.0 release-7.4.0

View File

@ -0,0 +1,19 @@
pytest-7.4.3
=======================================
pytest 7.4.3 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Bruno Oliveira
* Marc Mueller
Happy testing,
The pytest Development Team

View File

@ -28,6 +28,21 @@ with advance notice in the **Deprecations** section of releases.
.. towncrier release notes start .. towncrier release notes start
pytest 7.4.3 (2023-10-24)
=========================
Bug Fixes
---------
- `#10447 <https://github.com/pytest-dev/pytest/issues/10447>`_: Markers are now considered in the reverse mro order to ensure base class markers are considered first -- this resolves a regression.
- `#11239 <https://github.com/pytest-dev/pytest/issues/11239>`_: Fixed ``:=`` in asserts impacting unrelated test cases.
- `#11439 <https://github.com/pytest-dev/pytest/issues/11439>`_: Handled an edge case where :data:`sys.stderr` might already be closed when :ref:`faulthandler` is tearing down.
pytest 7.4.2 (2023-09-07) pytest 7.4.2 (2023-09-07)
========================= =========================

View File

@ -645,7 +645,7 @@ By using ``legacy`` you will keep using the legacy/xunit1 format when upgrading
pytest 6.0, where the default format will be ``xunit2``. pytest 6.0, where the default format will be ``xunit2``.
In order to let users know about the transition, pytest will issue a warning in case In order to let users know about the transition, pytest will issue a warning in case
the ``--junitxml`` option is given in the command line but ``junit_family`` is not explicitly the ``--junit-xml`` option is given in the command line but ``junit_family`` is not explicitly
configured in ``pytest.ini``. configured in ``pytest.ini``.
Services known to support the ``xunit2`` format: Services known to support the ``xunit2`` format:

View File

@ -136,7 +136,7 @@ Or select multiple nodes:
Node IDs for failing tests are displayed in the test summary info Node IDs for failing tests are displayed in the test summary info
when running pytest with the ``-rf`` option. You can also when running pytest with the ``-rf`` option. You can also
construct Node IDs from the output of ``pytest --collectonly``. construct Node IDs from the output of ``pytest --collect-only``.
Using ``-k expr`` to select tests based on their name Using ``-k expr`` to select tests based on their name
------------------------------------------------------- -------------------------------------------------------

View File

@ -1090,4 +1090,4 @@ application with standard ``pytest`` command-line options:
.. code-block:: bash .. code-block:: bash
./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/

View File

@ -22,7 +22,7 @@ Install ``pytest``
.. code-block:: bash .. code-block:: bash
$ pytest --version $ pytest --version
pytest 7.4.2 pytest 7.4.3
.. _`simpletest`: .. _`simpletest`:
@ -97,6 +97,30 @@ Use the :ref:`raises <assertraises>` helper to assert that some code raises an e
with pytest.raises(SystemExit): with pytest.raises(SystemExit):
f() f()
You can also use the context provided by :ref:`raises <assertraises>` to
assert that an expected exception is part of a raised ``ExceptionGroup``:
.. code-block:: python
# content of test_exceptiongroup.py
import pytest
def f():
raise ExceptionGroup(
"Group message",
[
RuntimeError(),
],
)
def test_exception_in_group():
with pytest.raises(ExceptionGroup) as excinfo:
f()
assert excinfo.group_contains(RuntimeError)
assert not excinfo.group_contains(TypeError)
Execute the test function with “quiet” reporting mode: Execute the test function with “quiet” reporting mode:
.. code-block:: pytest .. code-block:: pytest

View File

@ -98,6 +98,27 @@ and if you need to have access to the actual exception info you may use:
the actual exception raised. The main attributes of interest are the actual exception raised. The main attributes of interest are
``.type``, ``.value`` and ``.traceback``. ``.type``, ``.value`` and ``.traceback``.
Note that ``pytest.raises`` will match the exception type or any subclasses (like the standard ``except`` statement).
If you want to check if a block of code is raising an exact exception type, you need to check that explicitly:
.. code-block:: python
def test_foo_not_implemented():
def foo():
raise NotImplementedError
with pytest.raises(RuntimeError) as excinfo:
foo()
assert excinfo.type is RuntimeError
The :func:`pytest.raises` call will succeed, even though the function raises :class:`NotImplementedError`, because
:class:`NotImplementedError` is a subclass of :class:`RuntimeError`; however the following `assert` statement will
catch the problem.
Matching exception messages
~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can pass a ``match`` keyword parameter to the context-manager to test You can pass a ``match`` keyword parameter to the context-manager to test
that a regular expression matches on the string representation of an exception that a regular expression matches on the string representation of an exception
(similar to the ``TestCase.assertRaisesRegex`` method from ``unittest``): (similar to the ``TestCase.assertRaisesRegex`` method from ``unittest``):
@ -115,36 +136,111 @@ that a regular expression matches on the string representation of an exception
with pytest.raises(ValueError, match=r".* 123 .*"): with pytest.raises(ValueError, match=r".* 123 .*"):
myfunc() myfunc()
The regexp parameter of the ``match`` method is matched with the ``re.search``
function, so in the above example ``match='123'`` would have worked as
well.
There's an alternate form of the :func:`pytest.raises` function where you pass
a function that will be executed with the given ``*args`` and ``**kwargs`` and
assert that the given exception is raised:
Notes:
* The ``match`` parameter is matched with the :func:`re.search`
  function, so in the above example ``match='123'`` would have worked as well.
* The ``match`` parameter also matches against `PEP-678 <https://peps.python.org/pep-0678/>`__ ``__notes__``.
Matching exception groups
~~~~~~~~~~~~~~~~~~~~~~~~~
You can also use the :func:`excinfo.group_contains() <pytest.ExceptionInfo.group_contains>`
method to test for exceptions returned as part of an ``ExceptionGroup``:
.. code-block:: python .. code-block:: python
pytest.raises(ExpectedException, func, *args, **kwargs)
def test_exception_in_group():
with pytest.raises(RuntimeError) as excinfo:
raise ExceptionGroup(
"Group message",
[
RuntimeError("Exception 123 raised"),
],
)
assert excinfo.group_contains(RuntimeError, match=r".* 123 .*")
assert not excinfo.group_contains(TypeError)
The optional ``match`` keyword parameter works the same way as for
:func:`pytest.raises`.
By default ``group_contains()`` will recursively search for a matching
exception at any level of nested ``ExceptionGroup`` instances. You can
specify a ``depth`` keyword parameter if you only want to match an
exception at a specific level; exceptions contained directly in the top
``ExceptionGroup`` would match ``depth=1``.
.. code-block:: python
def test_exception_in_group_at_given_depth():
with pytest.raises(RuntimeError) as excinfo:
raise ExceptionGroup(
"Group message",
[
RuntimeError(),
ExceptionGroup(
"Nested group",
[
TypeError(),
],
),
],
)
assert excinfo.group_contains(RuntimeError, depth=1)
assert excinfo.group_contains(TypeError, depth=2)
assert not excinfo.group_contains(RuntimeError, depth=2)
assert not excinfo.group_contains(TypeError, depth=1)
Alternate form (legacy)
~~~~~~~~~~~~~~~~~~~~~~~
There is an alternate form where you pass
a function that will be executed, along ``*args`` and ``**kwargs``, and :func:`pytest.raises`
will execute the function with the arguments and assert that the given exception is raised:
.. code-block:: python
def func(x):
if x <= 0:
raise ValueError("x needs to be larger than zero")
pytest.raises(ValueError, func, x=-1)
The reporter will provide you with helpful output in case of failures such as *no The reporter will provide you with helpful output in case of failures such as *no
exception* or *wrong exception*. exception* or *wrong exception*.
Note that it is also possible to specify a "raises" argument to
``pytest.mark.xfail``, which checks that the test is failing in a more
specific way than just having any exception raised:
This form was the original :func:`pytest.raises` API, developed before the ``with`` statement was
added to the Python language. Nowadays, this form is rarely used, with the context-manager form (using ``with``)
being considered more readable.
Nonetheless, this form is fully supported and not deprecated in any way.
xfail mark and pytest.raises
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is also possible to specify a ``raises`` argument to
:ref:`pytest.mark.xfail <pytest.mark.xfail ref>`, which checks that the test is failing in a more
specific way than just having any exception raised: specific way than just having any exception raised:
.. code-block:: python .. code-block:: python
def f():
raise IndexError()
@pytest.mark.xfail(raises=IndexError) @pytest.mark.xfail(raises=IndexError)
def test_f(): def test_f():
f() f()
Using :func:`pytest.raises` is likely to be better for cases where you are
testing exceptions your own code is deliberately raising, whereas using
``@pytest.mark.xfail`` with a check function is probably better for something
like documenting unfixed bugs (where the test describes what "should" happen)
or bugs in dependencies.
This will only "xfail" if the test fails by raising ``IndexError`` or subclasses.
* Using :ref:`pytest.mark.xfail <pytest.mark.xfail ref>` with the ``raises`` parameter is probably better for something
  like documenting unfixed bugs (where the test describes what "should" happen) or bugs in dependencies.
* Using :func:`pytest.raises` is likely to be better for cases where you are
  testing exceptions your own code is deliberately raising, which is the majority of cases.
.. _`assertwarns`: .. _`assertwarns`:

View File

@ -213,12 +213,12 @@ across pytest invocations:
@pytest.fixture @pytest.fixture
def mydata(request): def mydata(pytestconfig):
val = request.config.cache.get("example/value", None) val = pytestconfig.cache.get("example/value", None)
if val is None: if val is None:
expensive_computation() expensive_computation()
val = 42 val = 42
request.config.cache.set("example/value", val) pytestconfig.cache.set("example/value", val)
return val return val
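A hypothetical companion test for the fixture above; on the first run the expensive computation executes, and later runs return the cached value:

.. code-block:: python

    def test_mydata(mydata):
        assert mydata == 42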

View File

@ -241,7 +241,7 @@ through ``add_color_level()``. Example:
.. code-block:: python .. code-block:: python
@pytest.hookimpl @pytest.hookimpl(trylast=True)
def pytest_configure(config): def pytest_configure(config):
logging_plugin = config.pluginmanager.get_plugin("logging-plugin") logging_plugin = config.pluginmanager.get_plugin("logging-plugin")

View File

@ -16,6 +16,12 @@ Examples for modifying traceback printing:
pytest -l # show local variables (shortcut) pytest -l # show local variables (shortcut)
pytest --no-showlocals # hide local variables (if addopts enables them) pytest --no-showlocals # hide local variables (if addopts enables them)
pytest --capture=fd # default, capture at the file descriptor level
pytest --capture=sys # capture at the sys level
pytest --capture=no # don't capture
pytest -s # don't capture (shortcut)
pytest --capture=tee-sys # capture to logs but also output to sys level streams
pytest --tb=auto # (default) 'long' tracebacks for the first and last pytest --tb=auto # (default) 'long' tracebacks for the first and last
# entry, but 'short' style for the other entries # entry, but 'short' style for the other entries
pytest --tb=long # exhaustive, informative traceback formatting pytest --tb=long # exhaustive, informative traceback formatting
@ -36,6 +42,16 @@ option you make sure a trace is shown.
Verbosity Verbosity
-------------------------------------------------- --------------------------------------------------
Examples for modifying printing verbosity:
.. code-block:: bash
pytest --quiet # quiet - less verbose - mode
pytest -q # quiet - less verbose - mode (shortcut)
pytest -v # increase verbosity, display individual test names
pytest -vv # more verbose, display more details from the test output
pytest -vvv # not a standard level, but may be used for even more detail in certain setups
The ``-v`` flag controls the verbosity of pytest output in various aspects: test session progress, assertion The ``-v`` flag controls the verbosity of pytest output in various aspects: test session progress, assertion
details when tests fail, fixtures details with ``--fixtures``, etc. details when tests fail, fixtures details with ``--fixtures``, etc.
@ -270,6 +286,20 @@ situations, for example you are shown even fixtures that start with ``_`` if you
Using higher verbosity levels (``-vvv``, ``-vvvv``, ...) is supported, but has no effect in pytest itself at the moment, Using higher verbosity levels (``-vvv``, ``-vvvv``, ...) is supported, but has no effect in pytest itself at the moment,
however some plugins might make use of higher verbosity. however some plugins might make use of higher verbosity.
.. _`pytest.fine_grained_verbosity`:
Fine-grained verbosity
~~~~~~~~~~~~~~~~~~~~~~
In addition to specifying the application wide verbosity level, it is possible to control specific aspects independently.
This is done by setting a verbosity level in the configuration file for the specific aspect of the output.
:confval:`verbosity_assertions`: Controls how verbose the assertion output should be when pytest is executed. Running
``pytest --no-header`` with a value of ``2`` would have the same output as the previous example, but each test inside
the file is shown by a single character in the output.
(Note: currently this is the only option available, but more might be added in the future).
.. _`pytest.detailed_failed_tests_usage`: .. _`pytest.detailed_failed_tests_usage`:
Producing a detailed summary report Producing a detailed summary report
@ -478,7 +508,7 @@ integration servers, use this invocation:
.. code-block:: bash .. code-block:: bash
pytest --junitxml=path pytest --junit-xml=path
to create an XML file at ``path``. to create an XML file at ``path``.

View File

@ -90,7 +90,7 @@ and can also be used to hold pytest configuration if they have a ``[pytest]`` se
setup.cfg setup.cfg
~~~~~~~~~ ~~~~~~~~~
``setup.cfg`` files are general purpose configuration files, used originally by :doc:`distutils <python:distutils/configfile>`, and can also be used to hold pytest configuration ``setup.cfg`` files are general purpose configuration files, used originally by ``distutils`` (now deprecated) and `setuptools <https://setuptools.pypa.io/en/latest/userguide/declarative_config.html>`__, and can also be used to hold pytest configuration
if they have a ``[tool:pytest]`` section. if they have a ``[tool:pytest]`` section.
.. code-block:: ini .. code-block:: ini

File diff suppressed because it is too large

View File

@ -1,3 +1,5 @@
:tocdepth: 3
.. _`api-reference`: .. _`api-reference`:
API Reference API Reference
@ -237,17 +239,18 @@ pytest.mark.xfail
Marks a test function as *expected to fail*. Marks a test function as *expected to fail*.
.. py:function:: pytest.mark.xfail(condition=None, *, reason=None, raises=None, run=True, strict=xfail_strict) .. py:function:: pytest.mark.xfail(condition=False, *, reason=None, raises=None, run=True, strict=xfail_strict)
:type condition: bool or str :keyword Union[bool, str] condition:
:param condition:
Condition for marking the test function as xfail (``True/False`` or a Condition for marking the test function as xfail (``True/False`` or a
:ref:`condition string <string conditions>`). If a bool, you also have :ref:`condition string <string conditions>`). If a ``bool``, you also have
to specify ``reason`` (see :ref:`condition string <string conditions>`). to specify ``reason`` (see :ref:`condition string <string conditions>`).
:keyword str reason: :keyword str reason:
Reason why the test function is marked as xfail. Reason why the test function is marked as xfail.
:keyword Type[Exception] raises: :keyword Type[Exception] raises:
Exception subclass (or tuple of subclasses) expected to be raised by the test function; other exceptions will fail the test. Exception class (or tuple of classes) expected to be raised by the test function; other exceptions will fail the test.
Note that subclasses of the classes passed will also result in a match (similar to how the ``except`` statement works).
:keyword bool run: :keyword bool run:
Whether the test function should actually be executed. If ``False``, the function will always xfail and will Whether the test function should actually be executed. If ``False``, the function will always xfail and will
not be executed (useful if a function is segfaulting). not be executed (useful if a function is segfaulting).
@ -1819,6 +1822,19 @@ passed multiple times. The expected format is ``name=value``. For example::
clean_db clean_db
.. confval:: verbosity_assertions
Set a verbosity level specifically for assertion related output, overriding the application wide level.
.. code-block:: ini
[pytest]
verbosity_assertions = 2
Defaults to application wide verbosity level (via the ``-v`` command-line option). A special value of
"auto" can be used to explicitly use the global verbosity level.
.. confval:: xfail_strict .. confval:: xfail_strict
If set to ``True``, tests marked with ``@pytest.mark.xfail`` that actually succeed will by default fail the If set to ``True``, tests marked with ``@pytest.mark.xfail`` that actually succeed will by default fail the

View File

@ -2,7 +2,7 @@ pallets-sphinx-themes
pluggy>=1.2.0 pluggy>=1.2.0
pygments-pytest>=2.3.0 pygments-pytest>=2.3.0
sphinx-removed-in>=0.2.0 sphinx-removed-in>=0.2.0
sphinx>=5,<6 sphinx>=5,<8
sphinxcontrib-trio sphinxcontrib-trio
sphinxcontrib-svg2pdfconverter sphinxcontrib-svg2pdfconverter
# Pin packaging because it no longer handles 'latest' version, which # Pin packaging because it no longer handles 'latest' version, which

View File

@ -31,16 +31,22 @@ class InvalidFeatureRelease(Exception):
SLUG = "pytest-dev/pytest" SLUG = "pytest-dev/pytest"
PR_BODY = """\ PR_BODY = """\
Created by the [prepare release pr](https://github.com/pytest-dev/pytest/actions/workflows/prepare-release-pr.yml) Created by the [prepare release pr]\
workflow. (https://github.com/pytest-dev/pytest/actions/workflows/prepare-release-pr.yml) workflow.
Once all builds pass and it has been **approved** by one or more maintainers, Once all builds pass and it has been **approved** by one or more maintainers, start the \
start the [deploy](https://github.com/pytest-dev/pytest/actions/workflows/deploy.yml) workflow, using these parameters: [deploy](https://github.com/pytest-dev/pytest/actions/workflows/deploy.yml) workflow, using these parameters:
* `Use workflow from`: `release-{version}`. * `Use workflow from`: `release-{version}`.
* `Release version`: `{version}`. * `Release version`: `{version}`.
After the `deploy` workflow has been approved by a core maintainer, the package will be uploaded to PyPI automatically. Or execute on the command line:
```console
gh workflow run deploy.yml -r release-{version} -f version={version}
```
After the workflow has been approved by a core maintainer, the package will be uploaded to PyPI automatically.
""" """

View File

@ -57,6 +57,7 @@ DEVELOPMENT_STATUS_CLASSIFIERS = (
ADDITIONAL_PROJECTS = { # set of additional projects to consider as plugins ADDITIONAL_PROJECTS = { # set of additional projects to consider as plugins
"logassert", "logassert",
"nuts", "nuts",
"flask_fixture",
} }

View File

@ -697,6 +697,14 @@ class ExceptionInfo(Generic[E]):
) )
return fmt.repr_excinfo(self) return fmt.repr_excinfo(self)
def _stringify_exception(self, exc: BaseException) -> str:
return "\n".join(
[
str(exc),
*getattr(exc, "__notes__", []),
]
)
def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]": def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]":
"""Check whether the regular expression `regexp` matches the string """Check whether the regular expression `regexp` matches the string
representation of the exception using :func:`python:re.search`. representation of the exception using :func:`python:re.search`.
@ -704,12 +712,7 @@ class ExceptionInfo(Generic[E]):
If it matches `True` is returned, otherwise an `AssertionError` is raised. If it matches `True` is returned, otherwise an `AssertionError` is raised.
""" """
__tracebackhide__ = True __tracebackhide__ = True
value = "\n".join( value = self._stringify_exception(self.value)
[
str(self.value),
*getattr(self.value, "__notes__", []),
]
)
msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}" msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}"
if regexp == value: if regexp == value:
msg += "\n Did you mean to `re.escape()` the regex?" msg += "\n Did you mean to `re.escape()` the regex?"
@ -717,6 +720,69 @@ class ExceptionInfo(Generic[E]):
# Return True to allow for "assert excinfo.match()". # Return True to allow for "assert excinfo.match()".
return True return True
def _group_contains(
self,
exc_group: BaseExceptionGroup[BaseException],
expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]],
match: Union[str, Pattern[str], None],
target_depth: Optional[int] = None,
current_depth: int = 1,
) -> bool:
"""Return `True` if a `BaseExceptionGroup` contains a matching exception."""
if (target_depth is not None) and (current_depth > target_depth):
# already descended past the target depth
return False
for exc in exc_group.exceptions:
if isinstance(exc, BaseExceptionGroup):
if self._group_contains(
exc, expected_exception, match, target_depth, current_depth + 1
):
return True
if (target_depth is not None) and (current_depth != target_depth):
# not at the target depth, no match
continue
if not isinstance(exc, expected_exception):
continue
if match is not None:
value = self._stringify_exception(exc)
if not re.search(match, value):
continue
return True
return False
def group_contains(
self,
expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]],
*,
match: Union[str, Pattern[str], None] = None,
depth: Optional[int] = None,
) -> bool:
"""Check whether a captured exception group contains a matching exception.
:param Type[BaseException] | Tuple[Type[BaseException]] expected_exception:
The expected exception type, or a tuple if one of multiple possible
exception types are expected.
:param str | Pattern[str] | None match:
If specified, a string containing a regular expression,
or a regular expression object, that is tested against the string
representation of the exception and its `PEP-678 <https://peps.python.org/pep-0678/>` `__notes__`
using :func:`re.search`.
To match a literal string that may contain :ref:`special characters
<re-syntax>`, the pattern can first be escaped with :func:`re.escape`.
:param Optional[int] depth:
If `None`, will search for a matching exception at any nesting depth.
If >= 1, will only match an exception if it's at the specified depth (depth = 1 being
the exceptions contained within the topmost exception group).
"""
msg = "Captured exception is not an instance of `BaseExceptionGroup`"
assert isinstance(self.value, BaseExceptionGroup), msg
msg = "`depth` must be >= 1 if specified"
assert (depth is None) or (depth >= 1), msg
return self._group_contains(self.value, expected_exception, match, depth)
@dataclasses.dataclass @dataclasses.dataclass
class FormattedExcinfo: class FormattedExcinfo:

src/_pytest/_io/pprint.py
View File

@ -0,0 +1,701 @@
# This module was imported from the cpython standard library
# (https://github.com/python/cpython/) at commit
# c5140945c723ae6c4b7ee81ff720ac8ea4b52cfd (python3.12).
#
#
# Original Author: Fred L. Drake, Jr.
# fdrake@acm.org
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
import collections as _collections
import dataclasses as _dataclasses
import re
import types as _types
from io import StringIO as _StringIO
from typing import Any
from typing import Callable
from typing import Dict
from typing import IO
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
class _safe_key:
"""Helper function for key functions when sorting unorderable objects.
The wrapped-object will fallback to a Py2.x style comparison for
unorderable types (sorting first comparing the type name and then by
the obj ids). Does not work recursively, so dict.items() must have
_safe_key applied to both the key and the value.
"""
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
return self.obj < other.obj
except TypeError:
return (str(type(self.obj)), id(self.obj)) < (
str(type(other.obj)),
id(other.obj),
)
def _safe_tuple(t):
"""Helper function for comparing 2-tuples"""
return _safe_key(t[0]), _safe_key(t[1])
class PrettyPrinter:
def __init__(
self,
indent: int = 4,
width: int = 80,
depth: Optional[int] = None,
*,
sort_dicts: bool = True,
underscore_numbers: bool = False,
) -> None:
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
sort_dicts
If true, dict keys are sorted.
"""
indent = int(indent)
width = int(width)
if indent < 0:
raise ValueError("indent must be >= 0")
if depth is not None and depth <= 0:
raise ValueError("depth must be > 0")
if not width:
raise ValueError("width must be != 0")
self._depth = depth
self._indent_per_level = indent
self._width = width
self._sort_dicts = sort_dicts
self._underscore_numbers = underscore_numbers
def pformat(self, object: Any) -> str:
sio = _StringIO()
self._format(object, sio, 0, 0, set(), 0)
return sio.getvalue()
def _format(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
objid = id(object)
if objid in context:
stream.write(_recursion(object))
return
p = self._dispatch.get(type(object).__repr__, None)
if p is not None:
context.add(objid)
p(self, object, stream, indent, allowance, context, level + 1)
context.remove(objid)
elif (
_dataclasses.is_dataclass(object)
and not isinstance(object, type)
and object.__dataclass_params__.repr
and
# Check dataclass has generated repr method.
hasattr(object.__repr__, "__wrapped__")
and "__create_fn__" in object.__repr__.__wrapped__.__qualname__
):
context.add(objid)
self._pprint_dataclass(
object, stream, indent, allowance, context, level + 1
)
context.remove(objid)
else:
stream.write(self._repr(object, context, level))
def _pprint_dataclass(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
cls_name = object.__class__.__name__
items = [
(f.name, getattr(object, f.name))
for f in _dataclasses.fields(object)
if f.repr
]
stream.write(cls_name + "(")
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(")")
_dispatch: Dict[
Callable[..., str],
Callable[["PrettyPrinter", Any, IO[str], int, int, Set[int], int], None],
] = {}
def _pprint_dict(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
write = stream.write
write("{")
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
self._format_dict_items(items, stream, indent, allowance, context, level)
write("}")
_dispatch[dict.__repr__] = _pprint_dict
def _pprint_ordered_dict(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + "(")
self._pprint_dict(object, stream, indent, allowance, context, level)
stream.write(")")
_dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict
def _pprint_list(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
stream.write("[")
self._format_items(object, stream, indent, allowance, context, level)
stream.write("]")
_dispatch[list.__repr__] = _pprint_list
def _pprint_tuple(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
stream.write("(")
self._format_items(object, stream, indent, allowance, context, level)
stream.write(")")
_dispatch[tuple.__repr__] = _pprint_tuple
def _pprint_set(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
if not len(object):
stream.write(repr(object))
return
typ = object.__class__
if typ is set:
stream.write("{")
endchar = "}"
else:
stream.write(typ.__name__ + "({")
endchar = "})"
object = sorted(object, key=_safe_key)
self._format_items(object, stream, indent, allowance, context, level)
stream.write(endchar)
_dispatch[set.__repr__] = _pprint_set
_dispatch[frozenset.__repr__] = _pprint_set
def _pprint_str(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
write = stream.write
if not len(object):
write(repr(object))
return
chunks = []
lines = object.splitlines(True)
if level == 1:
indent += 1
allowance += 1
max_width1 = max_width = self._width - indent
for i, line in enumerate(lines):
rep = repr(line)
if i == len(lines) - 1:
max_width1 -= allowance
if len(rep) <= max_width1:
chunks.append(rep)
else:
# A list of alternating (non-space, space) strings
parts = re.findall(r"\S*\s*", line)
assert parts
assert not parts[-1]
parts.pop() # drop empty last part
max_width2 = max_width
current = ""
for j, part in enumerate(parts):
candidate = current + part
if j == len(parts) - 1 and i == len(lines) - 1:
max_width2 -= allowance
if len(repr(candidate)) > max_width2:
if current:
chunks.append(repr(current))
current = part
else:
current = candidate
if current:
chunks.append(repr(current))
if len(chunks) == 1:
write(rep)
return
if level == 1:
write("(")
for i, rep in enumerate(chunks):
if i > 0:
write("\n" + " " * indent)
write(rep)
if level == 1:
write(")")
_dispatch[str.__repr__] = _pprint_str
def _pprint_bytes(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
write = stream.write
if len(object) <= 4:
write(repr(object))
return
parens = level == 1
if parens:
indent += 1
allowance += 1
write("(")
delim = ""
for rep in _wrap_bytes_repr(object, self._width - indent, allowance):
write(delim)
write(rep)
if not delim:
delim = "\n" + " " * indent
if parens:
write(")")
_dispatch[bytes.__repr__] = _pprint_bytes
def _pprint_bytearray(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
write = stream.write
write("bytearray(")
self._pprint_bytes(
bytes(object), stream, indent + 10, allowance + 1, context, level + 1
)
write(")")
_dispatch[bytearray.__repr__] = _pprint_bytearray
def _pprint_mappingproxy(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
stream.write("mappingproxy(")
self._format(object.copy(), stream, indent, allowance, context, level)
stream.write(")")
_dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy
def _pprint_simplenamespace(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
if type(object) is _types.SimpleNamespace:
# The SimpleNamespace repr is "namespace" instead of the class
# name, so we do the same here. For subclasses; use the class name.
cls_name = "namespace"
else:
cls_name = object.__class__.__name__
items = object.__dict__.items()
stream.write(cls_name + "(")
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(")")
_dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
def _format_dict_items(
self,
items: List[Tuple[Any, Any]],
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
if not items:
return
write = stream.write
item_indent = indent + self._indent_per_level
delimnl = "\n" + " " * item_indent
for key, ent in items:
write(delimnl)
write(self._repr(key, context, level))
write(": ")
self._format(ent, stream, item_indent, 1, context, level)
write(",")
write("\n" + " " * indent)
def _format_namespace_items(
self,
items: List[Tuple[Any, Any]],
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
if not items:
return
write = stream.write
item_indent = indent + self._indent_per_level
delimnl = "\n" + " " * item_indent
for key, ent in items:
write(delimnl)
write(key)
write("=")
if id(ent) in context:
# Special-case representation of recursion to match standard
# recursive dataclass repr.
write("...")
else:
self._format(
ent,
stream,
item_indent + len(key) + 1,
1,
context,
level,
)
write(",")
write("\n" + " " * indent)
def _format_items(
self,
items: List[Any],
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
if not items:
return
write = stream.write
item_indent = indent + self._indent_per_level
delimnl = "\n" + " " * item_indent
for item in items:
write(delimnl)
self._format(item, stream, item_indent, 1, context, level)
write(",")
write("\n" + " " * indent)
def _repr(self, object: Any, context: Set[int], level: int) -> str:
return self.format(object, context.copy(), self._depth, level)
def format(
self, object: Any, context: Set[int], maxlevels: Optional[int], level: int
) -> str:
return self._safe_repr(object, context, maxlevels, level)
def _pprint_default_dict(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
rdf = self._repr(object.default_factory, context, level)
stream.write(f"{object.__class__.__name__}({rdf}, ")
self._pprint_dict(object, stream, indent, allowance, context, level)
stream.write(")")
_dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict
def _pprint_counter(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
stream.write(object.__class__.__name__ + "(")
if object:
stream.write("{")
items = object.most_common()
self._format_dict_items(items, stream, indent, allowance, context, level)
stream.write("}")
stream.write(")")
_dispatch[_collections.Counter.__repr__] = _pprint_counter
def _pprint_chain_map(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
if not len(object.maps) or (len(object.maps) == 1 and not len(object.maps[0])):
stream.write(repr(object))
return
stream.write(object.__class__.__name__ + "(")
self._format_items(object.maps, stream, indent, allowance, context, level)
stream.write(")")
_dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map
def _pprint_deque(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
stream.write(object.__class__.__name__ + "(")
if object.maxlen is not None:
stream.write("maxlen=%d, " % object.maxlen)
stream.write("[")
self._format_items(object, stream, indent, allowance + 1, context, level)
stream.write("])")
_dispatch[_collections.deque.__repr__] = _pprint_deque
def _pprint_user_dict(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
def _pprint_user_list(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserList.__repr__] = _pprint_user_list
def _pprint_user_string(
self,
object: Any,
stream: IO[str],
indent: int,
allowance: int,
context: Set[int],
level: int,
) -> None:
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserString.__repr__] = _pprint_user_string
def _safe_repr(
self, object: Any, context: Set[int], maxlevels: Optional[int], level: int
) -> str:
typ = type(object)
if typ in _builtin_scalars:
return repr(object)
r = getattr(typ, "__repr__", None)
if issubclass(typ, int) and r is int.__repr__:
if self._underscore_numbers:
return f"{object:_d}"
else:
return repr(object)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}"
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}"
if objid in context:
return _recursion(object)
context.add(objid)
components: List[str] = []
append = components.append
level += 1
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
for k, v in items:
krepr = self.format(k, context, maxlevels, level)
vrepr = self.format(v, context, maxlevels, level)
append(f"{krepr}: {vrepr}")
context.remove(objid)
return "{%s}" % ", ".join(components)
if (issubclass(typ, list) and r is list.__repr__) or (
issubclass(typ, tuple) and r is tuple.__repr__
):
if issubclass(typ, list):
if not object:
return "[]"
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()"
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "..."
if objid in context:
return _recursion(object)
context.add(objid)
components = []
append = components.append
level += 1
for o in object:
orepr = self.format(o, context, maxlevels, level)
append(orepr)
context.remove(objid)
return format % ", ".join(components)
return repr(object)
_builtin_scalars = frozenset({str, bytes, bytearray, float, complex, bool, type(None)})
def _recursion(object: Any) -> str:
return f"<Recursion on {type(object).__name__} with id={id(object)}>"
def _wrap_bytes_repr(object: Any, width: int, allowance: int) -> Iterator[str]:
current = b""
last = len(object) // 4 * 4
for i in range(0, len(object), 4):
part = object[i : i + 4]
candidate = current + part
if i == last:
width -= allowance
if len(repr(candidate)) > width:
if current:
yield repr(current)
current = part
else:
current = candidate
if current:
yield repr(current)
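For orientation, a hedged sketch of how the vendored printer behaves, assuming it is importable as ``_pytest._io.pprint`` (a private module, so this is illustration only):

.. code-block:: python

    from _pytest._io.pprint import PrettyPrinter

    printer = PrettyPrinter(indent=4, width=80)
    # unlike the stdlib pprint, containers are always expanded, so each item and
    # the closing marker land on their own line, roughly:
    # {
    #     'key': [
    #         1,
    #         2,
    #     ],
    # }
    print(printer.pformat({"key": [1, 2]}))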

View File

@ -1,8 +1,5 @@
import pprint import pprint
import reprlib import reprlib
from typing import Any
from typing import Dict
from typing import IO
from typing import Optional from typing import Optional
@ -132,49 +129,3 @@ def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str:
return repr(obj) return repr(obj)
except Exception as exc: except Exception as exc:
return _format_repr_exception(exc, obj) return _format_repr_exception(exc, obj)
class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter):
"""PrettyPrinter that always dispatches (regardless of width)."""
def _format(
self,
object: object,
stream: IO[str],
indent: int,
allowance: int,
context: Dict[int, Any],
level: int,
) -> None:
# Type ignored because _dispatch is private.
p = self._dispatch.get(type(object).__repr__, None) # type: ignore[attr-defined]
objid = id(object)
if objid in context or p is None:
# Type ignored because _format is private.
super()._format( # type: ignore[misc]
object,
stream,
indent,
allowance,
context,
level,
)
return
context[objid] = 1
p(self, object, stream, indent, allowance, context, level + 1)
del context[objid]
def _pformat_dispatch(
object: object,
indent: int = 1,
width: int = 80,
depth: Optional[int] = None,
*,
compact: bool = False,
) -> str:
return AlwaysDispatchingPrettyPrinter(
indent=indent, width=width, depth=depth, compact=compact
).pformat(object)
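
The removed ``_pformat_dispatch``/``AlwaysDispatchingPrettyPrinter`` pair is superseded by the vendored ``PrettyPrinter`` added above; a hedged sketch of the call-site change (the sample value is illustrative, the import matches the later ``util.py`` hunk):

.. code-block:: python

    from _pytest._io.pprint import PrettyPrinter

    left = {"numbers": [1, 2, 3], "flag": True}
    # previously: _pformat_dispatch(left).splitlines()
    lines = PrettyPrinter().pformat(left).splitlines()
    print(lines)
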

View File

@ -3,6 +3,7 @@ import os
import shutil import shutil
import sys import sys
from typing import final from typing import final
from typing import Literal
from typing import Optional from typing import Optional
from typing import Sequence from typing import Sequence
from typing import TextIO from typing import TextIO
@ -193,15 +194,21 @@ class TerminalWriter:
for indent, new_line in zip(indents, new_lines): for indent, new_line in zip(indents, new_lines):
self.line(indent + new_line) self.line(indent + new_line)
def _highlight(self, source: str) -> str: def _highlight(
"""Highlight the given source code if we have markup support.""" self, source: str, lexer: Literal["diff", "python"] = "python"
) -> str:
"""Highlight the given source if we have markup support."""
from _pytest.config.exceptions import UsageError from _pytest.config.exceptions import UsageError
if not self.hasmarkup or not self.code_highlight: if not self.hasmarkup or not self.code_highlight:
return source return source
try: try:
from pygments.formatters.terminal import TerminalFormatter from pygments.formatters.terminal import TerminalFormatter
from pygments.lexers.python import PythonLexer
if lexer == "python":
from pygments.lexers.python import PythonLexer as Lexer
elif lexer == "diff":
from pygments.lexers.diff import DiffLexer as Lexer
from pygments import highlight from pygments import highlight
import pygments.util import pygments.util
except ImportError: except ImportError:
@ -210,7 +217,7 @@ class TerminalWriter:
try: try:
highlighted: str = highlight( highlighted: str = highlight(
source, source,
PythonLexer(), Lexer(),
TerminalFormatter( TerminalFormatter(
bg=os.getenv("PYTEST_THEME_MODE", "dark"), bg=os.getenv("PYTEST_THEME_MODE", "dark"),
style=os.getenv("PYTEST_THEME"), style=os.getenv("PYTEST_THEME"),
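
A hedged sketch of how the new ``lexer`` keyword is expected to be used (``TerminalWriter`` is constructed directly here only for illustration; inside pytest it comes from ``config.get_terminal_writer()``):

.. code-block:: python

    from _pytest._io import TerminalWriter

    tw = TerminalWriter()
    print(tw._highlight("assert x == 1"))                       # default: Python lexer
    print(tw._highlight("- expected\n+ actual", lexer="diff"))  # new: diff lexer
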

View File

@ -755,7 +755,13 @@ class LocalPath:
if ensure: if ensure:
self.dirpath().ensure(dir=1) self.dirpath().ensure(dir=1)
if encoding: if encoding:
return error.checked_call(io.open, self.strpath, mode, encoding=encoding) # Using type ignore here because of this error:
# error: Argument 1 has incompatible type overloaded function;
# expected "Callable[[str, Any, Any], TextIOWrapper]" [arg-type]
# Which seems incorrect, given io.open supports the given argument types.
return error.checked_call(
io.open, self.strpath, mode, encoding=encoding # type:ignore[arg-type]
)
return error.checked_call(open, self.strpath, mode) return error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name): def _fastjoin(self, name):
@ -1261,13 +1267,19 @@ class LocalPath:
@classmethod @classmethod
def mkdtemp(cls, rootdir=None): def mkdtemp(cls, rootdir=None):
"""Return a Path object pointing to a fresh new temporary directory """Return a Path object pointing to a fresh new temporary directory
(which we created ourself). (which we created ourselves).
""" """
import tempfile import tempfile
if rootdir is None: if rootdir is None:
rootdir = cls.get_temproot() rootdir = cls.get_temproot()
return cls(error.checked_call(tempfile.mkdtemp, dir=str(rootdir))) # Using type ignore here because of this error:
# error: Argument 1 has incompatible type overloaded function; expected "Callable[[str], str]" [arg-type]
# Which seems incorrect, given tempfile.mkdtemp supports the given argument types.
path = error.checked_call(
tempfile.mkdtemp, dir=str(rootdir) # type:ignore[arg-type]
)
return cls(path)
@classmethod @classmethod
def make_numbered_dir( def make_numbered_dir(

View File

@ -42,6 +42,14 @@ def pytest_addoption(parser: Parser) -> None:
help="Enables the pytest_assertion_pass hook. " help="Enables the pytest_assertion_pass hook. "
"Make sure to delete any previously generated pyc cache files.", "Make sure to delete any previously generated pyc cache files.",
) )
Config._add_verbosity_ini(
parser,
Config.VERBOSITY_ASSERTIONS,
help=(
"Specify a verbosity level for assertions, overriding the main level. "
"Higher levels will provide more detailed explanation when an assertion fails."
),
)
def register_assert_rewrite(*names: str) -> None: def register_assert_rewrite(*names: str) -> None:
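
A hedged sketch of exercising the newly registered ``verbosity_assertions`` ini option with the ``pytester`` fixture (the test body and outcome are illustrative; ``pytester`` must be enabled, e.g. via ``pytest_plugins = ["pytester"]``):

.. code-block:: python

    def test_assertion_verbosity(pytester):
        pytester.makeini(
            """
            [pytest]
            verbosity_assertions = 2
            """
        )
        pytester.makepyfile("def test_x(): assert [1, 2, 3] == [1, 2, 4]")
        result = pytester.runpytest()
        result.assert_outcomes(failed=1)
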

View File

@ -13,6 +13,7 @@ import struct
import sys import sys
import tokenize import tokenize
import types import types
from collections import defaultdict
from pathlib import Path from pathlib import Path
from pathlib import PurePath from pathlib import PurePath
from typing import Callable from typing import Callable
@ -45,6 +46,10 @@ if TYPE_CHECKING:
from _pytest.assertion import AssertionState from _pytest.assertion import AssertionState
class Sentinel:
pass
assertstate_key = StashKey["AssertionState"]() assertstate_key = StashKey["AssertionState"]()
# pytest caches rewritten pycs in pycache dirs # pytest caches rewritten pycs in pycache dirs
@ -52,6 +57,9 @@ PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}"
PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
# Special marker that denotes we have just left a scope definition
_SCOPE_END_MARKER = Sentinel()
class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader): class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader):
"""PEP302/PEP451 import hook which rewrites asserts.""" """PEP302/PEP451 import hook which rewrites asserts."""
@ -418,7 +426,10 @@ def _saferepr(obj: object) -> str:
def _get_maxsize_for_saferepr(config: Optional[Config]) -> Optional[int]: def _get_maxsize_for_saferepr(config: Optional[Config]) -> Optional[int]:
"""Get `maxsize` configuration for saferepr based on the given config object.""" """Get `maxsize` configuration for saferepr based on the given config object."""
verbosity = config.getoption("verbose") if config is not None else 0 if config is None:
verbosity = 0
else:
verbosity = config.get_verbosity(Config.VERBOSITY_ASSERTIONS)
if verbosity >= 2: if verbosity >= 2:
return None return None
if verbosity >= 1: if verbosity >= 1:
@ -634,6 +645,8 @@ class AssertionRewriter(ast.NodeVisitor):
.push_format_context() and .pop_format_context() which allows .push_format_context() and .pop_format_context() which allows
to build another %-formatted string while already building one. to build another %-formatted string while already building one.
:scope: A tuple containing the current scope used for variables_overwrite.
:variables_overwrite: A dict filled with references to variables :variables_overwrite: A dict filled with references to variables
that change value within an assert. This happens when a variable is that change value within an assert. This happens when a variable is
reassigned with the walrus operator reassigned with the walrus operator
@ -655,7 +668,10 @@ class AssertionRewriter(ast.NodeVisitor):
else: else:
self.enable_assertion_pass_hook = False self.enable_assertion_pass_hook = False
self.source = source self.source = source
self.variables_overwrite: Dict[str, str] = {} self.scope: tuple[ast.AST, ...] = ()
self.variables_overwrite: defaultdict[
tuple[ast.AST, ...], Dict[str, str]
] = defaultdict(dict)
def run(self, mod: ast.Module) -> None: def run(self, mod: ast.Module) -> None:
"""Find all assert statements in *mod* and rewrite them.""" """Find all assert statements in *mod* and rewrite them."""
@ -719,9 +735,17 @@ class AssertionRewriter(ast.NodeVisitor):
mod.body[pos:pos] = imports mod.body[pos:pos] = imports
# Collect asserts. # Collect asserts.
nodes: List[ast.AST] = [mod] self.scope = (mod,)
nodes: List[Union[ast.AST, Sentinel]] = [mod]
while nodes: while nodes:
node = nodes.pop() node = nodes.pop()
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
self.scope = tuple((*self.scope, node))
nodes.append(_SCOPE_END_MARKER)
if node == _SCOPE_END_MARKER:
self.scope = self.scope[:-1]
continue
assert isinstance(node, ast.AST)
for name, field in ast.iter_fields(node): for name, field in ast.iter_fields(node):
if isinstance(field, list): if isinstance(field, list):
new: List[ast.AST] = [] new: List[ast.AST] = []
@ -992,7 +1016,7 @@ class AssertionRewriter(ast.NodeVisitor):
] ]
): ):
pytest_temp = self.variable() pytest_temp = self.variable()
self.variables_overwrite[ self.variables_overwrite[self.scope][
v.left.target.id v.left.target.id
] = v.left # type:ignore[assignment] ] = v.left # type:ignore[assignment]
v.left.target.id = pytest_temp v.left.target.id = pytest_temp
@ -1035,17 +1059,20 @@ class AssertionRewriter(ast.NodeVisitor):
new_args = [] new_args = []
new_kwargs = [] new_kwargs = []
for arg in call.args: for arg in call.args:
if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite: if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite.get(
arg = self.variables_overwrite[arg.id] # type:ignore[assignment] self.scope, {}
):
arg = self.variables_overwrite[self.scope][
arg.id
] # type:ignore[assignment]
res, expl = self.visit(arg) res, expl = self.visit(arg)
arg_expls.append(expl) arg_expls.append(expl)
new_args.append(res) new_args.append(res)
for keyword in call.keywords: for keyword in call.keywords:
if ( if isinstance(
isinstance(keyword.value, ast.Name) keyword.value, ast.Name
and keyword.value.id in self.variables_overwrite ) and keyword.value.id in self.variables_overwrite.get(self.scope, {}):
): keyword.value = self.variables_overwrite[self.scope][
keyword.value = self.variables_overwrite[
keyword.value.id keyword.value.id
] # type:ignore[assignment] ] # type:ignore[assignment]
res, expl = self.visit(keyword.value) res, expl = self.visit(keyword.value)
@ -1081,12 +1108,14 @@ class AssertionRewriter(ast.NodeVisitor):
def visit_Compare(self, comp: ast.Compare) -> Tuple[ast.expr, str]: def visit_Compare(self, comp: ast.Compare) -> Tuple[ast.expr, str]:
self.push_format_context() self.push_format_context()
# We first check if we have overwritten a variable in the previous assert # We first check if we have overwritten a variable in the previous assert
if isinstance(comp.left, ast.Name) and comp.left.id in self.variables_overwrite: if isinstance(
comp.left = self.variables_overwrite[ comp.left, ast.Name
) and comp.left.id in self.variables_overwrite.get(self.scope, {}):
comp.left = self.variables_overwrite[self.scope][
comp.left.id comp.left.id
] # type:ignore[assignment] ] # type:ignore[assignment]
if isinstance(comp.left, ast.NamedExpr): if isinstance(comp.left, ast.NamedExpr):
self.variables_overwrite[ self.variables_overwrite[self.scope][
comp.left.target.id comp.left.target.id
] = comp.left # type:ignore[assignment] ] = comp.left # type:ignore[assignment]
left_res, left_expl = self.visit(comp.left) left_res, left_expl = self.visit(comp.left)
@ -1106,7 +1135,7 @@ class AssertionRewriter(ast.NodeVisitor):
and next_operand.target.id == left_res.id and next_operand.target.id == left_res.id
): ):
next_operand.target.id = self.variable() next_operand.target.id = self.variable()
self.variables_overwrite[ self.variables_overwrite[self.scope][
left_res.id left_res.id
] = next_operand # type:ignore[assignment] ] = next_operand # type:ignore[assignment]
next_res, next_expl = self.visit(next_operand) next_res, next_expl = self.visit(next_operand)
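
A hedged illustration of the problem the per-scope ``variables_overwrite`` mapping addresses: a walrus reassignment recorded while rewriting one scope's asserts must not leak into another scope (constructed example, not taken from the diff):

.. code-block:: python

    def test_outer():
        x = 1
        assert (x := 2) == 2  # the overwrite of "x" is recorded for this scope only

    class TestInner:
        def test_inner(self):
            x = 10
            assert x == 10  # rewritten independently; must not see the outer entry
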

View File

@ -1,12 +1,13 @@
"""Utilities for truncating assertion output. """Utilities for truncating assertion output.
Current default behaviour is to truncate assertion explanations at Current default behaviour is to truncate assertion explanations at
~8 terminal lines, unless running in "-vv" mode or running on CI. terminal lines, unless running with an assertions verbosity level of at least 2 or running on CI.
""" """
from typing import List from typing import List
from typing import Optional from typing import Optional
from _pytest.assertion import util from _pytest.assertion import util
from _pytest.config import Config
from _pytest.nodes import Item from _pytest.nodes import Item
@ -26,7 +27,7 @@ def truncate_if_required(
def _should_truncate_item(item: Item) -> bool: def _should_truncate_item(item: Item) -> bool:
"""Whether or not this test item is eligible for truncation.""" """Whether or not this test item is eligible for truncation."""
verbose = item.config.option.verbose verbose = item.config.get_verbosity(Config.VERBOSITY_ASSERTIONS)
return verbose < 2 and not util.running_on_ci() return verbose < 2 and not util.running_on_ci()

View File

@ -7,14 +7,16 @@ from typing import Any
from typing import Callable from typing import Callable
from typing import Iterable from typing import Iterable
from typing import List from typing import List
from typing import Literal
from typing import Mapping from typing import Mapping
from typing import Optional from typing import Optional
from typing import Protocol
from typing import Sequence from typing import Sequence
from unicodedata import normalize from unicodedata import normalize
import _pytest._code import _pytest._code
from _pytest import outcomes from _pytest import outcomes
from _pytest._io.saferepr import _pformat_dispatch from _pytest._io.pprint import PrettyPrinter
from _pytest._io.saferepr import saferepr from _pytest._io.saferepr import saferepr
from _pytest._io.saferepr import saferepr_unlimited from _pytest._io.saferepr import saferepr_unlimited
from _pytest.config import Config from _pytest.config import Config
@ -33,6 +35,11 @@ _assertion_pass: Optional[Callable[[int, str, str], None]] = None
_config: Optional[Config] = None _config: Optional[Config] = None
class _HighlightFunc(Protocol):
def __call__(self, source: str, lexer: Literal["diff", "python"] = "python") -> str:
"""Apply highlighting to the given source."""
def format_explanation(explanation: str) -> str: def format_explanation(explanation: str) -> str:
r"""Format an explanation. r"""Format an explanation.
@ -161,7 +168,7 @@ def assertrepr_compare(
config, op: str, left: Any, right: Any, use_ascii: bool = False config, op: str, left: Any, right: Any, use_ascii: bool = False
) -> Optional[List[str]]: ) -> Optional[List[str]]:
"""Return specialised explanations for some operators/operands.""" """Return specialised explanations for some operators/operands."""
verbose = config.getoption("verbose") verbose = config.get_verbosity(Config.VERBOSITY_ASSERTIONS)
# Strings which normalize equal are often hard to distinguish when printed; use ascii() to make this easier. # Strings which normalize equal are often hard to distinguish when printed; use ascii() to make this easier.
# See issue #3246. # See issue #3246.
@ -189,10 +196,27 @@ def assertrepr_compare(
explanation = None explanation = None
try: try:
if op == "==": if op == "==":
explanation = _compare_eq_any(left, right, verbose) writer = config.get_terminal_writer()
explanation = _compare_eq_any(left, right, writer._highlight, verbose)
elif op == "not in": elif op == "not in":
if istext(left) and istext(right): if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose) explanation = _notin_text(left, right, verbose)
elif op == "!=":
if isset(left) and isset(right):
explanation = ["Both sets are equal"]
elif op == ">=":
if isset(left) and isset(right):
explanation = _compare_gte_set(left, right, verbose)
elif op == "<=":
if isset(left) and isset(right):
explanation = _compare_lte_set(left, right, verbose)
elif op == ">":
if isset(left) and isset(right):
explanation = _compare_gt_set(left, right, verbose)
elif op == "<":
if isset(left) and isset(right):
explanation = _compare_lt_set(left, right, verbose)
except outcomes.Exit: except outcomes.Exit:
raise raise
except Exception: except Exception:
@ -209,7 +233,9 @@ def assertrepr_compare(
return [summary] + explanation return [summary] + explanation
def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]: def _compare_eq_any(
left: Any, right: Any, highlighter: _HighlightFunc, verbose: int = 0
) -> List[str]:
explanation = [] explanation = []
if istext(left) and istext(right): if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose) explanation = _diff_text(left, right, verbose)
@ -229,7 +255,7 @@ def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]:
# field values, not the type or field names. But this branch # field values, not the type or field names. But this branch
# intentionally only handles the same-type case, which was often # intentionally only handles the same-type case, which was often
# used in older code bases before dataclasses/attrs were available. # used in older code bases before dataclasses/attrs were available.
explanation = _compare_eq_cls(left, right, verbose) explanation = _compare_eq_cls(left, right, highlighter, verbose)
elif issequence(left) and issequence(right): elif issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose) explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right): elif isset(left) and isset(right):
@ -238,7 +264,7 @@ def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]:
explanation = _compare_eq_dict(left, right, verbose) explanation = _compare_eq_dict(left, right, verbose)
if isiterable(left) and isiterable(right): if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose) expl = _compare_eq_iterable(left, right, highlighter, verbose)
explanation.extend(expl) explanation.extend(expl)
return explanation return explanation
@ -292,45 +318,31 @@ def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
return explanation return explanation
def _surrounding_parens_on_own_lines(lines: List[str]) -> None:
"""Move opening/closing parenthesis/bracket to own lines."""
opening = lines[0][:1]
if opening in ["(", "[", "{"]:
lines[0] = " " + lines[0][1:]
lines[:] = [opening] + lines
closing = lines[-1][-1:]
if closing in [")", "]", "}"]:
lines[-1] = lines[-1][:-1] + ","
lines[:] = lines + [closing]
def _compare_eq_iterable( def _compare_eq_iterable(
left: Iterable[Any], right: Iterable[Any], verbose: int = 0 left: Iterable[Any],
right: Iterable[Any],
highlighter: _HighlightFunc,
verbose: int = 0,
) -> List[str]: ) -> List[str]:
if verbose <= 0 and not running_on_ci(): if verbose <= 0 and not running_on_ci():
return ["Use -v to get more diff"] return ["Use -v to get more diff"]
# dynamic import to speedup pytest # dynamic import to speedup pytest
import difflib import difflib
left_formatting = pprint.pformat(left).splitlines() left_formatting = PrettyPrinter().pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines() right_formatting = PrettyPrinter().pformat(right).splitlines()
# Re-format for different output lengths.
lines_left = len(left_formatting)
lines_right = len(right_formatting)
if lines_left != lines_right:
left_formatting = _pformat_dispatch(left).splitlines()
right_formatting = _pformat_dispatch(right).splitlines()
if lines_left > 1 or lines_right > 1:
_surrounding_parens_on_own_lines(left_formatting)
_surrounding_parens_on_own_lines(right_formatting)
explanation = ["Full diff:"] explanation = ["Full diff:"]
# "right" is the expected base against which we compare "left", # "right" is the expected base against which we compare "left",
# see https://github.com/pytest-dev/pytest/issues/3333 # see https://github.com/pytest-dev/pytest/issues/3333
explanation.extend( explanation.extend(
line.rstrip() for line in difflib.ndiff(right_formatting, left_formatting) highlighter(
"\n".join(
line.rstrip()
for line in difflib.ndiff(right_formatting, left_formatting)
),
lexer="diff",
).splitlines()
) )
return explanation return explanation
@ -392,15 +404,49 @@ def _compare_eq_set(
left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0 left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
) -> List[str]: ) -> List[str]:
explanation = [] explanation = []
diff_left = left - right explanation.extend(_set_one_sided_diff("left", left, right))
diff_right = right - left explanation.extend(_set_one_sided_diff("right", right, left))
if diff_left: return explanation
explanation.append("Extra items in the left set:")
for item in diff_left:
explanation.append(saferepr(item)) def _compare_gt_set(
if diff_right: left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
explanation.append("Extra items in the right set:") ) -> List[str]:
for item in diff_right: explanation = _compare_gte_set(left, right, verbose)
if not explanation:
return ["Both sets are equal"]
return explanation
def _compare_lt_set(
left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
) -> List[str]:
explanation = _compare_lte_set(left, right, verbose)
if not explanation:
return ["Both sets are equal"]
return explanation
def _compare_gte_set(
left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
) -> List[str]:
return _set_one_sided_diff("right", right, left)
def _compare_lte_set(
left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
) -> List[str]:
return _set_one_sided_diff("left", left, right)
def _set_one_sided_diff(
posn: str, set1: AbstractSet[Any], set2: AbstractSet[Any]
) -> List[str]:
explanation = []
diff = set1 - set2
if diff:
explanation.append(f"Extra items in the {posn} set:")
for item in diff:
explanation.append(saferepr(item)) explanation.append(saferepr(item))
return explanation return explanation
@ -446,7 +492,9 @@ def _compare_eq_dict(
return explanation return explanation
def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]: def _compare_eq_cls(
left: Any, right: Any, highlighter: _HighlightFunc, verbose: int
) -> List[str]:
if not has_default_eq(left): if not has_default_eq(left):
return [] return []
if isdatacls(left): if isdatacls(left):
@ -492,7 +540,9 @@ def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]:
] ]
explanation += [ explanation += [
indent + line indent + line
for line in _compare_eq_any(field_left, field_right, verbose) for line in _compare_eq_any(
field_left, field_right, highlighter, verbose
)
] ]
return explanation return explanation
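
A hedged sketch of the new one-sided set reporting (the import path assumes this hunk lands in ``_pytest/assertion/util.py`` as shown):

.. code-block:: python

    from _pytest.assertion.util import _set_one_sided_diff

    # For `assert {1, 2} >= {1, 3}`, the ">=" branch reports what the right-hand
    # set contains that the left-hand set lacks:
    print(_set_one_sided_diff("right", {1, 3}, {1, 2}))
    # expected: ['Extra items in the right set:', '3']
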

View File

@ -22,6 +22,7 @@ from typing import Any
from typing import Callable from typing import Callable
from typing import cast from typing import cast
from typing import Dict from typing import Dict
from typing import Final
from typing import final from typing import final
from typing import Generator from typing import Generator
from typing import IO from typing import IO
@ -37,6 +38,7 @@ from typing import Type
from typing import TYPE_CHECKING from typing import TYPE_CHECKING
from typing import Union from typing import Union
import pluggy
from pluggy import HookimplMarker from pluggy import HookimplMarker
from pluggy import HookimplOpts from pluggy import HookimplOpts
from pluggy import HookspecMarker from pluggy import HookspecMarker
@ -46,6 +48,7 @@ from pluggy import PluginManager
import _pytest._code import _pytest._code
import _pytest.deprecated import _pytest.deprecated
import _pytest.hookspec import _pytest.hookspec
from .compat import PathAwareHookProxy
from .exceptions import PrintHelp as PrintHelp from .exceptions import PrintHelp as PrintHelp
from .exceptions import UsageError as UsageError from .exceptions import UsageError as UsageError
from .findpaths import determine_setup from .findpaths import determine_setup
@ -67,7 +70,7 @@ from _pytest.warning_types import warn_explicit_for
if TYPE_CHECKING: if TYPE_CHECKING:
from _pytest._code.code import _TracebackStyle from _pytest._code.code import _TracebackStyle
from _pytest.terminal import TerminalReporter from _pytest.terminal import TerminalReporter
from .argparsing import Argument from .argparsing import Argument, Parser
_PluggyPlugin = object _PluggyPlugin = object
@ -1005,10 +1008,8 @@ class Config:
# Deprecated alias. Was never public. Can be removed in a few releases. # Deprecated alias. Was never public. Can be removed in a few releases.
self._store = self.stash self._store = self.stash
from .compat import PathAwareHookProxy
self.trace = self.pluginmanager.trace.root.get("config") self.trace = self.pluginmanager.trace.root.get("config")
self.hook = PathAwareHookProxy(self.pluginmanager.hook) self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook) # type: ignore[assignment]
self._inicache: Dict[str, Any] = {} self._inicache: Dict[str, Any] = {}
self._override_ini: Sequence[str] = () self._override_ini: Sequence[str] = ()
self._opt2dest: Dict[str, str] = {} self._opt2dest: Dict[str, str] = {}
@ -1495,6 +1496,27 @@ class Config:
def getini(self, name: str): def getini(self, name: str):
"""Return configuration value from an :ref:`ini file <configfiles>`. """Return configuration value from an :ref:`ini file <configfiles>`.
If a configuration value is not defined in an
:ref:`ini file <configfiles>`, then the ``default`` value provided while
registering the configuration through
:func:`parser.addini <pytest.Parser.addini>` will be returned.
Please note that you can even provide ``None`` as a valid
default value.
If ``default`` is not provided while registering using
:func:`parser.addini <pytest.Parser.addini>`, then a default value
based on the ``type`` parameter passed to
:func:`parser.addini <pytest.Parser.addini>` will be returned.
The default values based on ``type`` are:
``paths``, ``pathlist``, ``args`` and ``linelist`` : empty list ``[]``
``bool`` : ``False``
``string`` : empty string ``""``
If neither the ``default`` nor the ``type`` parameter is passed
while registering the configuration through
:func:`parser.addini <pytest.Parser.addini>`, then the configuration
is treated as a string and a default empty string '' is returned.
If the specified name hasn't been registered through a prior If the specified name hasn't been registered through a prior
:func:`parser.addini <pytest.Parser.addini>` call (usually from a :func:`parser.addini <pytest.Parser.addini>` call (usually from a
plugin), a ValueError is raised. plugin), a ValueError is raised.
@ -1521,11 +1543,7 @@ class Config:
try: try:
value = self.inicfg[name] value = self.inicfg[name]
except KeyError: except KeyError:
if default is not None:
return default return default
if type is None:
return ""
return []
else: else:
value = override_value value = override_value
# Coerce the values based on types. # Coerce the values based on types.
@ -1633,6 +1651,78 @@ class Config:
"""Deprecated, use getoption(skip=True) instead.""" """Deprecated, use getoption(skip=True) instead."""
return self.getoption(name, skip=True) return self.getoption(name, skip=True)
#: Verbosity type for failed assertions (see :confval:`verbosity_assertions`).
VERBOSITY_ASSERTIONS: Final = "assertions"
_VERBOSITY_INI_DEFAULT: Final = "auto"
def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:
r"""Retrieve the verbosity level for a fine-grained verbosity type.
:param verbosity_type: Verbosity type to get level for. If a level is
configured for the given type, that value will be returned. If the
given type is not a known verbosity type, the global verbosity
level will be returned. If the given type is None (default), the
global verbosity level will be returned.
To configure a level for a fine-grained verbosity type, the
configuration file should have a setting for the configuration name
and a numeric value for the verbosity level. A special value of "auto"
can be used to explicitly use the global verbosity level.
Example:
.. code-block:: ini
# content of pytest.ini
[pytest]
verbosity_assertions = 2
.. code-block:: console
pytest -v
.. code-block:: python
print(config.get_verbosity()) # 1
print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS)) # 2
"""
global_level = self.option.verbose
assert isinstance(global_level, int)
if verbosity_type is None:
return global_level
ini_name = Config._verbosity_ini_name(verbosity_type)
if ini_name not in self._parser._inidict:
return global_level
level = self.getini(ini_name)
if level == Config._VERBOSITY_INI_DEFAULT:
return global_level
return int(level)
@staticmethod
def _verbosity_ini_name(verbosity_type: str) -> str:
return f"verbosity_{verbosity_type}"
@staticmethod
def _add_verbosity_ini(parser: "Parser", verbosity_type: str, help: str) -> None:
"""Add a output verbosity configuration option for the given output type.
:param parser: Parser for command line arguments and ini-file values.
:param verbosity_type: Fine-grained verbosity category.
:param help: Description of the output this type controls.
The value should be retrieved via a call to
:py:func:`config.get_verbosity(type) <pytest.Config.get_verbosity>`.
"""
parser.addini(
Config._verbosity_ini_name(verbosity_type),
help=help,
type="string",
default=Config._VERBOSITY_INI_DEFAULT,
)
def _warn_about_missing_assertion(self, mode: str) -> None: def _warn_about_missing_assertion(self, mode: str) -> None:
if not _assertion_supported(): if not _assertion_supported():
if mode == "plain": if mode == "plain":
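
A hedged conftest sketch of the ``getini`` default behaviour documented above (option names are hypothetical):

.. code-block:: python

    def pytest_addoption(parser):
        parser.addini("my_paths", help="example paths option", type="paths")
        parser.addini("my_flag", help="example bool option", type="bool")
        parser.addini("my_name", help="example option with no type")

    def pytest_configure(config):
        assert config.getini("my_paths") == []    # empty list for "paths"
        assert config.getini("my_flag") is False  # False for "bool"
        assert config.getini("my_name") == ""     # empty string when type is omitted
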

View File

@ -27,6 +27,14 @@ from _pytest.deprecated import check_ispytest
FILE_OR_DIR = "file_or_dir" FILE_OR_DIR = "file_or_dir"
class NotSet:
def __repr__(self) -> str:
return "<notset>"
NOT_SET = NotSet()
@final @final
class Parser: class Parser:
"""Parser for command line arguments and ini-file values. """Parser for command line arguments and ini-file values.
@ -176,7 +184,7 @@ class Parser:
type: Optional[ type: Optional[
Literal["string", "paths", "pathlist", "args", "linelist", "bool"] Literal["string", "paths", "pathlist", "args", "linelist", "bool"]
] = None, ] = None,
default: Any = None, default: Any = NOT_SET,
) -> None: ) -> None:
"""Register an ini-file option. """Register an ini-file option.
@ -203,10 +211,30 @@ class Parser:
:py:func:`config.getini(name) <pytest.Config.getini>`. :py:func:`config.getini(name) <pytest.Config.getini>`.
""" """
assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool") assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool")
if default is NOT_SET:
default = get_ini_default_for_type(type)
self._inidict[name] = (help, type, default) self._inidict[name] = (help, type, default)
self._ininames.append(name) self._ininames.append(name)
def get_ini_default_for_type(
type: Optional[Literal["string", "paths", "pathlist", "args", "linelist", "bool"]]
) -> Any:
"""
Used by addini to get the default value for a given ini-option type, when
default is not supplied.
"""
if type is None:
return ""
elif type in ("paths", "pathlist", "args", "linelist"):
return []
elif type == "bool":
return False
else:
return ""
class ArgumentError(Exception): class ArgumentError(Exception):
"""Raised if an Argument instance is created with invalid or """Raised if an Argument instance is created with invalid or
inconsistent arguments.""" inconsistent arguments."""
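
The helper's expected mapping, as a hedged sketch (the import path assumes the hunk above lands in ``_pytest/config/argparsing.py``):

.. code-block:: python

    from _pytest.config.argparsing import get_ini_default_for_type

    assert get_ini_default_for_type(None) == ""
    assert get_ini_default_for_type("bool") is False
    assert get_ini_default_for_type("paths") == []
    assert get_ini_default_for_type("linelist") == []
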

View File

@ -1,15 +1,18 @@
from __future__ import annotations
import functools import functools
import warnings import warnings
from pathlib import Path from pathlib import Path
from typing import Optional from typing import Mapping
import pluggy
from ..compat import LEGACY_PATH from ..compat import LEGACY_PATH
from ..compat import legacy_path from ..compat import legacy_path
from ..deprecated import HOOK_LEGACY_PATH_ARG from ..deprecated import HOOK_LEGACY_PATH_ARG
from _pytest.nodes import _check_path
# hookname: (Path, LEGACY_PATH) # hookname: (Path, LEGACY_PATH)
imply_paths_hooks = { imply_paths_hooks: Mapping[str, tuple[str, str]] = {
"pytest_ignore_collect": ("collection_path", "path"), "pytest_ignore_collect": ("collection_path", "path"),
"pytest_collect_file": ("file_path", "path"), "pytest_collect_file": ("file_path", "path"),
"pytest_pycollect_makemodule": ("module_path", "path"), "pytest_pycollect_makemodule": ("module_path", "path"),
@ -18,6 +21,14 @@ imply_paths_hooks = {
} }
def _check_path(path: Path, fspath: LEGACY_PATH) -> None:
if Path(fspath) != path:
raise ValueError(
f"Path({fspath!r}) != {path!r}\n"
"if both path and fspath are given they need to be equal"
)
class PathAwareHookProxy: class PathAwareHookProxy:
""" """
this helper wraps around hook callers this helper wraps around hook callers
@ -27,24 +38,24 @@ class PathAwareHookProxy:
this may have to be changed later depending on bugs this may have to be changed later depending on bugs
""" """
def __init__(self, hook_caller): def __init__(self, hook_relay: pluggy.HookRelay) -> None:
self.__hook_caller = hook_caller self._hook_relay = hook_relay
def __dir__(self): def __dir__(self) -> list[str]:
return dir(self.__hook_caller) return dir(self._hook_relay)
def __getattr__(self, key, _wraps=functools.wraps): def __getattr__(self, key: str) -> pluggy.HookCaller:
hook = getattr(self.__hook_caller, key) hook: pluggy.HookCaller = getattr(self._hook_relay, key)
if key not in imply_paths_hooks: if key not in imply_paths_hooks:
self.__dict__[key] = hook self.__dict__[key] = hook
return hook return hook
else: else:
path_var, fspath_var = imply_paths_hooks[key] path_var, fspath_var = imply_paths_hooks[key]
@_wraps(hook) @functools.wraps(hook)
def fixed_hook(**kw): def fixed_hook(**kw):
path_value: Optional[Path] = kw.pop(path_var, None) path_value: Path | None = kw.pop(path_var, None)
fspath_value: Optional[LEGACY_PATH] = kw.pop(fspath_var, None) fspath_value: LEGACY_PATH | None = kw.pop(fspath_var, None)
if fspath_value is not None: if fspath_value is not None:
warnings.warn( warnings.warn(
HOOK_LEGACY_PATH_ARG.format( HOOK_LEGACY_PATH_ARG.format(
@ -65,6 +76,8 @@ class PathAwareHookProxy:
kw[fspath_var] = fspath_value kw[fspath_var] = fspath_value
return hook(**kw) return hook(**kw)
fixed_hook.name = hook.name # type: ignore[attr-defined]
fixed_hook.spec = hook.spec # type: ignore[attr-defined]
fixed_hook.__name__ = key fixed_hook.__name__ = key
self.__dict__[key] = fixed_hook self.__dict__[key] = fixed_hook
return fixed_hook return fixed_hook # type: ignore[return-value]
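
A hedged sketch of the ``_check_path`` equivalence check moved into this module (a plain string stands in for a ``LEGACY_PATH`` here, relying on ``Path()`` accepting it):

.. code-block:: python

    from pathlib import Path
    from _pytest.config.compat import _check_path

    _check_path(Path("/tmp/example"), "/tmp/example")  # equal values pass silently
    # _check_path(Path("/tmp/other"), "/tmp/example")  # would raise ValueError
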

View File

@ -1,4 +1,3 @@
import io
import os import os
import sys import sys
from typing import Generator from typing import Generator
@ -10,8 +9,8 @@ from _pytest.nodes import Item
from _pytest.stash import StashKey from _pytest.stash import StashKey
fault_handler_original_stderr_fd_key = StashKey[int]()
fault_handler_stderr_fd_key = StashKey[int]() fault_handler_stderr_fd_key = StashKey[int]()
fault_handler_originally_enabled_key = StashKey[bool]()
def pytest_addoption(parser: Parser) -> None: def pytest_addoption(parser: Parser) -> None:
@ -25,8 +24,15 @@ def pytest_addoption(parser: Parser) -> None:
def pytest_configure(config: Config) -> None: def pytest_configure(config: Config) -> None:
import faulthandler import faulthandler
config.stash[fault_handler_stderr_fd_key] = os.dup(get_stderr_fileno()) # at teardown we want to restore the original faulthandler fileno
config.stash[fault_handler_originally_enabled_key] = faulthandler.is_enabled() # but faulthandler has no api to return the original fileno
# so here we stash the stderr fileno to be used at teardown
# sys.stderr and sys.__stderr__ may be closed or patched during the session
# so we can't rely on their values being good at that point (#11572).
stderr_fileno = get_stderr_fileno()
if faulthandler.is_enabled():
config.stash[fault_handler_original_stderr_fd_key] = stderr_fileno
config.stash[fault_handler_stderr_fd_key] = os.dup(stderr_fileno)
faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key]) faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key])
@ -38,9 +44,10 @@ def pytest_unconfigure(config: Config) -> None:
if fault_handler_stderr_fd_key in config.stash: if fault_handler_stderr_fd_key in config.stash:
os.close(config.stash[fault_handler_stderr_fd_key]) os.close(config.stash[fault_handler_stderr_fd_key])
del config.stash[fault_handler_stderr_fd_key] del config.stash[fault_handler_stderr_fd_key]
if config.stash.get(fault_handler_originally_enabled_key, False):
# Re-enable the faulthandler if it was originally enabled. # Re-enable the faulthandler if it was originally enabled.
faulthandler.enable(file=get_stderr_fileno()) if fault_handler_original_stderr_fd_key in config.stash:
faulthandler.enable(config.stash[fault_handler_original_stderr_fd_key])
del config.stash[fault_handler_original_stderr_fd_key]
def get_stderr_fileno() -> int: def get_stderr_fileno() -> int:
@ -51,7 +58,7 @@ def get_stderr_fileno() -> int:
if fileno == -1: if fileno == -1:
raise AttributeError() raise AttributeError()
return fileno return fileno
except (AttributeError, io.UnsupportedOperation): except (AttributeError, ValueError):
# pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
# https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
# This is potentially dangerous, but the best we can do. # This is potentially dangerous, but the best we can do.

View File

@ -369,7 +369,7 @@ def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object]
__tracebackhide__ = True __tracebackhide__ = True
def record_func(name: str, value: object) -> None: def record_func(name: str, value: object) -> None:
"""No-op function in case --junitxml was not passed in the command-line.""" """No-op function in case --junit-xml was not passed in the command-line."""
__tracebackhide__ = True __tracebackhide__ = True
_check_record_param_type("name", name) _check_record_param_type("name", name)

View File

@ -303,13 +303,13 @@ def pytest_addoption(parser: Parser) -> None:
add_option_ini( add_option_ini(
"--log-file-format", "--log-file-format",
dest="log_file_format", dest="log_file_format",
default=DEFAULT_LOG_FORMAT, default=None,
help="Log format used by the logging module", help="Log format used by the logging module",
) )
add_option_ini( add_option_ini(
"--log-file-date-format", "--log-file-date-format",
dest="log_file_date_format", dest="log_file_date_format",
default=DEFAULT_LOG_DATE_FORMAT, default=None,
help="Log date format used by the logging module", help="Log date format used by the logging module",
) )
add_option_ini( add_option_ini(
@ -564,6 +564,22 @@ class LogCaptureFixture:
self.handler.setLevel(handler_orig_level) self.handler.setLevel(handler_orig_level)
logging.disable(original_disable_level) logging.disable(original_disable_level)
@contextmanager
def filtering(self, filter_: logging.Filter) -> Generator[None, None, None]:
"""Context manager that temporarily adds the given filter to the caplog's
:meth:`handler` for the 'with' statement block, and removes that filter at the
end of the block.
:param filter_: A custom :class:`logging.Filter` object.
.. versionadded:: 7.5
"""
self.handler.addFilter(filter_)
try:
yield
finally:
self.handler.removeFilter(filter_)
@fixture @fixture
def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]: def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]:
@ -635,7 +651,9 @@ class LoggingPlugin:
self.report_handler.setFormatter(self.formatter) self.report_handler.setFormatter(self.formatter)
# File logging. # File logging.
self.log_file_level = get_log_level_for_setting(config, "log_file_level") self.log_file_level = get_log_level_for_setting(
config, "log_file_level", "log_level"
)
log_file = get_option_ini(config, "log_file") or os.devnull log_file = get_option_ini(config, "log_file") or os.devnull
if log_file != os.devnull: if log_file != os.devnull:
directory = os.path.dirname(os.path.abspath(log_file)) directory = os.path.dirname(os.path.abspath(log_file))
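
A hedged usage sketch of the new ``caplog.filtering()`` context manager (the filter class and messages are illustrative):

.. code-block:: python

    import logging

    class OnlyErrors(logging.Filter):
        def filter(self, record: logging.LogRecord) -> bool:
            return record.levelno >= logging.ERROR

    def test_filtering(caplog):
        with caplog.filtering(OnlyErrors()):
            logging.getLogger().warning("dropped")
            logging.getLogger().error("kept")
        assert "kept" in caplog.text
        assert "dropped" not in caplog.text
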

View File

@ -7,6 +7,7 @@ import importlib
import os import os
import sys import sys
from pathlib import Path from pathlib import Path
from typing import AbstractSet
from typing import Callable from typing import Callable
from typing import Dict from typing import Dict
from typing import final from typing import final
@ -22,6 +23,8 @@ from typing import Type
from typing import TYPE_CHECKING from typing import TYPE_CHECKING
from typing import Union from typing import Union
import pluggy
import _pytest._code import _pytest._code
from _pytest import nodes from _pytest import nodes
from _pytest.config import Config from _pytest.config import Config
@ -31,6 +34,7 @@ from _pytest.config import hookimpl
from _pytest.config import PytestPluginManager from _pytest.config import PytestPluginManager
from _pytest.config import UsageError from _pytest.config import UsageError
from _pytest.config.argparsing import Parser from _pytest.config.argparsing import Parser
from _pytest.config.compat import PathAwareHookProxy
from _pytest.fixtures import FixtureManager from _pytest.fixtures import FixtureManager
from _pytest.outcomes import exit from _pytest.outcomes import exit
from _pytest.pathlib import absolutepath from _pytest.pathlib import absolutepath
@ -429,11 +433,15 @@ def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> No
class FSHookProxy: class FSHookProxy:
def __init__(self, pm: PytestPluginManager, remove_mods) -> None: def __init__(
self,
pm: PytestPluginManager,
remove_mods: AbstractSet[object],
) -> None:
self.pm = pm self.pm = pm
self.remove_mods = remove_mods self.remove_mods = remove_mods
def __getattr__(self, name: str): def __getattr__(self, name: str) -> pluggy.HookCaller:
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x self.__dict__[name] = x
return x return x
@ -546,7 +554,7 @@ class Session(nodes.FSCollector):
path_ = path if isinstance(path, Path) else Path(path) path_ = path if isinstance(path, Path) else Path(path)
return path_ in self._initialpaths return path_ in self._initialpaths
def gethookproxy(self, fspath: "os.PathLike[str]"): def gethookproxy(self, fspath: "os.PathLike[str]") -> pluggy.HookRelay:
# Optimization: Path(Path(...)) is much slower than isinstance. # Optimization: Path(Path(...)) is much slower than isinstance.
path = fspath if isinstance(fspath, Path) else Path(fspath) path = fspath if isinstance(fspath, Path) else Path(fspath)
pm = self.config.pluginmanager pm = self.config.pluginmanager
@ -563,11 +571,10 @@ class Session(nodes.FSCollector):
) )
my_conftestmodules = pm._getconftestmodules(path) my_conftestmodules = pm._getconftestmodules(path)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules) remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
proxy: pluggy.HookRelay
if remove_mods: if remove_mods:
# One or more conftests are not in use at this fspath. # One or more conftests are not in use at this path.
from .config.compat import PathAwareHookProxy proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) # type: ignore[arg-type,assignment]
proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods))
else: else:
# All plugins are active for this fspath. # All plugins are active for this fspath.
proxy = self.config.hook proxy = self.config.hook

View File

@ -457,7 +457,7 @@ if TYPE_CHECKING:
@overload @overload
def __call__( def __call__(
self, self,
condition: Union[str, bool] = ..., condition: Union[str, bool] = False,
*conditions: Union[str, bool], *conditions: Union[str, bool],
reason: str = ..., reason: str = ...,
run: bool = ..., run: bool = ...,

View File

@ -19,6 +19,8 @@ from typing import TYPE_CHECKING
from typing import TypeVar from typing import TypeVar
from typing import Union from typing import Union
import pluggy
import _pytest._code import _pytest._code
from _pytest._code import getfslineno from _pytest._code import getfslineno
from _pytest._code.code import ExceptionInfo from _pytest._code.code import ExceptionInfo
@ -27,6 +29,7 @@ from _pytest._code.code import Traceback
from _pytest.compat import LEGACY_PATH from _pytest.compat import LEGACY_PATH
from _pytest.config import Config from _pytest.config import Config
from _pytest.config import ConftestImportFailure from _pytest.config import ConftestImportFailure
from _pytest.config.compat import _check_path
from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH
from _pytest.deprecated import NODE_CTOR_FSPATH_ARG from _pytest.deprecated import NODE_CTOR_FSPATH_ARG
from _pytest.mark.structures import Mark from _pytest.mark.structures import Mark
@ -94,14 +97,6 @@ def iterparentnodeids(nodeid: str) -> Iterator[str]:
yield nodeid yield nodeid
def _check_path(path: Path, fspath: LEGACY_PATH) -> None:
if Path(fspath) != path:
raise ValueError(
f"Path({fspath!r}) != {path!r}\n"
"if both path and fspath are given they need to be equal"
)
def _imply_path( def _imply_path(
node_type: Type["Node"], node_type: Type["Node"],
path: Optional[Path], path: Optional[Path],
@ -127,6 +122,20 @@ _NodeType = TypeVar("_NodeType", bound="Node")
class NodeMeta(type): class NodeMeta(type):
"""Metaclass used by :class:`Node` to enforce that direct construction raises
:class:`Failed`.
This behaviour supports the indirection introduced with :meth:`Node.from_parent`,
the named constructor to be used instead of direct construction. The design
decision to enforce indirection with :class:`NodeMeta` was made as a
temporary aid for refactoring the collection tree, which was diagnosed to
have :class:`Node` objects whose creational patterns were overly entangled.
Once the refactoring is complete, this metaclass can be removed.
See https://github.com/pytest-dev/pytest/projects/3 for an overview of the
progress on detangling the :class:`Node` classes.
"""
def __call__(self, *k, **kw): def __call__(self, *k, **kw):
msg = ( msg = (
"Direct construction of {name} has been deprecated, please use {name}.from_parent.\n" "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n"
@ -264,7 +273,7 @@ class Node(metaclass=NodeMeta):
return cls._create(parent=parent, **kw) return cls._create(parent=parent, **kw)
@property @property
def ihook(self): def ihook(self) -> pluggy.HookRelay:
"""fspath-sensitive hook proxy used to call pytest hooks.""" """fspath-sensitive hook proxy used to call pytest hooks."""
return self.session.gethookproxy(self.path) return self.session.gethookproxy(self.path)
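
A hedged sketch of the indirection the ``NodeMeta`` docstring describes (the item class is hypothetical; the blocked and supported construction paths are shown as comments):

.. code-block:: python

    import pytest

    class MyItem(pytest.Item):
        def runtest(self) -> None:
            pass

    # MyItem.from_parent(parent, name="x")   # supported named constructor
    # MyItem(name="x", parent=parent)        # blocked by NodeMeta with a
    #                                        # "use MyItem.from_parent" message
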

View File

@ -623,8 +623,9 @@ def module_name_from_path(path: Path, root: Path) -> str:
# Use the parts for the relative path to the root path. # Use the parts for the relative path to the root path.
path_parts = relative_path.parts path_parts = relative_path.parts
# Module name for packages do not contain the __init__ file. # Module name for packages do not contain the __init__ file, unless
if path_parts[-1] == "__init__": # the `__init__.py` file is at the root.
if len(path_parts) >= 2 and path_parts[-1] == "__init__":
path_parts = path_parts[:-1] path_parts = path_parts[:-1]
return ".".join(path_parts) return ".".join(path_parts)
@ -680,7 +681,7 @@ def resolve_package_path(path: Path) -> Optional[Path]:
result = None result = None
for parent in itertools.chain((path,), path.parents): for parent in itertools.chain((path,), path.parents):
if parent.is_dir(): if parent.is_dir():
if not parent.joinpath("__init__.py").is_file(): if not (parent / "__init__.py").is_file():
break break
if not parent.name.isidentifier(): if not parent.name.isidentifier():
break break
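
Hedged examples of the ``module_name_from_path`` rule described above (paths are illustrative):

.. code-block:: python

    from pathlib import Path
    from _pytest.pathlib import module_name_from_path

    print(module_name_from_path(Path("/src/pkg/__init__.py"), Path("/src")))  # "pkg"
    print(module_name_from_path(Path("/src/__init__.py"), Path("/src")))      # "__init__" (new case)
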

View File

@ -121,13 +121,18 @@ def pytest_configure(config: Config) -> None:
class LsofFdLeakChecker: class LsofFdLeakChecker:
def get_open_files(self) -> List[Tuple[str, str]]: def get_open_files(self) -> List[Tuple[str, str]]:
if sys.version_info >= (3, 11):
# New in Python 3.11, ignores utf-8 mode
encoding = locale.getencoding()
else:
encoding = locale.getpreferredencoding(False)
out = subprocess.run( out = subprocess.run(
("lsof", "-Ffn0", "-p", str(os.getpid())), ("lsof", "-Ffn0", "-p", str(os.getpid())),
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
check=True, check=True,
text=True, text=True,
encoding=locale.getpreferredencoding(False), encoding=encoding,
).stdout ).stdout
def isopen(line: str) -> bool: def isopen(line: str) -> bool:
@ -625,14 +630,6 @@ class RunResult:
) )
class CwdSnapshot:
def __init__(self) -> None:
self.__saved = os.getcwd()
def restore(self) -> None:
os.chdir(self.__saved)
class SysModulesSnapshot: class SysModulesSnapshot:
def __init__(self, preserve: Optional[Callable[[str], bool]] = None) -> None: def __init__(self, preserve: Optional[Callable[[str], bool]] = None) -> None:
self.__preserve = preserve self.__preserve = preserve
@ -696,15 +693,14 @@ class Pytester:
#: be added to the list. The type of items to add to the list depends on #: be added to the list. The type of items to add to the list depends on
#: the method using them so refer to them for details. #: the method using them so refer to them for details.
self.plugins: List[Union[str, _PluggyPlugin]] = [] self.plugins: List[Union[str, _PluggyPlugin]] = []
self._cwd_snapshot = CwdSnapshot()
self._sys_path_snapshot = SysPathsSnapshot() self._sys_path_snapshot = SysPathsSnapshot()
self._sys_modules_snapshot = self.__take_sys_modules_snapshot() self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
self.chdir()
self._request.addfinalizer(self._finalize) self._request.addfinalizer(self._finalize)
self._method = self._request.config.getoption("--runpytest") self._method = self._request.config.getoption("--runpytest")
self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True) self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True)
self._monkeypatch = mp = monkeypatch self._monkeypatch = mp = monkeypatch
self.chdir()
mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot)) mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot))
# Ensure no unexpected caching via tox. # Ensure no unexpected caching via tox.
mp.delenv("TOX_ENV_DIR", raising=False) mp.delenv("TOX_ENV_DIR", raising=False)
@ -735,7 +731,6 @@ class Pytester:
""" """
self._sys_modules_snapshot.restore() self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore() self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: def __take_sys_modules_snapshot(self) -> SysModulesSnapshot:
# Some zope modules used by twisted-related tests keep internal state # Some zope modules used by twisted-related tests keep internal state
@ -760,7 +755,7 @@ class Pytester:
This is done automatically upon instantiation. This is done automatically upon instantiation.
""" """
os.chdir(self.path) self._monkeypatch.chdir(self.path)
def _makefile( def _makefile(
self, self,
@ -1073,7 +1068,7 @@ class Pytester:
return self.inline_run(*values) return self.inline_run(*values)
def inline_genitems(self, *args) -> Tuple[List[Item], HookRecorder]: def inline_genitems(self, *args) -> Tuple[List[Item], HookRecorder]:
"""Run ``pytest.main(['--collectonly'])`` in-process. """Run ``pytest.main(['--collect-only'])`` in-process.
Runs the :py:func:`pytest.main` function to run all of pytest inside Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a the test process itself like :py:meth:`inline_run`, but returns a

View File

@ -1010,8 +1010,18 @@ class IdMaker:
# Suffix non-unique IDs to make them unique. # Suffix non-unique IDs to make them unique.
for index, id in enumerate(resolved_ids): for index, id in enumerate(resolved_ids):
if id_counts[id] > 1: if id_counts[id] > 1:
resolved_ids[index] = f"{id}{id_suffixes[id]}" suffix = ""
if id and id[-1].isdigit():
suffix = "_"
new_id = f"{id}{suffix}{id_suffixes[id]}"
while new_id in set(resolved_ids):
id_suffixes[id] += 1 id_suffixes[id] += 1
new_id = f"{id}{suffix}{id_suffixes[id]}"
resolved_ids[index] = new_id
id_suffixes[id] += 1
assert len(resolved_ids) == len(
set(resolved_ids)
), f"Internal error: {resolved_ids=}"
return resolved_ids return resolved_ids
def _resolve_ids(self) -> Iterable[str]: def _resolve_ids(self) -> Iterable[str]:
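
A hedged illustration of the de-duplication rule above: duplicated parametrize ids get a numeric suffix, with an extra "_" when the id already ends in a digit, so e.g. "1" and "10" stay distinguishable:

.. code-block:: python

    import pytest

    @pytest.mark.parametrize("a", [1, 2, 10, 11, 2, 1, 12, 11])
    def test_func(a):
        pass

    # expected ids (hedged guess): 1_0, 2_0, 10, 11_0, 2_1, 1_1, 12, 11_1
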

View File

@ -804,11 +804,13 @@ def raises( # noqa: F811
def raises( # noqa: F811 def raises( # noqa: F811
expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any
) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]: ) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]:
r"""Assert that a code block/function call raises an exception. r"""Assert that a code block/function call raises an exception type, or one of its subclasses.
:param typing.Type[E] | typing.Tuple[typing.Type[E], ...] expected_exception: :param typing.Type[E] | typing.Tuple[typing.Type[E], ...] expected_exception:
The expected exception type, or a tuple if one of multiple possible The expected exception type, or a tuple if one of multiple possible
exception types are expected. exception types are expected. Note that subclasses of the passed exceptions
will also match.
:kwparam str | typing.Pattern[str] | None match: :kwparam str | typing.Pattern[str] | None match:
If specified, a string containing a regular expression, If specified, a string containing a regular expression,
or a regular expression object, that is tested against the string or a regular expression object, that is tested against the string
@ -826,13 +828,13 @@ def raises( # noqa: F811
.. currentmodule:: _pytest._code .. currentmodule:: _pytest._code
Use ``pytest.raises`` as a context manager, which will capture the exception of the given Use ``pytest.raises`` as a context manager, which will capture the exception of the given
type:: type, or any of its subclasses::
>>> import pytest >>> import pytest
>>> with pytest.raises(ZeroDivisionError): >>> with pytest.raises(ZeroDivisionError):
... 1/0 ... 1/0
If the code block does not raise the expected exception (``ZeroDivisionError`` in the example If the code block does not raise the expected exception (:class:`ZeroDivisionError` in the example
above), or no exception at all, the check will fail instead. above), or no exception at all, the check will fail instead.
You can also use the keyword argument ``match`` to assert that the You can also use the keyword argument ``match`` to assert that the
@ -845,7 +847,7 @@ def raises( # noqa: F811
... raise ValueError("value must be 42") ... raise ValueError("value must be 42")
The ``match`` argument searches the formatted exception string, which includes any The ``match`` argument searches the formatted exception string, which includes any
`PEP-678 <https://peps.python.org/pep-0678/>` ``__notes__``: `PEP-678 <https://peps.python.org/pep-0678/>`__ ``__notes__``:
>>> with pytest.raises(ValueError, match=r'had a note added'): # doctest: +SKIP >>> with pytest.raises(ValueError, match=r'had a note added'): # doctest: +SKIP
... e = ValueError("value must be 42") ... e = ValueError("value must be 42")
@ -860,6 +862,20 @@ def raises( # noqa: F811
>>> assert exc_info.type is ValueError >>> assert exc_info.type is ValueError
>>> assert exc_info.value.args[0] == "value must be 42" >>> assert exc_info.value.args[0] == "value must be 42"
.. warning::
Given that ``pytest.raises`` matches subclasses, be wary of using it to match :class:`Exception` like this::
with pytest.raises(Exception): # Careful, this will catch ANY exception raised.
some_function()
Because :class:`Exception` is the base class of almost all exceptions, it is easy for this to hide
real bugs, where the user wrote this expecting a specific exception, but some other exception is being
raised due to a bug introduced during a refactoring.
Avoid using ``pytest.raises`` to catch :class:`Exception` unless certain that you really want to catch
**any** exception raised.
.. note:: .. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to When using ``pytest.raises`` as a context manager, it's worthwhile to
@ -872,7 +888,7 @@ def raises( # noqa: F811
>>> with pytest.raises(ValueError) as exc_info: >>> with pytest.raises(ValueError) as exc_info:
... if value > 10: ... if value > 10:
... raise ValueError("value must be <= 10") ... raise ValueError("value must be <= 10")
... assert exc_info.type is ValueError # this will not execute ... assert exc_info.type is ValueError # This will not execute.
Instead, the following approach must be taken (note the difference in Instead, the following approach must be taken (note the difference in
scope):: scope)::
@ -891,6 +907,10 @@ def raises( # noqa: F811
See :ref:`parametrizing_conditional_raising` for an example. See :ref:`parametrizing_conditional_raising` for an example.
.. seealso::
:ref:`assertraises` for more examples and detailed discussion.
**Legacy form** **Legacy form**
It is possible to specify a callable by passing a to-be-called lambda:: It is possible to specify a callable by passing a to-be-called lambda::
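The hunk is truncated here; as a hedged reminder of that legacy form (shown for context, not copied from this diff), the callable and its arguments are passed directly and an ``ExceptionInfo`` is returned::

    import pytest

    # Legacy form: raises(ExpectedException, callable, *args, **kwargs)
    excinfo = pytest.raises(ZeroDivisionError, lambda: 1 / 0)
    assert excinfo.type is ZeroDivisionError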

View File

@ -56,7 +56,7 @@ def deprecated_call( # noqa: F811
def deprecated_call( # noqa: F811 def deprecated_call( # noqa: F811
func: Optional[Callable[..., Any]] = None, *args: Any, **kwargs: Any func: Optional[Callable[..., Any]] = None, *args: Any, **kwargs: Any
) -> Union["WarningsRecorder", Any]: ) -> Union["WarningsRecorder", Any]:
"""Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning``. """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``.
This function can be used as a context manager:: This function can be used as a context manager::
@ -82,7 +82,9 @@ def deprecated_call( # noqa: F811
__tracebackhide__ = True __tracebackhide__ = True
if func is not None: if func is not None:
args = (func,) + args args = (func,) + args
return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) return warns(
(DeprecationWarning, PendingDeprecationWarning, FutureWarning), *args, **kwargs
)
@overload @overload
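A short usage sketch of the behaviour this hunk adds, namely that a ``FutureWarning`` now also satisfies ``deprecated_call`` (the ``old_api`` function is hypothetical)::

    import warnings

    import pytest

    def old_api() -> int:
        # Hypothetical function that emits a FutureWarning.
        warnings.warn("old_api() is deprecated, use new_api()", FutureWarning)
        return 42

    def test_old_api_warns():
        with pytest.deprecated_call():
            assert old_api() == 42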

View File

@ -8,3 +8,5 @@ import _pytest._py.path as path
sys.modules["py.error"] = error sys.modules["py.error"] = error
sys.modules["py.path"] = path sys.modules["py.path"] = path
__all__ = ["error", "path"]

View File

@ -868,6 +868,9 @@ class TestLocalPath(CommonFSTests):
py_path.strpath, str_path py_path.strpath, str_path
) )
@pytest.mark.xfail(
reason="#11603", raises=(error.EEXIST, error.ENOENT), strict=False
)
def test_make_numbered_dir_multiprocess_safe(self, tmpdir): def test_make_numbered_dir_multiprocess_safe(self, tmpdir):
# https://github.com/pytest-dev/py/issues/30 # https://github.com/pytest-dev/py/issues/30
with multiprocessing.Pool() as pool: with multiprocessing.Pool() as pool:
@ -1080,14 +1083,14 @@ class TestImport:
name = "pointsback123" name = "pointsback123"
ModuleType = type(os) ModuleType = type(os)
p = tmpdir.ensure(name + ".py") p = tmpdir.ensure(name + ".py")
with monkeypatch.context() as mp:
for ending in (".pyc", "$py.class", ".pyo"): for ending in (".pyc", "$py.class", ".pyo"):
mod = ModuleType(name) mod = ModuleType(name)
pseudopath = tmpdir.ensure(name + ending) pseudopath = tmpdir.ensure(name + ending)
mod.__file__ = str(pseudopath) mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod) mp.setitem(sys.modules, name, mod)
newmod = p.pyimport() newmod = p.pyimport()
assert mod == newmod assert mod == newmod
monkeypatch.undo()
mod = ModuleType(name) mod = ModuleType(name)
pseudopath = tmpdir.ensure(name + "123.py") pseudopath = tmpdir.ensure(name + "123.py")
mod.__file__ = str(pseudopath) mod.__file__ = str(pseudopath)

View File

@ -341,6 +341,45 @@ class TestGeneralUsage:
assert res.ret == 0 assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"]) res.stdout.fnmatch_lines(["*1 passed*"])
def test_direct_addressing_selects_duplicates(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("a", [1, 2, 10, 11, 2, 1, 12, 11])
def test_func(a):
pass
"""
)
result = pytester.runpytest(p)
result.assert_outcomes(failed=0, passed=8)
def test_direct_addressing_selects_duplicates_1(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("a", [1, 2, 10, 11, 2, 1, 12, 1_1,2_1])
def test_func(a):
pass
"""
)
result = pytester.runpytest(p)
result.assert_outcomes(failed=0, passed=9)
def test_direct_addressing_selects_duplicates_2(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("a", ["a","b","c","a","a1"])
def test_func(a):
pass
"""
)
result = pytester.runpytest(p)
result.assert_outcomes(failed=0, passed=5)
def test_direct_addressing_notfound(self, pytester: Pytester) -> None: def test_direct_addressing_notfound(self, pytester: Pytester) -> None:
p = pytester.makepyfile( p = pytester.makepyfile(
""" """

View File

@ -27,6 +27,9 @@ from _pytest.pytester import Pytester
if TYPE_CHECKING: if TYPE_CHECKING:
from _pytest._code.code import _TracebackStyle from _pytest._code.code import _TracebackStyle
if sys.version_info[:2] < (3, 11):
from exceptiongroup import ExceptionGroup
@pytest.fixture @pytest.fixture
def limited_recursion_depth(): def limited_recursion_depth():
@ -444,6 +447,92 @@ def test_match_raises_error(pytester: Pytester) -> None:
result.stdout.re_match_lines([r".*__tracebackhide__ = True.*", *match]) result.stdout.re_match_lines([r".*__tracebackhide__ = True.*", *match])
class TestGroupContains:
def test_contains_exception_type(self) -> None:
exc_group = ExceptionGroup("", [RuntimeError()])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert exc_info.group_contains(RuntimeError)
def test_doesnt_contain_exception_type(self) -> None:
exc_group = ExceptionGroup("", [ValueError()])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert not exc_info.group_contains(RuntimeError)
def test_contains_exception_match(self) -> None:
exc_group = ExceptionGroup("", [RuntimeError("exception message")])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert exc_info.group_contains(RuntimeError, match=r"^exception message$")
def test_doesnt_contain_exception_match(self) -> None:
exc_group = ExceptionGroup("", [RuntimeError("message that will not match")])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert not exc_info.group_contains(RuntimeError, match=r"^exception message$")
def test_contains_exception_type_unlimited_depth(self) -> None:
exc_group = ExceptionGroup("", [ExceptionGroup("", [RuntimeError()])])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert exc_info.group_contains(RuntimeError)
def test_contains_exception_type_at_depth_1(self) -> None:
exc_group = ExceptionGroup("", [RuntimeError()])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert exc_info.group_contains(RuntimeError, depth=1)
def test_doesnt_contain_exception_type_past_depth(self) -> None:
exc_group = ExceptionGroup("", [ExceptionGroup("", [RuntimeError()])])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert not exc_info.group_contains(RuntimeError, depth=1)
def test_contains_exception_type_specific_depth(self) -> None:
exc_group = ExceptionGroup("", [ExceptionGroup("", [RuntimeError()])])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert exc_info.group_contains(RuntimeError, depth=2)
def test_contains_exception_match_unlimited_depth(self) -> None:
exc_group = ExceptionGroup(
"", [ExceptionGroup("", [RuntimeError("exception message")])]
)
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert exc_info.group_contains(RuntimeError, match=r"^exception message$")
def test_contains_exception_match_at_depth_1(self) -> None:
exc_group = ExceptionGroup("", [RuntimeError("exception message")])
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert exc_info.group_contains(
RuntimeError, match=r"^exception message$", depth=1
)
def test_doesnt_contain_exception_match_past_depth(self) -> None:
exc_group = ExceptionGroup(
"", [ExceptionGroup("", [RuntimeError("exception message")])]
)
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert not exc_info.group_contains(
RuntimeError, match=r"^exception message$", depth=1
)
def test_contains_exception_match_specific_depth(self) -> None:
exc_group = ExceptionGroup(
"", [ExceptionGroup("", [RuntimeError("exception message")])]
)
with pytest.raises(ExceptionGroup) as exc_info:
raise exc_group
assert exc_info.group_contains(
RuntimeError, match=r"^exception message$", depth=2
)
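Condensed into a single hedged sketch, the API these tests exercise looks roughly like this (on Python < 3.11 the ``exceptiongroup`` backport supplies ``ExceptionGroup``, as the import hunk above shows)::

    import pytest

    def test_group_contains_usage():
        with pytest.raises(ExceptionGroup) as exc_info:
            raise ExceptionGroup(
                "outer", [ExceptionGroup("inner", [RuntimeError("boom")])]
            )
        # Unlimited depth by default; match= is tested against the message.
        assert exc_info.group_contains(RuntimeError, match=r"boom")
        # depth=1 restricts the search to direct children of the outer group.
        assert not exc_info.group_contains(RuntimeError, depth=1)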
class TestFormattedExcinfo: class TestFormattedExcinfo:
@pytest.fixture @pytest.fixture
def importasmod(self, tmp_path: Path, _sys_snapshot): def importasmod(self, tmp_path: Path, _sys_snapshot):
@ -765,7 +854,11 @@ raise ValueError()
reprtb = p.repr_traceback(excinfo) reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 3 assert len(reprtb.reprentries) == 3
def test_traceback_short_no_source(self, importasmod, monkeypatch) -> None: def test_traceback_short_no_source(
self,
importasmod,
monkeypatch: pytest.MonkeyPatch,
) -> None:
mod = importasmod( mod = importasmod(
""" """
def func1(): def func1():
@ -777,14 +870,14 @@ raise ValueError()
excinfo = pytest.raises(ValueError, mod.entry) excinfo = pytest.raises(ValueError, mod.entry)
from _pytest._code.code import Code from _pytest._code.code import Code
monkeypatch.setattr(Code, "path", "bogus") with monkeypatch.context() as mp:
mp.setattr(Code, "path", "bogus")
p = FormattedExcinfo(style="short") p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2]) reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines lines = reprtb.lines
last_p = FormattedExcinfo(style="short") last_p = FormattedExcinfo(style="short")
last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo) last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
last_lines = last_reprtb.lines last_lines = last_reprtb.lines
monkeypatch.undo()
assert lines[0] == " func1()" assert lines[0] == " func1()"
assert last_lines[0] == ' raise ValueError("hello")' assert last_lines[0] == ' raise ValueError("hello")'

View File

@ -22,6 +22,26 @@ if sys.gettrace():
sys.settrace(orig_trace) sys.settrace(orig_trace)
@pytest.fixture(autouse=True)
def set_column_width(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Force terminal width to 80: some tests check the formatting of --help, which is sensitive
to terminal width.
"""
monkeypatch.setenv("COLUMNS", "80")
@pytest.fixture(autouse=True)
def reset_colors(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Reset all color-related variables to prevent them from affecting internal pytest output
in tests that depend on it.
"""
monkeypatch.delenv("PY_COLORS", raising=False)
monkeypatch.delenv("NO_COLOR", raising=False)
monkeypatch.delenv("FORCE_COLOR", raising=False)
@pytest.hookimpl(wrapper=True, tryfirst=True) @pytest.hookimpl(wrapper=True, tryfirst=True)
def pytest_collection_modifyitems(items) -> Generator[None, None, None]: def pytest_collection_modifyitems(items) -> Generator[None, None, None]:
"""Prefer faster tests. """Prefer faster tests.
@ -151,6 +171,9 @@ def color_mapping():
"red": "\x1b[31m", "red": "\x1b[31m",
"green": "\x1b[32m", "green": "\x1b[32m",
"yellow": "\x1b[33m", "yellow": "\x1b[33m",
"light-gray": "\x1b[90m",
"light-red": "\x1b[91m",
"light-green": "\x1b[92m",
"bold": "\x1b[1m", "bold": "\x1b[1m",
"reset": "\x1b[0m", "reset": "\x1b[0m",
"kw": "\x1b[94m", "kw": "\x1b[94m",
@ -162,6 +185,7 @@ def color_mapping():
"endline": "\x1b[90m\x1b[39;49;00m", "endline": "\x1b[90m\x1b[39;49;00m",
} }
RE_COLORS = {k: re.escape(v) for k, v in COLORS.items()} RE_COLORS = {k: re.escape(v) for k, v in COLORS.items()}
NO_COLORS = {k: "" for k in COLORS.keys()}
@classmethod @classmethod
def format(cls, lines: List[str]) -> List[str]: def format(cls, lines: List[str]) -> List[str]:
@ -178,6 +202,11 @@ def color_mapping():
"""Replace color names for use with LineMatcher.re_match_lines""" """Replace color names for use with LineMatcher.re_match_lines"""
return [line.format(**cls.RE_COLORS) for line in lines] return [line.format(**cls.RE_COLORS) for line in lines]
@classmethod
def strip_colors(cls, lines: List[str]) -> List[str]:
"""Entirely remove every color code"""
return [line.format(**cls.NO_COLORS) for line in lines]
return ColorMapping return ColorMapping

testing/io/test_pprint.py Normal file (406 lines added)
View File

@ -0,0 +1,406 @@
import textwrap
from collections import ChainMap
from collections import Counter
from collections import defaultdict
from collections import deque
from collections import OrderedDict
from dataclasses import dataclass
from types import MappingProxyType
from types import SimpleNamespace
from typing import Any
import pytest
from _pytest._io.pprint import PrettyPrinter
@dataclass
class EmptyDataclass:
pass
@dataclass
class DataclassWithOneItem:
foo: str
@dataclass
class DataclassWithTwoItems:
foo: str
bar: str
@pytest.mark.parametrize(
("data", "expected"),
(
pytest.param(
EmptyDataclass(),
"EmptyDataclass()",
id="dataclass-empty",
),
pytest.param(
DataclassWithOneItem(foo="bar"),
"""
DataclassWithOneItem(
foo='bar',
)
""",
id="dataclass-one-item",
),
pytest.param(
DataclassWithTwoItems(foo="foo", bar="bar"),
"""
DataclassWithTwoItems(
foo='foo',
bar='bar',
)
""",
id="dataclass-two-items",
),
pytest.param(
{},
"{}",
id="dict-empty",
),
pytest.param(
{"one": 1},
"""
{
'one': 1,
}
""",
id="dict-one-item",
),
pytest.param(
{"one": 1, "two": 2},
"""
{
'one': 1,
'two': 2,
}
""",
id="dict-two-items",
),
pytest.param(OrderedDict(), "OrderedDict()", id="ordereddict-empty"),
pytest.param(
OrderedDict({"one": 1}),
"""
OrderedDict({
'one': 1,
})
""",
id="ordereddict-one-item",
),
pytest.param(
OrderedDict({"one": 1, "two": 2}),
"""
OrderedDict({
'one': 1,
'two': 2,
})
""",
id="ordereddict-two-items",
),
pytest.param(
[],
"[]",
id="list-empty",
),
pytest.param(
[1],
"""
[
1,
]
""",
id="list-one-item",
),
pytest.param(
[1, 2],
"""
[
1,
2,
]
""",
id="list-two-items",
),
pytest.param(
tuple(),
"()",
id="tuple-empty",
),
pytest.param(
(1,),
"""
(
1,
)
""",
id="tuple-one-item",
),
pytest.param(
(1, 2),
"""
(
1,
2,
)
""",
id="tuple-two-items",
),
pytest.param(
set(),
"set()",
id="set-empty",
),
pytest.param(
{1},
"""
{
1,
}
""",
id="set-one-item",
),
pytest.param(
{1, 2},
"""
{
1,
2,
}
""",
id="set-two-items",
),
pytest.param(
MappingProxyType({}),
"mappingproxy({})",
id="mappingproxy-empty",
),
pytest.param(
MappingProxyType({"one": 1}),
"""
mappingproxy({
'one': 1,
})
""",
id="mappingproxy-one-item",
),
pytest.param(
MappingProxyType({"one": 1, "two": 2}),
"""
mappingproxy({
'one': 1,
'two': 2,
})
""",
id="mappingproxy-two-items",
),
pytest.param(
SimpleNamespace(),
"namespace()",
id="simplenamespace-empty",
),
pytest.param(
SimpleNamespace(one=1),
"""
namespace(
one=1,
)
""",
id="simplenamespace-one-item",
),
pytest.param(
SimpleNamespace(one=1, two=2),
"""
namespace(
one=1,
two=2,
)
""",
id="simplenamespace-two-items",
),
pytest.param(
defaultdict(str), "defaultdict(<class 'str'>, {})", id="defaultdict-empty"
),
pytest.param(
defaultdict(str, {"one": "1"}),
"""
defaultdict(<class 'str'>, {
'one': '1',
})
""",
id="defaultdict-one-item",
),
pytest.param(
defaultdict(str, {"one": "1", "two": "2"}),
"""
defaultdict(<class 'str'>, {
'one': '1',
'two': '2',
})
""",
id="defaultdict-two-items",
),
pytest.param(
Counter(),
"Counter()",
id="counter-empty",
),
pytest.param(
Counter("1"),
"""
Counter({
'1': 1,
})
""",
id="counter-one-item",
),
pytest.param(
Counter("121"),
"""
Counter({
'1': 2,
'2': 1,
})
""",
id="counter-two-items",
),
pytest.param(ChainMap(), "ChainMap({})", id="chainmap-empty"),
pytest.param(
ChainMap({"one": 1, "two": 2}),
"""
ChainMap(
{
'one': 1,
'two': 2,
},
)
""",
id="chainmap-one-item",
),
pytest.param(
ChainMap({"one": 1}, {"two": 2}),
"""
ChainMap(
{
'one': 1,
},
{
'two': 2,
},
)
""",
id="chainmap-two-items",
),
pytest.param(
deque(),
"deque([])",
id="deque-empty",
),
pytest.param(
deque([1]),
"""
deque([
1,
])
""",
id="deque-one-item",
),
pytest.param(
deque([1, 2]),
"""
deque([
1,
2,
])
""",
id="deque-two-items",
),
pytest.param(
deque([1, 2], maxlen=3),
"""
deque(maxlen=3, [
1,
2,
])
""",
id="deque-maxlen",
),
pytest.param(
{
"chainmap": ChainMap({"one": 1}, {"two": 2}),
"counter": Counter("122"),
"dataclass": DataclassWithTwoItems(foo="foo", bar="bar"),
"defaultdict": defaultdict(str, {"one": "1", "two": "2"}),
"deque": deque([1, 2], maxlen=3),
"dict": {"one": 1, "two": 2},
"list": [1, 2],
"mappingproxy": MappingProxyType({"one": 1, "two": 2}),
"ordereddict": OrderedDict({"one": 1, "two": 2}),
"set": {1, 2},
"simplenamespace": SimpleNamespace(one=1, two=2),
"tuple": (1, 2),
},
"""
{
'chainmap': ChainMap(
{
'one': 1,
},
{
'two': 2,
},
),
'counter': Counter({
'2': 2,
'1': 1,
}),
'dataclass': DataclassWithTwoItems(
foo='foo',
bar='bar',
),
'defaultdict': defaultdict(<class 'str'>, {
'one': '1',
'two': '2',
}),
'deque': deque(maxlen=3, [
1,
2,
]),
'dict': {
'one': 1,
'two': 2,
},
'list': [
1,
2,
],
'mappingproxy': mappingproxy({
'one': 1,
'two': 2,
}),
'ordereddict': OrderedDict({
'one': 1,
'two': 2,
}),
'set': {
1,
2,
},
'simplenamespace': namespace(
one=1,
two=2,
),
'tuple': (
1,
2,
),
}
""",
id="deep-example",
),
),
)
def test_consistent_pretty_printer(data: Any, expected: str) -> None:
assert PrettyPrinter().pformat(data) == textwrap.dedent(expected).strip()

View File

@ -1,5 +1,4 @@
import pytest import pytest
from _pytest._io.saferepr import _pformat_dispatch
from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE
from _pytest._io.saferepr import saferepr from _pytest._io.saferepr import saferepr
from _pytest._io.saferepr import saferepr_unlimited from _pytest._io.saferepr import saferepr_unlimited
@ -159,12 +158,6 @@ def test_unicode():
assert saferepr(val) == reprval assert saferepr(val) == reprval
def test_pformat_dispatch():
assert _pformat_dispatch("a") == "'a'"
assert _pformat_dispatch("a" * 10, width=5) == "'aaaaaaaaaa'"
assert _pformat_dispatch("foo bar", width=5) == "('foo '\n 'bar')"
def test_broken_getattribute(): def test_broken_getattribute():
"""saferepr() can create proper representations of classes with """saferepr() can create proper representations of classes with
broken __getattribute__ (#7145) broken __getattribute__ (#7145)

View File

@ -1,5 +1,7 @@
# mypy: disable-error-code="attr-defined" # mypy: disable-error-code="attr-defined"
# mypy: disallow-untyped-defs
import logging import logging
from typing import Iterator
import pytest import pytest
from _pytest.logging import caplog_records_key from _pytest.logging import caplog_records_key
@ -9,8 +11,8 @@ logger = logging.getLogger(__name__)
sublogger = logging.getLogger(__name__ + ".baz") sublogger = logging.getLogger(__name__ + ".baz")
@pytest.fixture @pytest.fixture(autouse=True)
def cleanup_disabled_logging(): def cleanup_disabled_logging() -> Iterator[None]:
"""Simple fixture that ensures that a test doesn't disable logging. """Simple fixture that ensures that a test doesn't disable logging.
This is necessary because ``logging.disable()`` is global, so a test disabling logging This is necessary because ``logging.disable()`` is global, so a test disabling logging
@ -27,7 +29,7 @@ def test_fixture_help(pytester: Pytester) -> None:
result.stdout.fnmatch_lines(["*caplog*"]) result.stdout.fnmatch_lines(["*caplog*"])
def test_change_level(caplog): def test_change_level(caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.INFO) caplog.set_level(logging.INFO)
logger.debug("handler DEBUG level") logger.debug("handler DEBUG level")
logger.info("handler INFO level") logger.info("handler INFO level")
@ -42,7 +44,7 @@ def test_change_level(caplog):
assert "CRITICAL" in caplog.text assert "CRITICAL" in caplog.text
def test_change_level_logging_disabled(caplog, cleanup_disabled_logging): def test_change_level_logging_disabled(caplog: pytest.LogCaptureFixture) -> None:
logging.disable(logging.CRITICAL) logging.disable(logging.CRITICAL)
assert logging.root.manager.disable == logging.CRITICAL assert logging.root.manager.disable == logging.CRITICAL
caplog.set_level(logging.WARNING) caplog.set_level(logging.WARNING)
@ -85,9 +87,7 @@ def test_change_level_undo(pytester: Pytester) -> None:
result.stdout.no_fnmatch_line("*log from test2*") result.stdout.no_fnmatch_line("*log from test2*")
def test_change_disabled_level_undo( def test_change_disabled_level_undo(pytester: Pytester) -> None:
pytester: Pytester, cleanup_disabled_logging
) -> None:
"""Ensure that '_force_enable_logging' in 'set_level' is undone after the end of the test. """Ensure that '_force_enable_logging' in 'set_level' is undone after the end of the test.
Tests the logging output themselves (affected by disabled logging level). Tests the logging output themselves (affected by disabled logging level).
@ -144,7 +144,7 @@ def test_change_level_undos_handler_level(pytester: Pytester) -> None:
result.assert_outcomes(passed=3) result.assert_outcomes(passed=3)
def test_with_statement(caplog): def test_with_statement_at_level(caplog: pytest.LogCaptureFixture) -> None:
with caplog.at_level(logging.INFO): with caplog.at_level(logging.INFO):
logger.debug("handler DEBUG level") logger.debug("handler DEBUG level")
logger.info("handler INFO level") logger.info("handler INFO level")
@ -159,7 +159,9 @@ def test_with_statement(caplog):
assert "CRITICAL" in caplog.text assert "CRITICAL" in caplog.text
def test_with_statement_logging_disabled(caplog, cleanup_disabled_logging): def test_with_statement_at_level_logging_disabled(
caplog: pytest.LogCaptureFixture,
) -> None:
logging.disable(logging.CRITICAL) logging.disable(logging.CRITICAL)
assert logging.root.manager.disable == logging.CRITICAL assert logging.root.manager.disable == logging.CRITICAL
with caplog.at_level(logging.WARNING): with caplog.at_level(logging.WARNING):
@ -185,6 +187,22 @@ def test_with_statement_logging_disabled(caplog, cleanup_disabled_logging):
assert logging.root.manager.disable == logging.CRITICAL assert logging.root.manager.disable == logging.CRITICAL
def test_with_statement_filtering(caplog: pytest.LogCaptureFixture) -> None:
class TestFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
record.msg = "filtered handler call"
return True
with caplog.at_level(logging.INFO):
with caplog.filtering(TestFilter()):
logger.info("handler call")
logger.info("handler call")
filtered_tuple, unfiltered_tuple = caplog.record_tuples
assert filtered_tuple == ("test_fixture", 20, "filtered handler call")
assert unfiltered_tuple == ("test_fixture", 20, "handler call")
@pytest.mark.parametrize( @pytest.mark.parametrize(
"level_str,expected_disable_level", "level_str,expected_disable_level",
[ [
@ -198,8 +216,8 @@ def test_with_statement_logging_disabled(caplog, cleanup_disabled_logging):
], ],
) )
def test_force_enable_logging_level_string( def test_force_enable_logging_level_string(
caplog, cleanup_disabled_logging, level_str, expected_disable_level caplog: pytest.LogCaptureFixture, level_str: str, expected_disable_level: int
): ) -> None:
"""Test _force_enable_logging using a level string. """Test _force_enable_logging using a level string.
``expected_disable_level`` is one level below ``level_str`` because the disabled log level ``expected_disable_level`` is one level below ``level_str`` because the disabled log level
@ -218,7 +236,7 @@ def test_force_enable_logging_level_string(
assert test_logger.manager.disable == expected_disable_level assert test_logger.manager.disable == expected_disable_level
def test_log_access(caplog): def test_log_access(caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.INFO) caplog.set_level(logging.INFO)
logger.info("boo %s", "arg") logger.info("boo %s", "arg")
assert caplog.records[0].levelname == "INFO" assert caplog.records[0].levelname == "INFO"
@ -226,7 +244,7 @@ def test_log_access(caplog):
assert "boo arg" in caplog.text assert "boo arg" in caplog.text
def test_messages(caplog): def test_messages(caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.INFO) caplog.set_level(logging.INFO)
logger.info("boo %s", "arg") logger.info("boo %s", "arg")
logger.info("bar %s\nbaz %s", "arg1", "arg2") logger.info("bar %s\nbaz %s", "arg1", "arg2")
@ -247,14 +265,14 @@ def test_messages(caplog):
assert "Exception" not in caplog.messages[-1] assert "Exception" not in caplog.messages[-1]
def test_record_tuples(caplog): def test_record_tuples(caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.INFO) caplog.set_level(logging.INFO)
logger.info("boo %s", "arg") logger.info("boo %s", "arg")
assert caplog.record_tuples == [(__name__, logging.INFO, "boo arg")] assert caplog.record_tuples == [(__name__, logging.INFO, "boo arg")]
def test_unicode(caplog): def test_unicode(caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.INFO) caplog.set_level(logging.INFO)
logger.info("") logger.info("")
assert caplog.records[0].levelname == "INFO" assert caplog.records[0].levelname == "INFO"
@ -262,7 +280,7 @@ def test_unicode(caplog):
assert "" in caplog.text assert "" in caplog.text
def test_clear(caplog): def test_clear(caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.INFO) caplog.set_level(logging.INFO)
logger.info("") logger.info("")
assert len(caplog.records) assert len(caplog.records)
@ -273,7 +291,9 @@ def test_clear(caplog):
@pytest.fixture @pytest.fixture
def logging_during_setup_and_teardown(caplog): def logging_during_setup_and_teardown(
caplog: pytest.LogCaptureFixture,
) -> Iterator[None]:
caplog.set_level("INFO") caplog.set_level("INFO")
logger.info("a_setup_log") logger.info("a_setup_log")
yield yield
@ -281,7 +301,9 @@ def logging_during_setup_and_teardown(caplog):
assert [x.message for x in caplog.get_records("teardown")] == ["a_teardown_log"] assert [x.message for x in caplog.get_records("teardown")] == ["a_teardown_log"]
def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown): def test_caplog_captures_for_all_stages(
caplog: pytest.LogCaptureFixture, logging_during_setup_and_teardown: None
) -> None:
assert not caplog.records assert not caplog.records
assert not caplog.get_records("call") assert not caplog.get_records("call")
logger.info("a_call_log") logger.info("a_call_log")
@ -290,25 +312,31 @@ def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardow
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"] assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
# This reaches into private API, don't use this type of thing in real tests! # This reaches into private API, don't use this type of thing in real tests!
assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"} caplog_records = caplog._item.stash[caplog_records_key]
assert set(caplog_records) == {"setup", "call"}
def test_clear_for_call_stage(caplog, logging_during_setup_and_teardown): def test_clear_for_call_stage(
caplog: pytest.LogCaptureFixture, logging_during_setup_and_teardown: None
) -> None:
logger.info("a_call_log") logger.info("a_call_log")
assert [x.message for x in caplog.get_records("call")] == ["a_call_log"] assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"] assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"} caplog_records = caplog._item.stash[caplog_records_key]
assert set(caplog_records) == {"setup", "call"}
caplog.clear() caplog.clear()
assert caplog.get_records("call") == [] assert caplog.get_records("call") == []
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"] assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"} caplog_records = caplog._item.stash[caplog_records_key]
assert set(caplog_records) == {"setup", "call"}
logging.info("a_call_log_after_clear") logging.info("a_call_log_after_clear")
assert [x.message for x in caplog.get_records("call")] == ["a_call_log_after_clear"] assert [x.message for x in caplog.get_records("call")] == ["a_call_log_after_clear"]
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"] assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
assert set(caplog._item.stash[caplog_records_key]) == {"setup", "call"} caplog_records = caplog._item.stash[caplog_records_key]
assert set(caplog_records) == {"setup", "call"}
def test_ini_controls_global_log_level(pytester: Pytester) -> None: def test_ini_controls_global_log_level(pytester: Pytester) -> None:

View File

@ -77,14 +77,14 @@ def test_root_logger_affected(pytester: Pytester) -> None:
assert "warning text going to logger" not in stdout assert "warning text going to logger" not in stdout
assert "info text going to logger" not in stdout assert "info text going to logger" not in stdout
# The log file should contain the warning and the error log messages and # The log file should only contain the error log messages and
# not the info one, because the default level of the root logger is # not the warning or info ones, because the root logger is set to
# WARNING. # ERROR using --log-level=ERROR.
assert os.path.isfile(log_file) assert os.path.isfile(log_file)
with open(log_file, encoding="utf-8") as rfh: with open(log_file, encoding="utf-8") as rfh:
contents = rfh.read() contents = rfh.read()
assert "info text going to logger" not in contents assert "info text going to logger" not in contents
assert "warning text going to logger" in contents assert "warning text going to logger" not in contents
assert "error text going to logger" in contents assert "error text going to logger" in contents
@ -1331,3 +1331,62 @@ def test_date_format_percentf_tz_log(pytester: Pytester) -> None:
result.stdout.re_match_lines( result.stdout.re_match_lines(
[r"^[0-9-]{10} [0-9:]{8}.[0-9]{6}[+-][0-9\.]+; WARNING; text"] [r"^[0-9-]{10} [0-9:]{8}.[0-9]{6}[+-][0-9\.]+; WARNING; text"]
) )
def test_log_file_cli_fallback_options(pytester: Pytester) -> None:
"""Make sure that fallback values for log-file formats and level works."""
pytester.makepyfile(
"""
import logging
logger = logging.getLogger()
def test_foo():
logger.info('info text going to logger')
logger.warning('warning text going to logger')
logger.error('error text going to logger')
assert 0
"""
)
log_file = str(pytester.path.joinpath("pytest.log"))
result = pytester.runpytest(
"--log-level=ERROR",
"--log-format=%(asctime)s %(message)s",
"--log-date-format=%H:%M",
"--log-file=pytest.log",
)
assert result.ret == 1
# The log file should only contain the error log messages
# not the warning or info ones and the format and date format
# should match the formats provided using --log-format and --log-date-format
assert os.path.isfile(log_file)
with open(log_file, encoding="utf-8") as rfh:
contents = rfh.read()
assert re.match(r"[0-9]{2}:[0-9]{2} error text going to logger\s*", contents)
assert "info text going to logger" not in contents
assert "warning text going to logger" not in contents
assert "error text going to logger" in contents
# Try with a different format and date format to make sure that the formats
# are being used
result = pytester.runpytest(
"--log-level=ERROR",
"--log-format=%(asctime)s : %(message)s",
"--log-date-format=%H:%M:%S",
"--log-file=pytest.log",
)
assert result.ret == 1
# The log file should only contain the error log messages
# not the warning or info ones and the format and date format
# should match the formats provided using --log-format and --log-date-format
assert os.path.isfile(log_file)
with open(log_file, encoding="utf-8") as rfh:
contents = rfh.read()
assert re.match(
r"[0-9]{2}:[0-9]{2}:[0-9]{2} : error text going to logger\s*", contents
)
assert "info text going to logger" not in contents
assert "warning text going to logger" not in contents
assert "error text going to logger" in contents

View File

@ -1,15 +1,15 @@
anyio[curio,trio]==4.0.0 anyio[curio,trio]==4.1.0
django==4.2.4 django==4.2.7
pytest-asyncio==0.21.1 pytest-asyncio==0.23.1
pytest-bdd==6.1.1 pytest-bdd==7.0.1
pytest-cov==4.1.0 pytest-cov==4.1.0
pytest-django==4.5.2 pytest-django==4.7.0
pytest-flakes==4.0.5 pytest-flakes==4.0.5
pytest-html==4.0.0 pytest-html==4.1.1
pytest-mock==3.11.1 pytest-mock==3.12.0
pytest-rerunfailures==12.0 pytest-rerunfailures==13.0
pytest-sugar==0.9.7 pytest-sugar==0.9.7
pytest-trio==0.7.0 pytest-trio==0.7.0
pytest-twisted==1.14.0 pytest-twisted==1.14.0
twisted==23.8.0 twisted==23.10.0
pytest-xvfb==3.0.0 pytest-xvfb==3.0.0

View File

@ -776,13 +776,13 @@ class TestSorting:
pytester.makepyfile( pytester.makepyfile(
"""\ """\
class Test1: class Test1:
def test_foo(): pass def test_foo(self): pass
def test_bar(): pass def test_bar(self): pass
class Test2: class Test2:
def test_foo(): pass def test_foo(self): pass
test_bar = Test1.test_bar test_bar = Test1.test_bar
class Test3(Test2): class Test3(Test2):
def test_baz(): pass def test_baz(self): pass
""" """
) )
result = pytester.runpytest("--collect-only") result = pytester.runpytest("--collect-only")

View File

@ -626,6 +626,13 @@ class TestMetafunc:
).make_unique_parameterset_ids() ).make_unique_parameterset_ids()
assert result == [expected] assert result == [expected]
def test_idmaker_duplicated_empty_str(self) -> None:
"""Regression test for empty strings parametrized more than once (#11563)."""
result = IdMaker(
("a",), [pytest.param(""), pytest.param("")], None, None, None, None, None
).make_unique_parameterset_ids()
assert result == ["0", "1"]
def test_parametrize_ids_exception(self, pytester: Pytester) -> None: def test_parametrize_ids_exception(self, pytester: Pytester) -> None:
""" """
:param pytester: the instance of Pytester class, a temporary :param pytester: the instance of Pytester class, a temporary
@ -1518,7 +1525,7 @@ class TestMetafuncFunctional:
pass pass
""" """
) )
result = pytester.runpytest("--collectonly") result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines( result.stdout.fnmatch_lines(
[ [
"collected 0 items / 1 error", "collected 0 items / 1 error",

View File

@ -13,20 +13,68 @@ import pytest
from _pytest import outcomes from _pytest import outcomes
from _pytest.assertion import truncate from _pytest.assertion import truncate
from _pytest.assertion import util from _pytest.assertion import util
from _pytest.config import Config as _Config
from _pytest.monkeypatch import MonkeyPatch from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester from _pytest.pytester import Pytester
def mock_config(verbose=0): def mock_config(verbose: int = 0, assertion_override: Optional[int] = None):
class TerminalWriter:
def _highlight(self, source, lexer):
return source
class Config: class Config:
def getoption(self, name): def get_terminal_writer(self):
if name == "verbose": return TerminalWriter()
def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:
if verbosity_type is None:
return verbose return verbose
raise KeyError("Not mocked out: %s" % name) if verbosity_type == _Config.VERBOSITY_ASSERTIONS:
if assertion_override is not None:
return assertion_override
return verbose
raise KeyError(f"Not mocked out: {verbosity_type}")
return Config() return Config()
class TestMockConfig:
SOME_VERBOSITY_LEVEL = 3
SOME_OTHER_VERBOSITY_LEVEL = 10
def test_verbose_exposes_value(self):
config = mock_config(verbose=TestMockConfig.SOME_VERBOSITY_LEVEL)
assert config.get_verbosity() == TestMockConfig.SOME_VERBOSITY_LEVEL
def test_get_assertion_override_not_set_verbose_value(self):
config = mock_config(verbose=TestMockConfig.SOME_VERBOSITY_LEVEL)
assert (
config.get_verbosity(_Config.VERBOSITY_ASSERTIONS)
== TestMockConfig.SOME_VERBOSITY_LEVEL
)
def test_get_assertion_override_set_custom_value(self):
config = mock_config(
verbose=TestMockConfig.SOME_VERBOSITY_LEVEL,
assertion_override=TestMockConfig.SOME_OTHER_VERBOSITY_LEVEL,
)
assert (
config.get_verbosity(_Config.VERBOSITY_ASSERTIONS)
== TestMockConfig.SOME_OTHER_VERBOSITY_LEVEL
)
def test_get_unsupported_type_error(self):
config = mock_config(verbose=TestMockConfig.SOME_VERBOSITY_LEVEL)
with pytest.raises(KeyError):
config.get_verbosity("--- NOT A VERBOSITY LEVEL ---")
class TestImportHookInstallation: class TestImportHookInstallation:
@pytest.mark.parametrize("initial_conftest", [True, False]) @pytest.mark.parametrize("initial_conftest", [True, False])
@pytest.mark.parametrize("mode", ["plain", "rewrite"]) @pytest.mark.parametrize("mode", ["plain", "rewrite"])
@ -403,10 +451,13 @@ class TestAssert_reprcompare:
[0, 2], [0, 2],
""" """
Full diff: Full diff:
- [0, 2] [
0,
- 2,
? ^ ? ^
+ [0, 1] + 1,
? ^ ? ^
]
""", """,
id="lists", id="lists",
), ),
@ -415,10 +466,12 @@ class TestAssert_reprcompare:
{0: 2}, {0: 2},
""" """
Full diff: Full diff:
- {0: 2} {
- 0: 2,
? ^ ? ^
+ {0: 1} + 0: 1,
? ^ ? ^
}
""", """,
id="dicts", id="dicts",
), ),
@ -427,10 +480,13 @@ class TestAssert_reprcompare:
{0, 2}, {0, 2},
""" """
Full diff: Full diff:
- {0, 2} {
0,
- 2,
? ^ ? ^
+ {0, 1} + 1,
? ^ ? ^
}
""", """,
id="sets", id="sets",
), ),
@ -567,13 +623,17 @@ class TestAssert_reprcompare:
"Differing items:", "Differing items:",
"{'env': {'env1': 1, 'env2': 2}} != {'env': {'env1': 1}}", "{'env': {'env1': 1, 'env2': 2}} != {'env': {'env1': 1}}",
"Full diff:", "Full diff:",
"- {'common': 1, 'env': {'env1': 1}}", " {",
"+ {'common': 1, 'env': {'env1': 1, 'env2': 2}}", " 'common': 1,",
"? +++++++++++", " 'env': {",
" 'env1': 1,",
"+ 'env2': 2,",
" },",
" }",
] ]
long_a = "a" * 80 long_a = "a" * 80
sub = {"long_a": long_a, "sub1": {"long_a": "substring that gets wrapped " * 2}} sub = {"long_a": long_a, "sub1": {"long_a": "substring that gets wrapped " * 3}}
d1 = {"env": {"sub": sub}} d1 = {"env": {"sub": sub}}
d2 = {"env": {"sub": sub}, "new": 1} d2 = {"env": {"sub": sub}, "new": 1}
diff = callequal(d1, d2, verbose=True) diff = callequal(d1, d2, verbose=True)
@ -584,9 +644,15 @@ class TestAssert_reprcompare:
"{'new': 1}", "{'new': 1}",
"Full diff:", "Full diff:",
" {", " {",
" 'env': {'sub': {'long_a': '" + long_a + "',", " 'env': {",
" 'sub1': {'long_a': 'substring that gets wrapped substring '", " 'sub': {",
" 'that gets wrapped '}}},", f" 'long_a': '{long_a}',",
" 'sub1': {",
" 'long_a': 'substring that gets wrapped substring that gets wrapped '",
" 'substring that gets wrapped ',",
" },",
" },",
" },",
"- 'new': 1,", "- 'new': 1,",
" }", " }",
] ]
@ -629,8 +695,13 @@ class TestAssert_reprcompare:
"Right contains 2 more items:", "Right contains 2 more items:",
"{'b': 1, 'c': 2}", "{'b': 1, 'c': 2}",
"Full diff:", "Full diff:",
"- {'b': 1, 'c': 2}", " {",
"+ {'a': 0}", "- 'b': 1,",
"? ^ ^",
"+ 'a': 0,",
"? ^ ^",
"- 'c': 2,",
" }",
] ]
lines = callequal({"b": 1, "c": 2}, {"a": 0}, verbose=2) lines = callequal({"b": 1, "c": 2}, {"a": 0}, verbose=2)
assert lines == [ assert lines == [
@ -640,8 +711,13 @@ class TestAssert_reprcompare:
"Right contains 1 more item:", "Right contains 1 more item:",
"{'a': 0}", "{'a': 0}",
"Full diff:", "Full diff:",
"- {'a': 0}", " {",
"+ {'b': 1, 'c': 2}", "- 'a': 0,",
"? ^ ^",
"+ 'b': 1,",
"? ^ ^",
"+ 'c': 2,",
" }",
] ]
def test_sequence_different_items(self) -> None: def test_sequence_different_items(self) -> None:
@ -651,8 +727,17 @@ class TestAssert_reprcompare:
"At index 0 diff: 1 != 3", "At index 0 diff: 1 != 3",
"Right contains one more item: 5", "Right contains one more item: 5",
"Full diff:", "Full diff:",
"- (3, 4, 5)", " (",
"+ (1, 2)", "- 3,",
"? ^",
"+ 1,",
"? ^",
"- 4,",
"? ^",
"+ 2,",
"? ^",
"- 5,",
" )",
] ]
lines = callequal((1, 2, 3), (4,), verbose=2) lines = callequal((1, 2, 3), (4,), verbose=2)
assert lines == [ assert lines == [
@ -660,8 +745,27 @@ class TestAssert_reprcompare:
"At index 0 diff: 1 != 4", "At index 0 diff: 1 != 4",
"Left contains 2 more items, first extra item: 2", "Left contains 2 more items, first extra item: 2",
"Full diff:", "Full diff:",
"- (4,)", " (",
"+ (1, 2, 3)", "- 4,",
"? ^",
"+ 1,",
"? ^",
"+ 2,",
"+ 3,",
" )",
]
lines = callequal((1, 2, 3), (1, 20, 3), verbose=2)
assert lines == [
"(1, 2, 3) == (1, 20, 3)",
"At index 1 diff: 2 != 20",
"Full diff:",
" (",
" 1,",
"- 20,",
"? -",
"+ 2,",
" 3,",
" )",
] ]
def test_set(self) -> None: def test_set(self) -> None:
@ -1345,7 +1449,61 @@ def test_reprcompare_whitespaces() -> None:
] ]
def test_pytest_assertrepr_compare_integration(pytester: Pytester) -> None: class TestSetAssertions:
@pytest.mark.parametrize("op", [">=", ">", "<=", "<", "=="])
def test_set_extra_item(self, op, pytester: Pytester) -> None:
pytester.makepyfile(
f"""
def test_hello():
x = set("hello x")
y = set("hello y")
assert x {op} y
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*def test_hello():*",
f"*assert x {op} y*",
]
)
if op in [">=", ">", "=="]:
result.stdout.fnmatch_lines(
[
"*E*Extra items in the right set:*",
"*E*'y'",
]
)
if op in ["<=", "<", "=="]:
result.stdout.fnmatch_lines(
[
"*E*Extra items in the left set:*",
"*E*'x'",
]
)
@pytest.mark.parametrize("op", [">", "<", "!="])
def test_set_proper_superset_equal(self, pytester: Pytester, op) -> None:
pytester.makepyfile(
f"""
def test_hello():
x = set([1, 2, 3])
y = x.copy()
assert x {op} y
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*def test_hello():*",
f"*assert x {op} y*",
"*E*Both sets are equal*",
]
)
def test_pytest_assertrepr_compare_integration(self, pytester: Pytester) -> None:
pytester.makepyfile( pytester.makepyfile(
""" """
def test_hello(): def test_hello():
@ -1367,28 +1525,6 @@ def test_pytest_assertrepr_compare_integration(pytester: Pytester) -> None:
) )
def test_sequence_comparison_uses_repr(pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_hello():
x = set("hello x")
y = set("hello y")
assert x == y
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*def test_hello():*",
"*assert x == y*",
"*E*Extra items*left*",
"*E*'x'*",
"*E*Extra items*right*",
"*E*'y'*",
]
)
def test_assertrepr_loaded_per_dir(pytester: Pytester) -> None: def test_assertrepr_loaded_per_dir(pytester: Pytester) -> None:
pytester.makepyfile(test_base=["def test_base(): assert 1 == 2"]) pytester.makepyfile(test_base=["def test_base(): assert 1 == 2"])
a = pytester.mkdir("a") a = pytester.mkdir("a")
@ -1752,3 +1888,117 @@ def test_reprcompare_verbose_long() -> None:
"{'v0': 0, 'v1': 1, 'v2': 12, 'v3': 3, 'v4': 4, 'v5': 5, " "{'v0': 0, 'v1': 1, 'v2': 12, 'v3': 3, 'v4': 4, 'v5': 5, "
"'v6': 6, 'v7': 7, 'v8': 8, 'v9': 9, 'v10': 10}" "'v6': 6, 'v7': 7, 'v8': 8, 'v9': 9, 'v10': 10}"
) )
@pytest.mark.parametrize("enable_colors", [True, False])
@pytest.mark.parametrize(
("test_code", "expected_lines"),
(
(
"""
def test():
assert [0, 1] == [0, 2]
""",
[
"{bold}{red}E {light-red}- 2,{hl-reset}{endline}{reset}",
"{bold}{red}E {light-green}+ 1,{hl-reset}{endline}{reset}",
],
),
(
"""
def test():
assert {f"number-is-{i}": i for i in range(1, 6)} == {
f"number-is-{i}": i for i in range(5)
}
""",
[
"{bold}{red}E {light-gray} {hl-reset} {{{endline}{reset}",
"{bold}{red}E {light-gray} {hl-reset} 'number-is-1': 1,{endline}{reset}",
"{bold}{red}E {light-green}+ 'number-is-5': 5,{hl-reset}{endline}{reset}",
],
),
),
)
def test_comparisons_handle_colors(
pytester: Pytester, color_mapping, enable_colors, test_code, expected_lines
) -> None:
p = pytester.makepyfile(test_code)
result = pytester.runpytest(
f"--color={'yes' if enable_colors else 'no'}", "-vv", str(p)
)
formatter = (
color_mapping.format_for_fnmatch
if enable_colors
else color_mapping.strip_colors
)
result.stdout.fnmatch_lines(formatter(expected_lines), consecutive=False)
def test_fine_grained_assertion_verbosity(pytester: Pytester):
long_text = "Lorem ipsum dolor sit amet " * 10
p = pytester.makepyfile(
f"""
def test_ok():
pass
def test_words_fail():
fruits1 = ["banana", "apple", "grapes", "melon", "kiwi"]
fruits2 = ["banana", "apple", "orange", "melon", "kiwi"]
assert fruits1 == fruits2
def test_numbers_fail():
number_to_text1 = {{str(x): x for x in range(5)}}
number_to_text2 = {{str(x * 10): x * 10 for x in range(5)}}
assert number_to_text1 == number_to_text2
def test_long_text_fail():
long_text = "{long_text}"
assert "hello world" in long_text
"""
)
pytester.makeini(
"""
[pytest]
verbosity_assertions = 2
"""
)
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
f"{p.name} .FFF [100%]",
"E At index 2 diff: 'grapes' != 'orange'",
"E Full diff:",
"E [",
"E 'banana',",
"E 'apple',",
"E - 'orange',",
"E ? ^ ^^",
"E + 'grapes',",
"E ? ^ ^ +",
"E 'melon',",
"E 'kiwi',",
"E ]",
"E Full diff:",
"E {",
"E '0': 0,",
"E - '10': 10,",
"E ? - -",
"E + '1': 1,",
"E - '20': 20,",
"E ? - -",
"E + '2': 2,",
"E - '30': 30,",
"E ? - -",
"E + '3': 3,",
"E - '40': 40,",
"E ? - -",
"E + '4': 4,",
"E }",
f"E AssertionError: assert 'hello world' in '{long_text}'",
]
)

View File

@ -895,7 +895,11 @@ def test_rewritten():
) )
@pytest.mark.skipif('"__pypy__" in sys.modules') @pytest.mark.skipif('"__pypy__" in sys.modules')
def test_pyc_vs_pyo(self, pytester: Pytester, monkeypatch) -> None: def test_pyc_vs_pyo(
self,
pytester: Pytester,
monkeypatch: pytest.MonkeyPatch,
) -> None:
pytester.makepyfile( pytester.makepyfile(
""" """
import pytest import pytest
@ -905,13 +909,13 @@ def test_rewritten():
) )
p = make_numbered_dir(root=Path(pytester.path), prefix="runpytest-") p = make_numbered_dir(root=Path(pytester.path), prefix="runpytest-")
tmp = "--basetemp=%s" % p tmp = "--basetemp=%s" % p
monkeypatch.setenv("PYTHONOPTIMIZE", "2") with monkeypatch.context() as mp:
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False) mp.setenv("PYTHONOPTIMIZE", "2")
monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False) mp.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
mp.delenv("PYTHONPYCACHEPREFIX", raising=False)
assert pytester.runpytest_subprocess(tmp).ret == 0 assert pytester.runpytest_subprocess(tmp).ret == 0
tagged = "test_pyc_vs_pyo." + PYTEST_TAG tagged = "test_pyc_vs_pyo." + PYTEST_TAG
assert tagged + ".pyo" in os.listdir("__pycache__") assert tagged + ".pyo" in os.listdir("__pycache__")
monkeypatch.undo()
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False) monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False) monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False)
assert pytester.runpytest_subprocess(tmp).ret == 1 assert pytester.runpytest_subprocess(tmp).ret == 1
@ -1543,6 +1547,27 @@ class TestIssue11028:
result.stdout.fnmatch_lines(["*assert 4 > 5", "*where 5 = add_one(4)"]) result.stdout.fnmatch_lines(["*assert 4 > 5", "*where 5 = add_one(4)"])
class TestIssue11239:
def test_assertion_walrus_different_test_cases(self, pytester: Pytester) -> None:
"""Regression for (#11239)
Walrus operator rewriting would leak to separate test cases if they used the same variables.
"""
pytester.makepyfile(
"""
def test_1():
state = {"x": 2}.get("x")
assert state is not None
def test_2():
db = {"x": 2}
assert (state := db.get("x")) is not None
"""
)
result = pytester.runpytest()
assert result.ret == 0
@pytest.mark.skipif( @pytest.mark.skipif(
sys.maxsize <= (2**31 - 1), reason="Causes OverflowError on 32bit systems" sys.maxsize <= (2**31 - 1), reason="Causes OverflowError on 32bit systems"
) )
@ -2031,13 +2056,15 @@ class TestReprSizeVerbosity:
) )
def test_get_maxsize_for_saferepr(self, verbose: int, expected_size) -> None: def test_get_maxsize_for_saferepr(self, verbose: int, expected_size) -> None:
class FakeConfig: class FakeConfig:
def getoption(self, name: str) -> int: def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:
assert name == "verbose"
return verbose return verbose
config = FakeConfig() config = FakeConfig()
assert _get_maxsize_for_saferepr(cast(Config, config)) == expected_size assert _get_maxsize_for_saferepr(cast(Config, config)) == expected_size
def test_get_maxsize_for_saferepr_no_config(self) -> None:
assert _get_maxsize_for_saferepr(None) == DEFAULT_REPR_MAX_SIZE
def create_test_file(self, pytester: Pytester, size: int) -> None: def create_test_file(self, pytester: Pytester, size: int) -> None:
pytester.makepyfile( pytester.makepyfile(
f""" f"""

View File

@ -5,6 +5,7 @@ import re
import sys import sys
import textwrap import textwrap
from pathlib import Path from pathlib import Path
from typing import Any
from typing import Dict from typing import Dict
from typing import List from typing import List
from typing import Sequence from typing import Sequence
@ -21,6 +22,8 @@ from _pytest.config import Config
from _pytest.config import ConftestImportFailure from _pytest.config import ConftestImportFailure
from _pytest.config import ExitCode from _pytest.config import ExitCode
from _pytest.config import parse_warning_filter from _pytest.config import parse_warning_filter
from _pytest.config.argparsing import get_ini_default_for_type
from _pytest.config.argparsing import Parser
from _pytest.config.exceptions import UsageError from _pytest.config.exceptions import UsageError
from _pytest.config.findpaths import determine_setup from _pytest.config.findpaths import determine_setup
from _pytest.config.findpaths import get_common_ancestor from _pytest.config.findpaths import get_common_ancestor
@ -857,6 +860,68 @@ class TestConfigAPI:
assert len(values) == 2 assert len(values) == 2
assert values == ["456", "123"] assert values == ["456", "123"]
def test_addini_default_values(self, pytester: Pytester) -> None:
"""Tests the default values for configuration based on
config type
"""
pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("linelist1", "", type="linelist")
parser.addini("paths1", "", type="paths")
parser.addini("pathlist1", "", type="pathlist")
parser.addini("args1", "", type="args")
parser.addini("bool1", "", type="bool")
parser.addini("string1", "", type="string")
parser.addini("none_1", "", type="linelist", default=None)
parser.addini("none_2", "", default=None)
parser.addini("no_type", "")
"""
)
config = pytester.parseconfig()
# default for linelist, paths, pathlist and args is []
value = config.getini("linelist1")
assert value == []
value = config.getini("paths1")
assert value == []
value = config.getini("pathlist1")
assert value == []
value = config.getini("args1")
assert value == []
# default for bool is False
value = config.getini("bool1")
assert value is False
# default for string is ""
value = config.getini("string1")
assert value == ""
# should return None if None is explicitly set as default value
# irrespective of the type argument
value = config.getini("none_1")
assert value is None
value = config.getini("none_2")
assert value is None
# in case no type is provided and no default set
# treat it as string and default value will be ""
value = config.getini("no_type")
assert value == ""
@pytest.mark.parametrize(
"type, expected",
[
pytest.param(None, "", id="None"),
pytest.param("string", "", id="string"),
pytest.param("paths", [], id="paths"),
pytest.param("pathlist", [], id="pathlist"),
pytest.param("args", [], id="args"),
pytest.param("linelist", [], id="linelist"),
pytest.param("bool", False, id="bool"),
],
)
def test_get_ini_default_for_type(self, type: Any, expected: Any) -> None:
assert get_ini_default_for_type(type) == expected
def test_confcutdir_check_isdir(self, pytester: Pytester) -> None: def test_confcutdir_check_isdir(self, pytester: Pytester) -> None:
"""Give an error if --confcutdir is not a valid directory (#2078)""" """Give an error if --confcutdir is not a valid directory (#2078)"""
exp_match = r"^--confcutdir must be a directory, given: " exp_match = r"^--confcutdir must be a directory, given: "
@ -1894,16 +1959,6 @@ def test_invocation_args(pytester: Pytester) -> None:
], ],
) )
def test_config_blocked_default_plugins(pytester: Pytester, plugin: str) -> None: def test_config_blocked_default_plugins(pytester: Pytester, plugin: str) -> None:
if plugin == "debugging":
# Fixed in xdist (after 1.27.0).
# https://github.com/pytest-dev/pytest-xdist/pull/422
try:
import xdist # noqa: F401
except ImportError:
pass
else:
pytest.skip("does not work with xdist currently")
p = pytester.makepyfile("def test(): pass") p = pytester.makepyfile("def test(): pass")
result = pytester.runpytest(str(p), "-pno:%s" % plugin) result = pytester.runpytest(str(p), "-pno:%s" % plugin)
@ -2181,3 +2236,76 @@ class TestDebugOptions:
"*Default: pytestdebug.log.", "*Default: pytestdebug.log.",
] ]
) )
class TestVerbosity:
SOME_OUTPUT_TYPE = Config.VERBOSITY_ASSERTIONS
SOME_OUTPUT_VERBOSITY_LEVEL = 5
class VerbosityIni:
def pytest_addoption(self, parser: Parser) -> None:
Config._add_verbosity_ini(
parser, TestVerbosity.SOME_OUTPUT_TYPE, help="some help text"
)
def test_level_matches_verbose_when_not_specified(
self, pytester: Pytester, tmp_path: Path
) -> None:
tmp_path.joinpath("pytest.ini").write_text(
textwrap.dedent(
"""\
[pytest]
addopts = --verbose
"""
),
encoding="utf-8",
)
pytester.plugins = [TestVerbosity.VerbosityIni()]
config = pytester.parseconfig(tmp_path)
assert (
config.get_verbosity(TestVerbosity.SOME_OUTPUT_TYPE)
== config.option.verbose
)
def test_level_matches_verbose_when_not_known_type(
self, pytester: Pytester, tmp_path: Path
) -> None:
tmp_path.joinpath("pytest.ini").write_text(
textwrap.dedent(
"""\
[pytest]
addopts = --verbose
"""
),
encoding="utf-8",
)
pytester.plugins = [TestVerbosity.VerbosityIni()]
config = pytester.parseconfig(tmp_path)
assert config.get_verbosity("some fake verbosity type") == config.option.verbose
def test_level_matches_specified_override(
self, pytester: Pytester, tmp_path: Path
) -> None:
setting_name = f"verbosity_{TestVerbosity.SOME_OUTPUT_TYPE}"
tmp_path.joinpath("pytest.ini").write_text(
textwrap.dedent(
f"""\
[pytest]
addopts = --verbose
{setting_name} = {TestVerbosity.SOME_OUTPUT_VERBOSITY_LEVEL}
"""
),
encoding="utf-8",
)
pytester.plugins = [TestVerbosity.VerbosityIni()]
config = pytester.parseconfig(tmp_path)
assert (
config.get_verbosity(TestVerbosity.SOME_OUTPUT_TYPE)
== TestVerbosity.SOME_OUTPUT_VERBOSITY_LEVEL
)

View File

@ -21,10 +21,14 @@ TESTCASES = [
E assert [1, 4, 3] == [1, 2, 3] E assert [1, 4, 3] == [1, 2, 3]
E At index 1 diff: 4 != 2 E At index 1 diff: 4 != 2
E Full diff: E Full diff:
E - [1, 2, 3] E [
E 1,
E - 2,
E ? ^ E ? ^
E + [1, 4, 3] E + 4,
E ? ^ E ? ^
E 3,
E ]
""", """,
id="Compare lists, one item differs", id="Compare lists, one item differs",
), ),
@ -40,9 +44,11 @@ TESTCASES = [
E assert [1, 2, 3] == [1, 2] E assert [1, 2, 3] == [1, 2]
E Left contains one more item: 3 E Left contains one more item: 3
E Full diff: E Full diff:
E - [1, 2] E [
E + [1, 2, 3] E 1,
E ? +++ E 2,
E + 3,
E ]
""", """,
id="Compare lists, one extra item", id="Compare lists, one extra item",
), ),
@ -59,9 +65,11 @@ TESTCASES = [
E At index 1 diff: 3 != 2 E At index 1 diff: 3 != 2
E Right contains one more item: 3 E Right contains one more item: 3
E Full diff: E Full diff:
E - [1, 2, 3] E [
E ? --- E 1,
E + [1, 3] E - 2,
E 3,
E ]
""", """,
id="Compare lists, one item missing", id="Compare lists, one item missing",
), ),
@ -77,10 +85,14 @@ TESTCASES = [
E assert (1, 4, 3) == (1, 2, 3) E assert (1, 4, 3) == (1, 2, 3)
E At index 1 diff: 4 != 2 E At index 1 diff: 4 != 2
E Full diff: E Full diff:
E - (1, 2, 3) E (
E 1,
E - 2,
E ? ^ E ? ^
E + (1, 4, 3) E + 4,
E ? ^ E ? ^
E 3,
E )
""", """,
id="Compare tuples", id="Compare tuples",
), ),
@ -99,10 +111,12 @@ TESTCASES = [
E Extra items in the right set: E Extra items in the right set:
E 2 E 2
E Full diff: E Full diff:
E - {1, 2, 3} E {
E ? ^ ^ E 1,
E + {1, 3, 4} E - 2,
E ? ^ ^ E 3,
E + 4,
E }
""", """,
id="Compare sets", id="Compare sets",
), ),
@ -123,10 +137,13 @@ TESTCASES = [
E Right contains 1 more item: E Right contains 1 more item:
E {2: 'eggs'} E {2: 'eggs'}
E Full diff: E Full diff:
E - {1: 'spam', 2: 'eggs'} E {
E 1: 'spam',
E - 2: 'eggs',
E ? ^ E ? ^
E + {1: 'spam', 3: 'eggs'} E + 3: 'eggs',
E ? ^ E ? ^
E }
""", """,
id="Compare dicts with differing keys", id="Compare dicts with differing keys",
), ),
@ -145,10 +162,11 @@ TESTCASES = [
E Differing items: E Differing items:
E {2: 'eggs'} != {2: 'bacon'} E {2: 'eggs'} != {2: 'bacon'}
E Full diff: E Full diff:
E - {1: 'spam', 2: 'bacon'} E {
E ? ^^^^^ E 1: 'spam',
E + {1: 'spam', 2: 'eggs'} E - 2: 'bacon',
E ? ^^^^ E + 2: 'eggs',
E }
""", """,
id="Compare dicts with differing values", id="Compare dicts with differing values",
), ),
@ -169,10 +187,11 @@ TESTCASES = [
E Right contains 1 more item: E Right contains 1 more item:
E {3: 'bacon'} E {3: 'bacon'}
E Full diff: E Full diff:
E - {1: 'spam', 3: 'bacon'} E {
E ? ^ ^^^^^ E 1: 'spam',
E + {1: 'spam', 2: 'eggs'} E - 3: 'bacon',
E ? ^ ^^^^ E + 2: 'eggs',
E }
""", """,
id="Compare dicts with differing items", id="Compare dicts with differing items",
), ),

View File

@ -290,10 +290,10 @@ class TestParser:
def test_argcomplete(pytester: Pytester, monkeypatch: MonkeyPatch) -> None: def test_argcomplete(pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
try: if sys.version_info >= (3, 11):
# New in Python 3.11, ignores utf-8 mode # New in Python 3.11, ignores utf-8 mode
encoding = locale.getencoding() # type: ignore[attr-defined] encoding = locale.getencoding()
except AttributeError: else:
encoding = locale.getpreferredencoding(False) encoding = locale.getpreferredencoding(False)
try: try:
bash_version = subprocess.run( bash_version = subprocess.run(

View File

@ -28,6 +28,7 @@ from _pytest.pathlib import resolve_package_path
from _pytest.pathlib import safe_exists from _pytest.pathlib import safe_exists
from _pytest.pathlib import symlink_or_skip from _pytest.pathlib import symlink_or_skip
from _pytest.pathlib import visit from _pytest.pathlib import visit
from _pytest.pytester import Pytester
from _pytest.tmpdir import TempPathFactory from _pytest.tmpdir import TempPathFactory
@ -235,15 +236,15 @@ class TestImportPath:
name = "pointsback123" name = "pointsback123"
p = tmp_path.joinpath(name + ".py") p = tmp_path.joinpath(name + ".py")
p.touch() p.touch()
with monkeypatch.context() as mp:
for ending in (".pyc", ".pyo"): for ending in (".pyc", ".pyo"):
mod = ModuleType(name) mod = ModuleType(name)
pseudopath = tmp_path.joinpath(name + ending) pseudopath = tmp_path.joinpath(name + ending)
pseudopath.touch() pseudopath.touch()
mod.__file__ = str(pseudopath) mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod) mp.setitem(sys.modules, name, mod)
newmod = import_path(p, root=tmp_path) newmod = import_path(p, root=tmp_path)
assert mod == newmod assert mod == newmod
monkeypatch.undo()
mod = ModuleType(name) mod = ModuleType(name)
pseudopath = tmp_path.joinpath(name + "123.py") pseudopath = tmp_path.joinpath(name + "123.py")
pseudopath.touch() pseudopath.touch()
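
The rewrite above swaps a manual monkeypatch.undo() for monkeypatch.context(), which undoes every modification made through the context when the with block exits, even if an assertion fails inside it. A minimal sketch of the pattern:

import sys

def test_context_undoes_changes(monkeypatch):
    with monkeypatch.context() as mp:
        mp.setitem(sys.modules, "fake_module_for_demo", object())
        assert "fake_module_for_demo" in sys.modules
    # Everything patched through `mp` has been restored at this point.
    assert "fake_module_for_demo" not in sys.modules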
@ -345,18 +346,18 @@ def test_resolve_package_path(tmp_path: Path) -> None:
(pkg / "subdir").mkdir() (pkg / "subdir").mkdir()
(pkg / "subdir/__init__.py").touch() (pkg / "subdir/__init__.py").touch()
assert resolve_package_path(pkg) == pkg assert resolve_package_path(pkg) == pkg
assert resolve_package_path(pkg.joinpath("subdir", "__init__.py")) == pkg assert resolve_package_path(pkg / "subdir/__init__.py") == pkg
def test_package_unimportable(tmp_path: Path) -> None: def test_package_unimportable(tmp_path: Path) -> None:
pkg = tmp_path / "pkg1-1" pkg = tmp_path / "pkg1-1"
pkg.mkdir() pkg.mkdir()
pkg.joinpath("__init__.py").touch() pkg.joinpath("__init__.py").touch()
subdir = pkg.joinpath("subdir") subdir = pkg / "subdir"
subdir.mkdir() subdir.mkdir()
pkg.joinpath("subdir/__init__.py").touch() (pkg / "subdir/__init__.py").touch()
assert resolve_package_path(subdir) == subdir assert resolve_package_path(subdir) == subdir
xyz = subdir.joinpath("xyz.py") xyz = subdir / "xyz.py"
xyz.touch() xyz.touch()
assert resolve_package_path(xyz) == subdir assert resolve_package_path(xyz) == subdir
assert not resolve_package_path(pkg) assert not resolve_package_path(pkg)
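
The edits above are stylistic: Path.joinpath(...) calls are rewritten with the / operator, which builds an equal Path. For illustration:

from pathlib import Path

base = Path("pkg")
# joinpath() and the / operator produce the same path object.
assert base.joinpath("subdir", "__init__.py") == base / "subdir/__init__.py"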
@ -592,6 +593,10 @@ class TestImportLibMode:
result = module_name_from_path(tmp_path / "src/app/__init__.py", tmp_path) result = module_name_from_path(tmp_path / "src/app/__init__.py", tmp_path)
assert result == "src.app" assert result == "src.app"
# Unless the __init__.py file is at the root, in which case we cannot have an empty module name.
result = module_name_from_path(tmp_path / "__init__.py", tmp_path)
assert result == "__init__"
def test_insert_missing_modules( def test_insert_missing_modules(
self, monkeypatch: MonkeyPatch, tmp_path: Path self, monkeypatch: MonkeyPatch, tmp_path: Path
) -> None: ) -> None:
@ -663,6 +668,22 @@ class TestImportLibMode:
mod = import_path(init, root=tmp_path, mode=ImportMode.importlib) mod = import_path(init, root=tmp_path, mode=ImportMode.importlib)
assert len(mod.instance.INSTANCES) == 1 assert len(mod.instance.INSTANCES) == 1
def test_importlib_root_is_package(self, pytester: Pytester) -> None:
"""
Regression test for importing an `__init__.py` file that is at the root
(#11417).
"""
pytester.makepyfile(__init__="")
pytester.makepyfile(
"""
def test_my_test():
assert True
"""
)
result = pytester.runpytest("--import-mode=importlib")
result.stdout.fnmatch_lines("* 1 passed *")
def test_safe_exists(tmp_path: Path) -> None: def test_safe_exists(tmp_path: Path) -> None:
d = tmp_path.joinpath("some_dir") d = tmp_path.joinpath("some_dir")
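
The two additions above target the same regression (#11417): an __init__.py sitting directly at the rootdir must not resolve to an empty module name under --import-mode=importlib. A condensed restatement of the new assertions, assuming the private _pytest.pathlib helper is importable as in the test file:

from pathlib import Path
from _pytest.pathlib import module_name_from_path

root = Path("/proj")
# A nested __init__.py maps to its package name...
assert module_name_from_path(root / "src/app/__init__.py", root) == "src.app"
# ...but an __init__.py at the root keeps the literal name "__init__".
assert module_name_from_path(root / "__init__.py", root) == "__init__"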

View File

@ -2,7 +2,6 @@ import os
import subprocess import subprocess
import sys import sys
import time import time
from pathlib import Path
from types import ModuleType from types import ModuleType
from typing import List from typing import List
@ -11,7 +10,6 @@ import pytest
from _pytest.config import ExitCode from _pytest.config import ExitCode
from _pytest.config import PytestPluginManager from _pytest.config import PytestPluginManager
from _pytest.monkeypatch import MonkeyPatch from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import CwdSnapshot
from _pytest.pytester import HookRecorder from _pytest.pytester import HookRecorder
from _pytest.pytester import LineMatcher from _pytest.pytester import LineMatcher
from _pytest.pytester import Pytester from _pytest.pytester import Pytester
@ -301,17 +299,6 @@ def test_assert_outcomes_after_pytest_error(pytester: Pytester) -> None:
result.assert_outcomes(passed=0) result.assert_outcomes(passed=0)
def test_cwd_snapshot(pytester: Pytester) -> None:
foo = pytester.mkdir("foo")
bar = pytester.mkdir("bar")
os.chdir(foo)
snapshot = CwdSnapshot()
os.chdir(bar)
assert Path().absolute() == bar
snapshot.restore()
assert Path().absolute() == foo
class TestSysModulesSnapshot: class TestSysModulesSnapshot:
key = "my-test-module" key = "my-test-module"

View File

@ -192,7 +192,7 @@ class TestDeprecatedCall:
f() f()
@pytest.mark.parametrize( @pytest.mark.parametrize(
"warning_type", [PendingDeprecationWarning, DeprecationWarning] "warning_type", [PendingDeprecationWarning, DeprecationWarning, FutureWarning]
) )
@pytest.mark.parametrize("mode", ["context_manager", "call"]) @pytest.mark.parametrize("mode", ["context_manager", "call"])
@pytest.mark.parametrize("call_f_first", [True, False]) @pytest.mark.parametrize("call_f_first", [True, False])
@ -221,7 +221,6 @@ class TestDeprecatedCall:
UserWarning, UserWarning,
SyntaxWarning, SyntaxWarning,
RuntimeWarning, RuntimeWarning,
FutureWarning,
ImportWarning, ImportWarning,
UnicodeWarning, UnicodeWarning,
] ]
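
The parametrization change above moves FutureWarning out of the list of warning types that should not satisfy pytest.deprecated_call() and into the set it is expected to accept. Usage under that assumption (as the updated test implies):

import warnings

import pytest

def deprecated_api() -> None:
    warnings.warn("this API will change in a future release", FutureWarning)

def test_deprecated_api() -> None:
    with pytest.deprecated_call():
        deprecated_api()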

View File

@ -1802,7 +1802,7 @@ def test_terminal_no_summary_warnings_header_once(pytester: Pytester) -> None:
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def tr() -> TerminalReporter: def tr() -> TerminalReporter:
config = _pytest.config._prepareconfig() config = _pytest.config._prepareconfig([])
return TerminalReporter(config) return TerminalReporter(config)
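
Presumably the explicit empty argument list keeps the session-scoped tr fixture from inheriting the command line of the pytest process that runs this test suite; _prepareconfig is private API, so the sketch below only restates what the fixture now does:

import _pytest.config
from _pytest.terminal import TerminalReporter

# Build a Config for an empty invocation instead of sys.argv, then hand it
# to a TerminalReporter, mirroring the fixture above.
config = _pytest.config._prepareconfig([])
reporter = TerminalReporter(config)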