Merge branch 'main' of https://github.com/pytest-dev/pytest into downstream_testing_2

commit 495c5abf31
@@ -0,0 +1,56 @@
name: deploy

on:
push:
tags:
# These tags are protected, see:
# https://github.com/pytest-dev/pytest/settings/tag_protection
- "[0-9]+.[0-9]+.[0-9]+"
- "[0-9]+.[0-9]+.[0-9]+rc[0-9]+"

# Set permissions at the job level.
permissions: {}

jobs:
deploy:
if: github.repository == 'pytest-dev/pytest'

runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: write

steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
persist-credentials: false

- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: "3.7"

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install --upgrade build tox

- name: Build package
run: |
python -m build

- name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@master
with:
user: __token__
password: ${{ secrets.pypi_token }}

- name: Publish GitHub release notes
env:
GH_RELEASE_NOTES_TOKEN: ${{ github.token }}
run: |
sudo apt-get install pandoc
tox -e publish-gh-release-notes
@@ -1,4 +1,4 @@
name: main
name: test

on:
push:

@@ -37,6 +37,7 @@ jobs:
"windows-py38",
"windows-py39",
"windows-py310",
"windows-py311",

"ubuntu-py37",
"ubuntu-py37-pluggy",

@@ -44,6 +45,7 @@ jobs:
"ubuntu-py38",
"ubuntu-py39",
"ubuntu-py310",
"ubuntu-py311",
"ubuntu-pypy3",

"macos-py37",

@@ -75,9 +77,13 @@ jobs:
os: windows-latest
tox_env: "py39-xdist"
- name: "windows-py310"
python: "3.10.1"
python: "3.10"
os: windows-latest
tox_env: "py310-xdist"
- name: "windows-py311"
python: "3.11-dev"
os: windows-latest
tox_env: "py311"

- name: "ubuntu-py37"
python: "3.7"

@@ -101,9 +107,13 @@ jobs:
os: ubuntu-latest
tox_env: "py39-xdist"
- name: "ubuntu-py310"
python: "3.10.1"
python: "3.10"
os: ubuntu-latest
tox_env: "py310-xdist"
- name: "ubuntu-py311"
python: "3.11-dev"
os: ubuntu-latest
tox_env: "py311"
- name: "ubuntu-pypy3"
python: "pypy-3.7"
os: ubuntu-latest

@@ -177,46 +187,3 @@ jobs:
fail_ci_if_error: true
files: ./coverage.xml
verbose: true

deploy:
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') && github.repository == 'pytest-dev/pytest'

runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: write

needs: [build]

steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
persist-credentials: false

- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: "3.7"

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install --upgrade build tox

- name: Build package
run: |
python -m build

- name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@master
with:
user: __token__
password: ${{ secrets.pypi_token }}

- name: Publish GitHub release notes
env:
GH_RELEASE_NOTES_TOKEN: ${{ github.token }}
run: |
sudo apt-get install pandoc
tox -e publish-gh-release-notes
@@ -12,6 +12,7 @@ permissions: {}

jobs:
createPullRequest:
if: github.repository_owner == 'pytest-dev'
runs-on: ubuntu-latest
permissions:
contents: write
@@ -20,6 +20,14 @@ repos:
- id: debug-statements
exclude: _pytest/(debugging|hookspec).py
language_version: python3
- repo: https://github.com/myint/autoflake
rev: v1.4
hooks:
- id: autoflake
name: autoflake
args: ["--in-place", "--remove-unused-variables", "--remove-all-unused-imports"]
language: python
files: \.py$
- repo: https://github.com/PyCQA/flake8
rev: 4.0.1
hooks:

@@ -29,12 +37,12 @@ repos:
- flake8-typing-imports==1.12.0
- flake8-docstrings==1.5.0
- repo: https://github.com/asottile/reorder_python_imports
rev: v2.7.1
rev: v3.0.1
hooks:
- id: reorder-python-imports
args: ['--application-directories=.:src', --py37-plus]
- repo: https://github.com/asottile/pyupgrade
rev: v2.31.0
rev: v2.31.1
hooks:
- id: pyupgrade
args: [--py37-plus]

@@ -48,7 +56,7 @@ repos:
hooks:
- id: python-use-type-annotations
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.931
rev: v0.940
hooks:
- id: mypy
files: ^(src/|testing/)
AUTHORS

@@ -185,8 +185,10 @@ Katerina Koukiou
Keri Volans
Kevin Cox
Kevin J. Foley
Kian Eliasi
Kian-Meng Ang
Kodi B. Arfer
Kojo Idrissa
Kostis Anagnostopoulos
Kristoffer Nordström
Kyle Altendorf

@@ -288,6 +290,7 @@ Ruaridh Williamson
Russel Winder
Ryan Wooden
Saiprasad Kale
Samuel Colvin
Samuel Dion-Girardeau
Samuel Searles-Bryant
Samuele Pedroni
@@ -20,8 +20,8 @@
:target: https://codecov.io/gh/pytest-dev/pytest
:alt: Code coverage Status

.. image:: https://github.com/pytest-dev/pytest/workflows/main/badge.svg
:target: https://github.com/pytest-dev/pytest/actions?query=workflow%3Amain
.. image:: https://github.com/pytest-dev/pytest/workflows/test/badge.svg
:target: https://github.com/pytest-dev/pytest/actions?query=workflow%3Atest

.. image:: https://results.pre-commit.ci/badge/github/pytest-dev/pytest/main.svg
:target: https://results.pre-commit.ci/latest/github/pytest-dev/pytest/main
@@ -142,7 +142,7 @@ Both automatic and manual processes described above follow the same steps from t

Wait for the deploy to complete, then make sure it is `available on PyPI <https://pypi.org/project/pytest>`_.

#. Merge the PR.
#. Merge the PR. **Make sure it's not squash-merged**, so that the tagged commit ends up in the main branch.

#. Cherry-pick the CHANGELOG / announce files to the ``main`` branch::
@@ -0,0 +1,2 @@
Introduce multiline display for warning matching via :py:func:`pytest.warns` and
enhance match comparison for :py:func:`_pytest._code.ExceptionInfo.match` as returned by :py:func:`pytest.raises`.
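A minimal sketch of the warning matching this entry refers to (the test and warning text are illustrative, not taken from the change)::

    import warnings

    import pytest

    def test_warning_message_is_matched():
        # On a failed match, the error now lays out the pattern and the
        # actual warning message on separate lines.
        with pytest.warns(UserWarning, match=r"deprecated in 7\.1"):
            warnings.warn("this option is deprecated in 7.1", UserWarning)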
@@ -1,15 +0,0 @@
As per our policy, the following features have been deprecated in the 6.X series and are now
removed:

* ``pytest._fillfuncargs`` function.

* ``pytest_warning_captured`` hook - use ``pytest_warning_recorded`` instead.

* ``-k -foobar`` syntax - use ``-k 'not foobar'`` instead.

* ``-k foobar:`` syntax.

* ``pytest.collect`` module - import from ``pytest`` directly.

For more information consult
`Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs.

@@ -1 +0,0 @@
Pytest will now avoid specialized assert formatting when it is detected that the default __eq__ is overridden

@@ -1 +0,0 @@
Dropped support for Python 3.6, which reached `end-of-life <https://devguide.python.org/#status-of-python-branches>`__ at 2021-12-23.

@@ -1,10 +0,0 @@
Symbolic link components are no longer resolved in conftest paths.
This means that if a conftest appears twice in collection tree, using symlinks, it will be executed twice.
For example, given

tests/real/conftest.py
tests/real/test_it.py
tests/link -> tests/real

running ``pytest tests`` now imports the conftest twice, once as ``tests/real/conftest.py`` and once as ``tests/link/conftest.py``.
This is a fix to match a similar change made to test collection itself in pytest 6.0 (see :pull:`6523` for details).

@@ -1 +0,0 @@
When ``-vv`` is given on command line, show skipping and xfail reasons in full instead of truncating them to fit the terminal width.

@@ -0,0 +1 @@
An unnecessary ``numpy`` import inside :func:`pytest.approx` was removed.

@@ -0,0 +1 @@
Display assertion message without escaped newline characters with ``-vv``.
@@ -6,6 +6,9 @@ Release announcements
:maxdepth: 2


release-7.1.1
release-7.1.0
release-7.0.1
release-7.0.0
release-7.0.0rc1
release-6.2.5

@@ -0,0 +1,20 @@
pytest-7.0.1
=======================================

pytest 7.0.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.

Thanks to all of the contributors to this release:

* Anthony Sottile
* Bruno Oliveira
* Ran Benita


Happy testing,
The pytest Development Team

@@ -0,0 +1,48 @@
pytest-7.1.0
=======================================

The pytest team is proud to announce the 7.1.0 release!

This release contains new features, improvements, and bug fixes,
the full list of changes is available in the changelog:

https://docs.pytest.org/en/stable/changelog.html

For complete documentation, please visit:

https://docs.pytest.org/en/stable/

As usual, you can upgrade from PyPI via:

pip install -U pytest

Thanks to all of the contributors to this release:

* Akuli
* Andrew Svetlov
* Anthony Sottile
* Brett Holman
* Bruno Oliveira
* Chris NeJame
* Dan Alvizu
* Elijah DeLee
* Emmanuel Arias
* Fabian Egli
* Florian Bruhin
* Gabor Szabo
* Hasan Ramezani
* Hugo van Kemenade
* Kian Meng, Ang
* Kojo Idrissa
* Masaru Tsuchiyama
* Olga Matoula
* P. L. Lim
* Ran Benita
* Tobias Deiminger
* Yuval Shimon
* eduardo naufel schettino
* Éric


Happy testing,
The pytest Development Team

@@ -0,0 +1,18 @@
pytest-7.1.1
=======================================

pytest 7.1.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.

Thanks to all of the contributors to this release:

* Ran Benita


Happy testing,
The pytest Development Team
@@ -65,7 +65,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
Fixture that returns a :py:class:`dict` that will be injected into the
namespace of doctests.

pytestconfig [session scope] -- .../_pytest/fixtures.py:1365
pytestconfig [session scope] -- .../_pytest/fixtures.py:1334
Session-scoped fixture that returns the session's :class:`pytest.Config`
object.

@@ -134,7 +134,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a

.. _legacy_path: https://py.readthedocs.io/en/latest/path.html

caplog -- .../_pytest/logging.py:483
caplog -- .../_pytest/logging.py:487
Access and control log capturing.

Captured logs are available through the following properties/methods::
@@ -28,6 +28,131 @@ with advance notice in the **Deprecations** section of releases.

.. towncrier release notes start

pytest 7.1.1 (2022-03-17)
=========================

Bug Fixes
---------

- `#9767 <https://github.com/pytest-dev/pytest/issues/9767>`_: Fixed a regression in pytest 7.1.0 where some conftest.py files outside of the source tree (e.g. in the `site-packages` directory) were not picked up.


pytest 7.1.0 (2022-03-13)
=========================

Breaking Changes
----------------

- `#8838 <https://github.com/pytest-dev/pytest/issues/8838>`_: As per our policy, the following features have been deprecated in the 6.X series and are now
removed:

* ``pytest._fillfuncargs`` function.

* ``pytest_warning_captured`` hook - use ``pytest_warning_recorded`` instead.

* ``-k -foobar`` syntax - use ``-k 'not foobar'`` instead.

* ``-k foobar:`` syntax.

* ``pytest.collect`` module - import from ``pytest`` directly.

For more information consult
`Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs.


- `#9437 <https://github.com/pytest-dev/pytest/issues/9437>`_: Dropped support for Python 3.6, which reached `end-of-life <https://devguide.python.org/#status-of-python-branches>`__ at 2021-12-23.


Improvements
------------

- `#5192 <https://github.com/pytest-dev/pytest/issues/5192>`_: Fixed test output for some data types where ``-v`` would show less information.

Also, when showing diffs for sequences, ``-q`` would produce full diffs instead of the expected diff.


- `#9362 <https://github.com/pytest-dev/pytest/issues/9362>`_: pytest now avoids specialized assert formatting when it is detected that the default ``__eq__`` is overridden in ``attrs`` or ``dataclasses``.


- `#9536 <https://github.com/pytest-dev/pytest/issues/9536>`_: When ``-vv`` is given on command line, show skipping and xfail reasons in full instead of truncating them to fit the terminal width.


- `#9644 <https://github.com/pytest-dev/pytest/issues/9644>`_: More information about the location of resources that led Python to raise :class:`ResourceWarning` can now
be obtained by enabling :mod:`tracemalloc`.

See :ref:`resource-warnings` for more information.


- `#9678 <https://github.com/pytest-dev/pytest/issues/9678>`_: More types are now accepted in the ``ids`` argument to ``@pytest.mark.parametrize``.
Previously only `str`, `float`, `int` and `bool` were accepted;
now `bytes`, `complex`, `re.Pattern`, `Enum` and anything with a `__name__` are also accepted.


- `#9692 <https://github.com/pytest-dev/pytest/issues/9692>`_: :func:`pytest.approx` now raises a :class:`TypeError` when given an unordered sequence (such as :class:`set`).

Note that this implies that custom classes which only implement ``__iter__`` and ``__len__`` are no longer supported as they don't guarantee order.


Bug Fixes
---------

- `#8242 <https://github.com/pytest-dev/pytest/issues/8242>`_: The deprecation of raising :class:`unittest.SkipTest` to skip collection of
tests during the pytest collection phase is reverted - this is now a supported
feature again.


- `#9493 <https://github.com/pytest-dev/pytest/issues/9493>`_: Symbolic link components are no longer resolved in conftest paths.
This means that if a conftest appears twice in collection tree, using symlinks, it will be executed twice.
For example, given

tests/real/conftest.py
tests/real/test_it.py
tests/link -> tests/real

running ``pytest tests`` now imports the conftest twice, once as ``tests/real/conftest.py`` and once as ``tests/link/conftest.py``.
This is a fix to match a similar change made to test collection itself in pytest 6.0 (see :pull:`6523` for details).


- `#9626 <https://github.com/pytest-dev/pytest/issues/9626>`_: Fixed count of selected tests on terminal collection summary when there were errors or skipped modules.

If there were errors or skipped modules on collection, pytest would mistakenly subtract those from the selected count.


- `#9645 <https://github.com/pytest-dev/pytest/issues/9645>`_: Fixed regression where ``--import-mode=importlib`` used together with :envvar:`PYTHONPATH` or :confval:`pythonpath` would cause import errors in test suites.


- `#9708 <https://github.com/pytest-dev/pytest/issues/9708>`_: :fixture:`pytester` now requests a :fixture:`monkeypatch` fixture instead of creating one internally. This solves some issues with tests that involve pytest environment variables.


- `#9730 <https://github.com/pytest-dev/pytest/issues/9730>`_: Malformed ``pyproject.toml`` files now produce a clearer error message.


pytest 7.0.1 (2022-02-11)
=========================

Bug Fixes
---------

- `#9608 <https://github.com/pytest-dev/pytest/issues/9608>`_: Fix invalid importing of ``importlib.readers`` in Python 3.9.


- `#9610 <https://github.com/pytest-dev/pytest/issues/9610>`_: Restore `UnitTestFunction.obj` to return unbound rather than bound method.
Fixes a crash during a failed teardown in unittest TestCases with non-default `__init__`.
Regressed in pytest 7.0.0.


- `#9636 <https://github.com/pytest-dev/pytest/issues/9636>`_: The ``pythonpath`` plugin was renamed to ``python_path``. This avoids a conflict with the ``pytest-pythonpath`` plugin.


- `#9642 <https://github.com/pytest-dev/pytest/issues/9642>`_: Fix running tests by id with ``::`` in the parametrize portion.


- `#9643 <https://github.com/pytest-dev/pytest/issues/9643>`_: Delay issuing a :class:`~pytest.PytestWarning` about diamond inheritance involving :class:`~pytest.Item` and
:class:`~pytest.Collector` so it can be filtered using :ref:`standard warning filters <warnings>`.


pytest 7.0.0 (2022-02-03)
=========================
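A short sketch of the widened ``ids`` support described in #9678 above (the test name and values are illustrative, not taken from the pytest test suite)::

    import enum
    import re

    import pytest

    class Color(enum.Enum):
        RED = 1

    # Previously only str, float, int and bool were accepted as explicit ids;
    # bytes, complex, re.Pattern, Enum and anything with a __name__ now work too.
    @pytest.mark.parametrize(
        "value",
        [1, 2, 3, 4],
        ids=[b"raw", 1 + 2j, re.compile(r"^ab$"), Color.RED],
    )
    def test_widened_ids(value):
        assert value in {1, 2, 3, 4}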
@@ -187,6 +312,8 @@ Deprecations
:class:`unittest.SkipTest` / :meth:`unittest.TestCase.skipTest` /
:func:`unittest.skip` in unittest test cases is fully supported.

.. note:: This deprecation has been reverted in pytest 7.1.0.


- `#8315 <https://github.com/pytest-dev/pytest/issues/8315>`_: Several behaviors of :meth:`Parser.addoption <pytest.Parser.addoption>` are now
scheduled for removal in pytest 8 (deprecated since pytest 2.4.0):
@@ -382,7 +382,6 @@ texinfo_documents = [
]


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"pluggy": ("https://pluggy.readthedocs.io/en/stable", None),
"python": ("https://docs.python.org/3", None),

@@ -390,10 +389,6 @@ intersphinx_mapping = {
"pip": ("https://pip.pypa.io/en/stable", None),
"tox": ("https://tox.wiki/en/stable", None),
"virtualenv": ("https://virtualenv.pypa.io/en/stable", None),
"django": (
"http://docs.djangoproject.com/en/stable",
"http://docs.djangoproject.com/en/stable/_objects",
),
"setuptools": ("https://setuptools.pypa.io/en/stable", None),
}
@@ -16,7 +16,7 @@ Deprecated Features
-------------------

Below is a complete list of all pytest features which are considered deprecated. Using those features will issue
:class:`PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters <warnings>`.
:class:`~pytest.PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters <warnings>`.

.. _instance-collector-deprecation:

@@ -241,19 +241,6 @@ scheduled for removal in pytest 8 (deprecated since pytest 2.4.0):
- ``parser.addoption(..., type="int/string/float/complex")`` - use ``type=int`` etc. instead.


Raising ``unittest.SkipTest`` during collection
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 7.0

Raising :class:`unittest.SkipTest` to skip collection of tests during the
pytest collection phase is deprecated. Use :func:`pytest.skip` instead.

Note: This deprecation only relates to using `unittest.SkipTest` during test
collection. You are probably not doing that. Ordinary usage of
:class:`unittest.SkipTest` / :meth:`unittest.TestCase.skipTest` /
:func:`unittest.skip` in unittest test cases is fully supported.

Using ``pytest.warns(None)``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -314,7 +301,7 @@ The ``pytest_warning_captured`` hook

This hook has an `item` parameter which cannot be serialized by ``pytest-xdist``.

Use the ``pytest_warning_recored`` hook instead, which replaces the ``item`` parameter
Use the ``pytest_warning_recorded`` hook instead, which replaces the ``item`` parameter
by a ``nodeid`` parameter.
@@ -155,7 +155,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
> assert [0, 1, 2] == [0, 1, 3]
E assert [0, 1, 2] == [0, 1, 3]
E At index 2 diff: 2 != 3
E Use -v to get the full diff
E Use -v to get more diff

failure_demo.py:63: AssertionError
______________ TestSpecialisedExplanations.test_eq_list_long _______________

@@ -168,7 +168,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
> assert a == b
E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...]
E At index 100 diff: 1 != 2
E Use -v to get the full diff
E Use -v to get more diff

failure_demo.py:68: AssertionError
_________________ TestSpecialisedExplanations.test_eq_dict _________________

@@ -215,7 +215,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
> assert [1, 2] == [1, 2, 3]
E assert [1, 2] == [1, 2, 3]
E Right contains one more item: 3
E Use -v to get the full diff
E Use -v to get more diff

failure_demo.py:77: AssertionError
_________________ TestSpecialisedExplanations.test_in_list _________________
@@ -22,7 +22,7 @@ Install ``pytest``
.. code-block:: bash

$ pytest --version
pytest 7.0.0
pytest 7.1.1

.. _`simpletest`:
@@ -201,7 +201,7 @@ if you run this module:
E '1'
E Extra items in the right set:
E '5'
E Use -v to get the full diff
E Use -v to get more diff

test_assert2.py:4: AssertionError
========================= short test summary info ==========================
@@ -5,7 +5,7 @@ How to set up bash completion
=============================

When using bash as your shell, ``pytest`` can use argcomplete
(https://argcomplete.readthedocs.io/) for auto-completion.
(https://kislyuk.github.io/argcomplete/) for auto-completion.
For this ``argcomplete`` needs to be installed **and** enabled.

Install argcomplete using:
@@ -358,7 +358,7 @@ Additional use cases of warnings in tests

Here are some use cases involving warnings that often come up in tests, and suggestions on how to deal with them:

- To ensure that **any** warning is emitted, use:
- To ensure that **at least one** warning is emitted, use:

.. code-block:: python
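For reference, the reworded recommendation amounts to something like this (a minimal sketch, not the exact snippet from the documentation)::

    import warnings

    import pytest

    def test_emits_at_least_one_warning():
        # pytest.warns(Warning) passes as long as at least one warning of any
        # kind is emitted inside the block.
        with pytest.warns(Warning):
            warnings.warn("something happened", RuntimeWarning)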
@@ -441,3 +441,18 @@ Please read our :ref:`backwards-compatibility` to learn how we proceed about dep
features.

The full list of warnings is listed in :ref:`the reference documentation <warnings ref>`.


.. _`resource-warnings`:

Resource Warnings
-----------------

Additional information of the source of a :class:`ResourceWarning` can be obtained when captured by pytest if
:mod:`tracemalloc` module is enabled.

One convenient way to enable :mod:`tracemalloc` when running tests is to set the :envvar:`PYTHONTRACEMALLOC` to a large
enough number of frames (say ``20``, but that number is application dependent).

For more information, consult the `Python Development Mode <https://docs.python.org/3/library/devmode.html>`__
section in the Python documentation.
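A minimal sketch of the workflow this new section describes; the test body and the frame count of 20 are illustrative assumptions::

    # Run with tracemalloc enabled so the captured ResourceWarning also reports
    # where the leaking object was allocated, e.g.:
    #   PYTHONTRACEMALLOC=20 pytest -W error::ResourceWarning test_leak.py
    def test_leaky_file(tmp_path):
        f = open(tmp_path / "data.txt", "w")  # never closed
        f.write("hello")
        del f  # ResourceWarning is emitted when the unclosed file is collected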
@@ -84,7 +84,7 @@ Executing pytest normally gives us this output (we are skipping the header to fo
> assert fruits1 == fruits2
E AssertionError: assert ['banana', 'a...elon', 'kiwi'] == ['banana', 'a...elon', 'kiwi']
E At index 2 diff: 'grapes' != 'orange'
E Use -v to get the full diff
E Use -v to get more diff

test_verbosity_example.py:8: AssertionError
____________________________ test_numbers_fail _____________________________

@@ -99,7 +99,7 @@ Executing pytest normally gives us this output (we are skipping the header to fo
E {'1': 1, '2': 2, '3': 3, '4': 4}
E Right contains 4 more items:
E {'10': 10, '20': 20, '30': 30, '40': 40}
E Use -v to get the full diff
E Use -v to get more diff

test_verbosity_example.py:14: AssertionError
___________________________ test_long_text_fail ____________________________
@@ -21,7 +21,7 @@ there is no need to activate it.
Here is a little annotated list for some popular plugins:

* :pypi:`pytest-django`: write tests
for :std:doc:`django <django:index>` apps, using pytest integration.
for `django <https://docs.djangoproject.com/>`_ apps, using pytest integration.

* :pypi:`pytest-twisted`: write tests
for `twisted <https://twistedmatrix.com/>`_ apps, starting a reactor and
@@ -1,11 +1,16 @@
:orphan:

.. sidebar:: Next Open Trainings

- `PyConDE <https://2022.pycon.de/program/W93DBJ/>`__, April 11th 2022 (3h), Berlin, Germany
- `PyConIT <https://pycon.it/en/talk/pytest-simple-rapid-and-fun-testing-with-python>`__, June 3rd 2022 (4h), Florence, Italy
- `Professional Testing with Python <https://python-academy.com/courses/python_course_testing.html>`_, via `Python Academy <https://www.python-academy.com/>`_, March 7th to 9th 2023 (3 day in-depth training), Remote and Leipzig, Germany

Also see :doc:`previous talks and blogposts <talks>`.

..
.. sidebar:: Next Open Trainings

- `Professional Testing with Python <https://www.python-academy.com/courses/specialtopics/python_course_testing.html>`_, via `Python Academy <https://www.python-academy.com/>`_, February 1st to 3rd, 2022, Leipzig (Germany) and remote.

Also see `previous talks and blogposts <talks.html>`_.
- `Europython <https://ep2022.europython.eu/>`__, July 11th to 17th (3h), Dublin, Ireland
- `CH Open Workshoptage <https://workshoptage.ch/>`__ (German), September 6th to 8th (1 day), Bern, Switzerland

.. _features:
File diff suppressed because it is too large.
@@ -11,9 +11,14 @@ Books
- `Python Testing with pytest, by Brian Okken (2017)
<https://pragprog.com/book/bopytest/python-testing-with-pytest>`_.

- `Python Testing with pytest, Second Edition, by Brian Okken (2022)
<https://pragprog.com/titles/bopytest2/python-testing-with-pytest-second-edition>`_.

Talks and blog postings
---------------------------------------------

- `pytest: Simple, rapid and fun testing with Python, <https://youtu.be/cSJ-X3TbQ1c?t=15752>`_ (@ 4:22:32), Florian Bruhin, WeAreDevelopers World Congress 2021

- Webinar: `pytest: Test Driven Development für Python (German) <https://bruhin.software/ins-pytest/>`_, Florian Bruhin, via mylearning.ch, 2020

- Webinar: `Simplify Your Tests with Fixtures <https://blog.jetbrains.com/pycharm/2020/08/webinar-recording-simplify-your-tests-with-fixtures-with-oliver-bestwalter/>`_, Oliver Bestwalter, via JetBrains, 2020
@@ -1,4 +1,5 @@
import sys

from distutils.core import setup

if __name__ == "__main__":
@@ -672,10 +672,11 @@ class ExceptionInfo(Generic[E]):
If it matches `True` is returned, otherwise an `AssertionError` is raised.
"""
__tracebackhide__ = True
msg = "Regex pattern {!r} does not match {!r}."
if regexp == str(self.value):
msg += " Did you mean to `re.escape()` the regex?"
assert re.search(regexp, str(self.value)), msg.format(regexp, str(self.value))
value = str(self.value)
msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}"
if regexp == value:
msg += "\n Did you mean to `re.escape()` the regex?"
assert re.search(regexp, value), msg
# Return True to allow for "assert excinfo.match()".
return True
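A quick illustration of how the reworked assertion message surfaces to users (the example test is an assumption, not part of the change)::

    import pytest

    def test_match_reports_regex_and_input_separately():
        with pytest.raises(ValueError) as excinfo:
            raise ValueError("unknown option 'frobnicate'")
        # On failure, the AssertionError now shows the regex and the input on
        # their own indented lines instead of one long sentence.
        assert excinfo.match(r"unknown option .*")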
@@ -107,6 +107,23 @@ def saferepr(obj: object, maxsize: Optional[int] = DEFAULT_REPR_MAX_SIZE) -> str
return SafeRepr(maxsize).repr(obj)


def saferepr_unlimited(obj: object) -> str:
"""Return an unlimited-size safe repr-string for the given object.

As with saferepr, failing __repr__ functions of user instances
will be represented with a short exception info.

This function is a wrapper around simple repr.

Note: a cleaner solution would be to alter ``saferepr`` this way
when maxsize=None, but that might affect some other code.
"""
try:
return repr(obj)
except Exception as exc:
return _format_repr_exception(exc, obj)


class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter):
"""PrettyPrinter that always dispatches (regardless of width)."""
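Roughly, the new helper behaves like this stand-alone sketch (the fallback text pytest actually produces comes from _format_repr_exception and may differ in detail)::

    def saferepr_unlimited_sketch(obj: object) -> str:
        """Rough stand-in for the saferepr_unlimited() added above."""
        try:
            return repr(obj)  # no size limit, unlike saferepr()
        except Exception as exc:
            return f"<[{exc!r} raised in repr()] {type(obj).__name__} object>"

    class Broken:
        def __repr__(self) -> str:
            raise RuntimeError("boom")

    print(saferepr_unlimited_sketch([1, 2, 3]))  # "[1, 2, 3]"
    print(saferepr_unlimited_sketch(Broken()))   # placeholder instead of a crash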
@@ -273,13 +273,15 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
with open(pathname, "rb") as f:
return f.read()

if sys.version_info >= (3, 9):
if sys.version_info >= (3, 10):

def get_resource_reader(self, name: str) -> importlib.abc.TraversableResources:  # type: ignore
from types import SimpleNamespace
from importlib.readers import FileReader
if sys.version_info < (3, 11):
from importlib.readers import FileReader
else:
from importlib.resources.readers import FileReader

return FileReader(SimpleNamespace(path=self._rewritten_names[name]))
return FileReader(types.SimpleNamespace(path=self._rewritten_names[name]))


def _write_pyc_fp(
@@ -14,8 +14,8 @@ from typing import Sequence
import _pytest._code
from _pytest import outcomes
from _pytest._io.saferepr import _pformat_dispatch
from _pytest._io.saferepr import safeformat
from _pytest._io.saferepr import saferepr
from _pytest._io.saferepr import saferepr_unlimited
from _pytest.config import Config

# The _reprcompare attribute on the util module is used by the new assertion

@@ -160,8 +160,8 @@ def assertrepr_compare(config, op: str, left: Any, right: Any) -> Optional[List[
"""Return specialised explanations for some operators/operands."""
verbose = config.getoption("verbose")
if verbose > 1:
left_repr = safeformat(left)
right_repr = safeformat(right)
left_repr = saferepr_unlimited(left)
right_repr = saferepr_unlimited(right)
else:
# XXX: "15 chars indentation" is wrong
# ("E       AssertionError: assert "); should use term width.

@@ -223,8 +223,6 @@ def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]:
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
elif verbose > 0:
explanation = _compare_eq_verbose(left, right)

if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)

@@ -281,18 +279,6 @@ def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
return explanation


def _compare_eq_verbose(left: Any, right: Any) -> List[str]:
keepends = True
left_lines = repr(left).splitlines(keepends)
right_lines = repr(right).splitlines(keepends)

explanation: List[str] = []
explanation += ["+" + line for line in left_lines]
explanation += ["-" + line for line in right_lines]

return explanation


def _surrounding_parens_on_own_lines(lines: List[str]) -> None:
"""Move opening/closing parenthesis/bracket to own lines."""
opening = lines[0][:1]

@@ -308,8 +294,8 @@ def _surrounding_parens_on_own_lines(lines: List[str]) -> None:
def _compare_eq_iterable(
left: Iterable[Any], right: Iterable[Any], verbose: int = 0
) -> List[str]:
if not verbose and not running_on_ci():
return ["Use -v to get the full diff"]
if verbose <= 0 and not running_on_ci():
return ["Use -v to get more diff"]
# dynamic import to speedup pytest
import difflib
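The user-visible effect of the reworded hint can be reproduced with a failing comparison such as this sketch (file name and values are illustrative)::

    # test_diff_hint.py
    def test_long_lists_differ():
        left = list(range(30))
        right = list(range(29)) + [99]
        # Without -v (and outside CI) the explanation ends with
        # "Use -v to get more diff"; -v or -vv expands the diff further.
        assert left == right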
@@ -254,7 +254,7 @@ default_plugins = essential_plugins + (
"warnings",
"logging",
"reports",
"pythonpath",
"python_path",
*(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []),
"faulthandler",
)

@@ -309,7 +309,9 @@ def _prepareconfig(
elif isinstance(args, os.PathLike):
args = [os.fspath(args)]
elif not isinstance(args, list):
msg = "`args` parameter expected to be a list of strings, got: {!r} (type: {})"
msg = (  # type:ignore[unreachable]
"`args` parameter expected to be a list of strings, got: {!r} (type: {})"
)
raise TypeError(msg.format(args, type(args)))

config = get_config(args, plugins)

@@ -538,11 +540,7 @@ class PytestPluginManager(PluginManager):
"""
if self._confcutdir is None:
return True
try:
path.relative_to(self._confcutdir)
except ValueError:
return False
return True
return path not in self._confcutdir.parents

def _try_load_conftest(
self, anchor: Path, importmode: Union[str, ImportMode], rootpath: Path
@@ -70,7 +70,7 @@ def load_config_dict_from_file(
try:
config = tomli.loads(toml_text)
except tomli.TOMLDecodeError as exc:
raise UsageError(str(exc)) from exc
raise UsageError(f"{filepath}: {exc}") from exc

result = config.get("tool", {}).get("pytest", {}).get("ini_options", None)
if result is not None:
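A sketch of the difference: the error now carries the offending file path in front of the parser message (the malformed snippet is illustrative, and the exact parser wording may vary)::

    import tomli

    toml_text = "[tool.pytest.ini_options\naddopts = '-ra'"  # missing closing bracket
    try:
        tomli.loads(toml_text)
    except tomli.TOMLDecodeError as exc:
        # previously: str(exc) alone; now the message is prefixed with the path
        print(f"pyproject.toml: {exc}")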
@@ -47,11 +47,6 @@ STRICT_OPTION = PytestRemovedIn8Warning(
# This deprecation is never really meant to be removed.
PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.")

UNITTEST_SKIP_DURING_COLLECTION = PytestRemovedIn8Warning(
"Raising unittest.SkipTest to skip tests during collection is deprecated. "
"Use pytest.skip() instead."
)

ARGUMENT_PERCENT_DEFAULT = PytestRemovedIn8Warning(
'pytest now uses argparse. "%default" should be changed to "%(default)s"',
)
@@ -597,8 +597,17 @@ class FixtureRequest:
funcitem = self._pyfuncitem
scope = fixturedef._scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
callspec = funcitem.callspec
except AttributeError:
callspec = None
if callspec is not None and argname in callspec.params:
param = callspec.params[argname]
param_index = callspec.indices[argname]
# If a parametrize invocation set a scope it will override
# the static scope defined with the fixture function.
with suppress(KeyError):
scope = callspec._arg2scope[argname]
else:
param = NOTSET
param_index = 0
has_params = fixturedef.params is not None

@@ -638,12 +647,6 @@ class FixtureRequest:
)
)
fail(msg, pytrace=False)
else:
param_index = funcitem.callspec.indices[argname]
# If a parametrize invocation set a scope it will override
# the static scope defined with the fixture function.
with suppress(KeyError):
scope = funcitem.callspec._arg2scope[argname]

subrequest = SubRequest(
self, scope, param, param_index, fixturedef, _ispytest=True

@@ -927,7 +930,7 @@ def _eval_scope_callable(

@final
class FixtureDef(Generic[FixtureValue]):
"""A container for a factory definition."""
"""A container for a fixture definition."""

def __init__(
self,
@@ -939,33 +942,56 @@ class FixtureDef(Generic[FixtureValue]):
params: Optional[Sequence[object]],
unittest: bool = False,
ids: Optional[
Union[
Tuple[Union[None, str, float, int, bool], ...],
Callable[[Any], Optional[object]],
]
Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]
] = None,
) -> None:
self._fixturemanager = fixturemanager
# The "base" node ID for the fixture.
#
# This is a node ID prefix. A fixture is only available to a node (e.g.
# a `Function` item) if the fixture's baseid is a parent of the node's
# nodeid (see the `iterparentnodeids` function for what constitutes a
# "parent" and a "prefix" in this context).
#
# For a fixture found in a Collector's object (e.g. a `Module`s module,
# a `Class`'s class), the baseid is the Collector's nodeid.
#
# For a fixture found in a conftest plugin, the baseid is the conftest's
# directory path relative to the rootdir.
#
# For other plugins, the baseid is the empty string (always matches).
self.baseid = baseid or ""
# Whether the fixture was found from a node or a conftest in the
# collection tree. Will be false for fixtures defined in non-conftest
# plugins.
self.has_location = baseid is not None
# The fixture factory function.
self.func = func
# The name by which the fixture may be requested.
self.argname = argname
if scope is None:
scope = Scope.Function
elif callable(scope):
scope = _eval_scope_callable(scope, argname, fixturemanager.config)

if isinstance(scope, str):
scope = Scope.from_user(
scope, descr=f"Fixture '{func.__name__}'", where=baseid
)
self._scope = scope
# If the fixture is directly parametrized, the parameter values.
self.params: Optional[Sequence[object]] = params
self.argnames: Tuple[str, ...] = getfuncargnames(
func, name=argname, is_method=unittest
)
self.unittest = unittest
# If the fixture is directly parametrized, a tuple of explicit IDs to
# assign to the parameter values, or a callable to generate an ID given
# a parameter value.
self.ids = ids
# The names requested by the fixtures.
self.argnames = getfuncargnames(func, name=argname, is_method=unittest)
# Whether the fixture was collected from a unittest TestCase class.
# Note that it really only makes sense to define autouse fixtures in
# unittest TestCases.
self.unittest = unittest
# If the fixture was executed, the current value of the fixture.
# Can change if the fixture is executed with different parameters.
self.cached_result: Optional[_FixtureCachedResult[FixtureValue]] = None
self._finalizers: List[Callable[[], object]] = []
@@ -1093,18 +1119,8 @@ def pytest_fixture_setup(


def _ensure_immutable_ids(
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
],
) -> Optional[
Union[
Tuple[Union[None, str, float, int, bool], ...],
Callable[[Any], Optional[object]],
]
]:
ids: Optional[Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]]
) -> Optional[Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]]:
if ids is None:
return None
if callable(ids):

@@ -1148,9 +1164,8 @@ class FixtureFunctionMarker:
scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"
params: Optional[Tuple[object, ...]] = attr.ib(converter=_params_converter)
autouse: bool = False
ids: Union[
Tuple[Union[None, str, float, int, bool], ...],
Callable[[Any], Optional[object]],
ids: Optional[
Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]
] = attr.ib(
default=None,
converter=_ensure_immutable_ids,

@@ -1191,10 +1206,7 @@ def fixture(
params: Optional[Iterable[object]] = ...,
autouse: bool = ...,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
] = ...,
name: Optional[str] = ...,
) -> FixtureFunction:

@@ -1209,10 +1221,7 @@ def fixture(
params: Optional[Iterable[object]] = ...,
autouse: bool = ...,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
] = ...,
name: Optional[str] = None,
) -> FixtureFunctionMarker:

@@ -1226,10 +1235,7 @@ def fixture(
params: Optional[Iterable[object]] = None,
autouse: bool = False,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
] = None,
name: Optional[str] = None,
) -> Union[FixtureFunctionMarker, FixtureFunction]:
@@ -1271,7 +1277,7 @@ def fixture(
the fixture.

:param ids:
List of string ids each corresponding to the params so that they are
Sequence of ids each corresponding to the params so that they are
part of the test id. If no ids are provided they will be generated
automatically from the params.
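Under the loosened annotation, non-string ids such as bytes should be usable for fixture params as well (an illustrative sketch, not taken from the pytest test suite)::

    import pytest

    @pytest.fixture(params=[100, 2000], ids=[b"fast", b"slow"])
    def timeout_ms(request):
        return request.param

    def test_timeout_is_positive(timeout_ms):
        assert timeout_ms > 0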
@@ -870,7 +870,10 @@ def resolve_collection_argument(
If the path doesn't exist, raise UsageError.
If the path is a directory and selection parts are present, raise UsageError.
"""
strpath, *parts = str(arg).split("::")
base, squacket, rest = str(arg).partition("[")
strpath, *parts = base.split("::")
if parts:
parts[-1] = f"{parts[-1]}{squacket}{rest}"
if as_pypath:
strpath = search_pypath(strpath)
fspath = invocation_path / strpath
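The ``partition("[")`` above is what lets a test id keep ``::`` inside its parametrize brackets (issue #9642). A stand-alone sketch of the splitting logic, with a hypothetical helper name::

    def split_collection_arg(arg: str):
        """Mirrors the base/parts split done in resolve_collection_argument()."""
        base, squacket, rest = arg.partition("[")
        strpath, *parts = base.split("::")
        if parts:
            parts[-1] = f"{parts[-1]}{squacket}{rest}"
        return strpath, parts

    # "::" inside the parametrize portion no longer breaks the split:
    assert split_collection_arg("tests/test_x.py::test_connect[ipv6-::1]") == (
        "tests/test_x.py",
        ["test_connect[ipv6-::1]"],
    )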
@@ -397,7 +397,7 @@ if TYPE_CHECKING:
from _pytest.scope import _ScopeName

class _SkipMarkDecorator(MarkDecorator):
@overload  # type: ignore[override,misc]
@overload  # type: ignore[override,misc,no-overload-impl]
def __call__(self, arg: Markable) -> Markable:
...

@@ -415,7 +415,7 @@ if TYPE_CHECKING:
...

class _XfailMarkDecorator(MarkDecorator):
@overload  # type: ignore[override,misc]
@overload  # type: ignore[override,misc,no-overload-impl]
def __call__(self, arg: Markable) -> Markable:
...
@@ -656,20 +656,6 @@ class Item(Node):

nextitem = None

def __init_subclass__(cls) -> None:
problems = ", ".join(
base.__name__ for base in cls.__bases__ if issubclass(base, Collector)
)
if problems:
warnings.warn(
f"{cls.__name__} is an Item subclass and should not be a collector, "
f"however its bases {problems} are collectors.\n"
"Please split the Collectors and the Item into separate node types.\n"
"Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n"
"example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/",
PytestWarning,
)

def __init__(
self,
name,

@@ -697,6 +683,37 @@ class Item(Node):
#: for this test.
self.user_properties: List[Tuple[str, object]] = []

self._check_item_and_collector_diamond_inheritance()

def _check_item_and_collector_diamond_inheritance(self) -> None:
"""
Check if the current type inherits from both File and Collector
at the same time, emitting a warning accordingly (#8447).
"""
cls = type(self)

# We inject an attribute in the type to avoid issuing this warning
# for the same class more than once, which is not helpful.
# It is a hack, but was deemed acceptable in order to avoid
# flooding the user in the common case.
attr_name = "_pytest_diamond_inheritance_warning_shown"
if getattr(cls, attr_name, False):
return
setattr(cls, attr_name, True)

problems = ", ".join(
base.__name__ for base in cls.__bases__ if issubclass(base, Collector)
)
if problems:
warnings.warn(
f"{cls.__name__} is an Item subclass and should not be a collector, "
f"however its bases {problems} are collectors.\n"
"Please split the Collectors and the Item into separate node types.\n"
"Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n"
"example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/",
PytestWarning,
)

def runtest(self) -> None:
"""Run the test case for this item.
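For context, the shape this (now lazily emitted) warning targets looks roughly like the class below; the class name is illustrative::

    import pytest

    # An Item subclass that also inherits from a Collector (File) -- the
    # "diamond" that _check_item_and_collector_diamond_inheritance() detects.
    # The PytestWarning is now issued when the node is instantiated, so it can
    # be filtered with standard warning filters (#9643).
    class LegacyFileItem(pytest.Item, pytest.File):
        def runtest(self) -> None:
            pass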
@@ -603,11 +603,20 @@ def insert_missing_modules(modules: Dict[str, ModuleType], module_name: str) ->
module_parts = module_name.split(".")
while module_name:
if module_name not in modules:
module = ModuleType(
module_name,
doc="Empty module created by pytest's importmode=importlib.",
)
modules[module_name] = module
try:
# If sys.meta_path is empty, calling import_module will issue
# a warning and raise ModuleNotFoundError. To avoid the
# warning, we check sys.meta_path explicitly and raise the error
# ourselves to fall back to creating a dummy module.
if not sys.meta_path:
raise ModuleNotFoundError
importlib.import_module(module_name)
except ModuleNotFoundError:
module = ModuleType(
module_name,
doc="Empty module created by pytest's importmode=importlib.",
)
modules[module_name] = module
module_parts.pop(-1)
module_name = ".".join(module_parts)
@@ -477,7 +477,9 @@ def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]:


@fixture
def pytester(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> "Pytester":
def pytester(
request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch
) -> "Pytester":
"""
Facilities to write tests/configuration files, execute pytest in isolation, and match
against expected output, perfect for black-box testing of pytest plugins.

@@ -488,7 +490,7 @@ def pytester(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> "Pyt
It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path`
fixture but provides methods which aid in testing pytest itself.
"""
return Pytester(request, tmp_path_factory, _ispytest=True)
return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True)


@fixture

@@ -683,6 +685,7 @@ class Pytester:
self,
request: FixtureRequest,
tmp_path_factory: TempPathFactory,
monkeypatch: MonkeyPatch,
*,
_ispytest: bool = False,
) -> None:

@@ -706,7 +709,7 @@ class Pytester:
self._method = self._request.config.getoption("--runpytest")
self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True)

self._monkeypatch = mp = MonkeyPatch()
self._monkeypatch = mp = monkeypatch
mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot))
# Ensure no unexpected caching via tox.
mp.delenv("TOX_ENV_DIR", raising=False)

@@ -738,7 +741,6 @@ class Pytester:
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
self._monkeypatch.undo()

def __take_sys_modules_snapshot(self) -> SysModulesSnapshot:
# Some zope modules used by twisted-related tests keep internal state

@@ -830,7 +832,7 @@ class Pytester:
return self._makefile(ext, args, kwargs)

def makeconftest(self, source: str) -> Path:
"""Write a contest.py file with 'source' as contents."""
"""Write a conftest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)

def makeini(self, source: str) -> Path:
@@ -905,8 +905,6 @@ class InstanceDummy:
only to ignore it; this dummy class keeps them working. This will be removed
in pytest 8."""

pass


def __getattr__(name: str) -> object:
if name == "Instance":

@@ -942,7 +940,7 @@ class IdMaker:
# ParameterSet.
idfn: Optional[Callable[[Any], Optional[object]]]
# Optionally, explicit IDs for ParameterSets by index.
ids: Optional[Sequence[Union[None, str]]]
ids: Optional[Sequence[Optional[object]]]
# Optionally, the pytest config.
# Used for controlling ASCII escaping, and for calling the
# :hook:`pytest_make_parametrize_id` hook.

@@ -950,6 +948,9 @@ class IdMaker:
# Optionally, the ID of the node being parametrized.
# Used only for clearer error messages.
nodeid: Optional[str]
# Optionally, the ID of the function being parametrized.
# Used only for clearer error messages.
func_name: Optional[str]

def make_unique_parameterset_ids(self) -> List[str]:
"""Make a unique identifier for each ParameterSet, that may be used to

@@ -984,9 +985,7 @@ class IdMaker:
yield parameterset.id
elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
# ID provided in the IDs list - parametrize(..., ids=[...]).
id = self.ids[idx]
assert id is not None
yield _ascii_escaped_by_config(id, self.config)
yield self._idval_from_value_required(self.ids[idx], idx)
else:
# ID not provided - generate it.
yield "-".join(

@@ -1055,6 +1054,25 @@ class IdMaker:
return name
return None

def _idval_from_value_required(self, val: object, idx: int) -> str:
"""Like _idval_from_value(), but fails if the type is not supported."""
id = self._idval_from_value(val)
if id is not None:
return id

# Fail.
if self.func_name is not None:
prefix = f"In {self.func_name}: "
elif self.nodeid is not None:
prefix = f"In {self.nodeid}: "
else:
prefix = ""
msg = (
f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. "
"Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__."
)
fail(msg, pytrace=False)

@staticmethod
def _idval_from_argname(argname: str, idx: int) -> str:
"""Make an ID for a parameter in a ParameterSet from the argument name
@@ -1184,10 +1202,7 @@ class Metafunc:
argvalues: Iterable[Union[ParameterSet, Sequence[object], object]],
indirect: Union[bool, Sequence[str]] = False,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]]
] = None,
scope: "Optional[_ScopeName]" = None,
*,

@@ -1318,10 +1333,7 @@ class Metafunc:
self,
argnames: Sequence[str],
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]]
],
parametersets: Sequence[ParameterSet],
nodeid: str,

@@ -1351,16 +1363,22 @@ class Metafunc:
idfn = None
ids_ = self._validate_ids(ids, parametersets, self.function.__name__)
id_maker = IdMaker(
argnames, parametersets, idfn, ids_, self.config, nodeid=nodeid
argnames,
parametersets,
idfn,
ids_,
self.config,
nodeid=nodeid,
func_name=self.function.__name__,
)
return id_maker.make_unique_parameterset_ids()

def _validate_ids(
self,
ids: Iterable[Union[None, str, float, int, bool]],
ids: Iterable[Optional[object]],
parametersets: Sequence[ParameterSet],
func_name: str,
) -> List[Union[None, str]]:
) -> List[Optional[object]]:
try:
num_ids = len(ids)  # type: ignore[arg-type]
except TypeError:

@@ -1375,22 +1393,7 @@ class Metafunc:
msg = "In {}: {} parameter sets specified, with different number of ids: {}"
fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False)

new_ids = []
for idx, id_value in enumerate(itertools.islice(ids, num_ids)):
if id_value is None or isinstance(id_value, str):
new_ids.append(id_value)
elif isinstance(id_value, (float, int, bool)):
new_ids.append(str(id_value))
else:
msg = (  # type: ignore[unreachable]
"In {}: ids must be list of string/float/int/bool, "
"found: {} (type: {!r}) at index {}"
)
fail(
msg.format(func_name, saferepr(id_value), type(id_value), idx),
pytrace=False,
)
return new_ids
return list(itertools.islice(ids, num_ids))

def _resolve_arg_value_types(
self,
@ -1,5 +1,6 @@
|
|||
import math
|
||||
import pprint
|
||||
from collections.abc import Collection
|
||||
from collections.abc import Sized
|
||||
from decimal import Decimal
|
||||
from numbers import Complex
|
||||
|
@ -8,7 +9,6 @@ from typing import Any
|
|||
from typing import Callable
|
||||
from typing import cast
|
||||
from typing import Generic
|
||||
from typing import Iterable
|
||||
from typing import List
|
||||
from typing import Mapping
|
||||
from typing import Optional
|
||||
|
@ -131,7 +131,6 @@ class ApproxBase:
|
|||
# a numeric type. For this reason, the default is to do nothing. The
|
||||
# classes that deal with sequences should reimplement this method to
|
||||
# raise if there are any non-numeric elements in the sequence.
|
||||
pass
|
||||
|
||||
|
||||
def _recursive_list_map(f, x):
|
||||
|
@@ -307,12 +306,12 @@ class ApproxMapping(ApproxBase):
                raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))


class ApproxSequencelike(ApproxBase):
class ApproxSequenceLike(ApproxBase):
    """Perform approximate comparisons where the expected value is a sequence of numbers."""

    def __repr__(self) -> str:
        seq_type = type(self.expected)
        if seq_type not in (tuple, list, set):
        if seq_type not in (tuple, list):
            seq_type = list
        return "approx({!r})".format(
            seq_type(self._approx_scalar(x) for x in self.expected)

@ -320,7 +319,6 @@ class ApproxSequencelike(ApproxBase):
|
|||
|
||||
def _repr_compare(self, other_side: Sequence[float]) -> List[str]:
|
||||
import math
|
||||
import numpy as np
|
||||
|
||||
if len(self.expected) != len(other_side):
|
||||
return [
|
||||
|
@ -341,7 +339,7 @@ class ApproxSequencelike(ApproxBase):
|
|||
abs_diff = abs(approx_value.expected - other_value)
|
||||
max_abs_diff = max(max_abs_diff, abs_diff)
|
||||
if other_value == 0.0:
|
||||
max_rel_diff = np.inf
|
||||
max_rel_diff = math.inf
|
||||
else:
|
||||
max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
|
||||
different_ids.append(i)
|
||||
|
@ -516,7 +514,7 @@ class ApproxDecimal(ApproxScalar):
|
|||
|
||||
|
||||
def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
|
||||
"""Assert that two numbers (or two sets of numbers) are equal to each other
|
||||
"""Assert that two numbers (or two ordered sequences of numbers) are equal to each other
|
||||
within some tolerance.
|
||||
|
||||
Due to the :std:doc:`tutorial/floatingpoint`, numbers that we
|
||||
|
@ -548,16 +546,11 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
|
|||
>>> 0.1 + 0.2 == approx(0.3)
|
||||
True
|
||||
|
||||
The same syntax also works for sequences of numbers::
|
||||
The same syntax also works for ordered sequences of numbers::
|
||||
|
||||
>>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
|
||||
True
|
||||
|
||||
Dictionary *values*::
|
||||
|
||||
>>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
|
||||
True
|
||||
|
||||
``numpy`` arrays::
|
||||
|
||||
>>> import numpy as np # doctest: +SKIP
|
||||
|
@ -570,6 +563,20 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
|
|||
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
|
||||
True
|
||||
|
||||
Only ordered sequences are supported, because ``approx`` needs
|
||||
to infer the relative position of the sequences without ambiguity. This means
|
||||
``sets`` and other unordered sequences are not supported.
|
||||
|
||||
Finally, dictionary *values* can also be compared::
|
||||
|
||||
>>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
|
||||
True
|
||||
|
||||
The comparison will be true if both mappings have the same keys and their
|
||||
respective values match the expected tolerances.
|
||||
|
||||
**Tolerances**
|
||||
|
||||
By default, ``approx`` considers numbers within a relative tolerance of
|
||||
``1e-6`` (i.e. one part in a million) of its expected value to be equal.
|
||||
This treatment would lead to surprising results if the expected value was
|
||||
|
@@ -709,12 +716,19 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
        expected = _as_numpy_array(expected)
        cls = ApproxNumpy
    elif (
        isinstance(expected, Iterable)
        hasattr(expected, "__getitem__")
        and isinstance(expected, Sized)
        # Type ignored because the error is wrong -- not unreachable.
        and not isinstance(expected, STRING_TYPES)  # type: ignore[unreachable]
    ):
        cls = ApproxSequencelike
        cls = ApproxSequenceLike
    elif (
        isinstance(expected, Collection)
        # Type ignored because the error is wrong -- not unreachable.
        and not isinstance(expected, STRING_TYPES)  # type: ignore[unreachable]
    ):
        msg = f"pytest.approx() only supports ordered sequences, but got: {repr(expected)}"
        raise TypeError(msg)
    else:
        cls = ApproxScalar

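A short sketch of the approx() behaviour described in the docstring and dispatch code above (illustrative only, assuming a pytest build containing this change):

    import pytest

    # Ordered sequences are compared element-wise within tolerance.
    assert (0.1 + 0.2, 0.2 + 0.4) == pytest.approx((0.3, 0.6))
    assert 1.0001 == pytest.approx(1, rel=1e-3)

    # Sets are unordered, so approx() now rejects them up front.
    with pytest.raises(TypeError, match="only supports ordered sequences"):
        pytest.approx({1, 2, 3})
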
@ -1,6 +1,7 @@
|
|||
"""Record warnings during test function execution."""
|
||||
import re
|
||||
import warnings
|
||||
from pprint import pformat
|
||||
from types import TracebackType
|
||||
from typing import Any
|
||||
from typing import Callable
|
||||
|
@ -110,7 +111,7 @@ def warns(
|
|||
r"""Assert that code raises a particular class of warning.
|
||||
|
||||
Specifically, the parameter ``expected_warning`` can be a warning class or
|
||||
sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or
|
||||
sequence of warning classes, and the code inside the ``with`` block must issue a warning of that class or
|
||||
classes.
|
||||
|
||||
This helper produces a list of :class:`warnings.WarningMessage` objects,
|
||||
|
@@ -142,10 +143,11 @@ def warns(
    __tracebackhide__ = True
    if not args:
        if kwargs:
            msg = "Unexpected keyword arguments passed to pytest.warns: "
            msg += ", ".join(sorted(kwargs))
            msg += "\nUse context-manager form instead?"
            raise TypeError(msg)
            argnames = ", ".join(sorted(kwargs))
            raise TypeError(
                f"Unexpected keyword arguments passed to pytest.warns: {argnames}"
                "\nUse context-manager form instead?"
            )
        return WarningsChecker(expected_warning, match_expr=match, _ispytest=True)
    else:
        func = args[0]

@ -191,7 +193,7 @@ class WarningsRecorder(warnings.catch_warnings):
|
|||
if issubclass(w.category, cls):
|
||||
return self._list.pop(i)
|
||||
__tracebackhide__ = True
|
||||
raise AssertionError("%r not found in warning list" % cls)
|
||||
raise AssertionError(f"{cls!r} not found in warning list")
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear the list of recorded warnings."""
|
||||
|
@ -202,7 +204,7 @@ class WarningsRecorder(warnings.catch_warnings):
|
|||
def __enter__(self) -> "WarningsRecorder": # type: ignore
|
||||
if self._entered:
|
||||
__tracebackhide__ = True
|
||||
raise RuntimeError("Cannot enter %r twice" % self)
|
||||
raise RuntimeError(f"Cannot enter {self!r} twice")
|
||||
_list = super().__enter__()
|
||||
# record=True means it's None.
|
||||
assert _list is not None
|
||||
|
@ -218,7 +220,7 @@ class WarningsRecorder(warnings.catch_warnings):
|
|||
) -> None:
|
||||
if not self._entered:
|
||||
__tracebackhide__ = True
|
||||
raise RuntimeError("Cannot exit %r without entering first" % self)
|
||||
raise RuntimeError(f"Cannot exit {self!r} without entering first")
|
||||
|
||||
super().__exit__(exc_type, exc_val, exc_tb)
|
||||
|
||||
|
@@ -268,16 +270,17 @@ class WarningsChecker(WarningsRecorder):

        __tracebackhide__ = True

        def found_str():
            return pformat([record.message for record in self], indent=2)

        # only check if we're not currently handling an exception
        if exc_type is None and exc_val is None and exc_tb is None:
            if self.expected_warning is not None:
                if not any(issubclass(r.category, self.expected_warning) for r in self):
                    __tracebackhide__ = True
                    fail(
                        "DID NOT WARN. No warnings of type {} were emitted. "
                        "The list of emitted warnings is: {}.".format(
                            self.expected_warning, [each.message for each in self]
                        )
                        f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n"
                        f"The list of emitted warnings is: {found_str()}."
                    )
                elif self.match_expr is not None:
                    for r in self:

@ -286,11 +289,8 @@ class WarningsChecker(WarningsRecorder):
|
|||
break
|
||||
else:
|
||||
fail(
|
||||
"DID NOT WARN. No warnings of type {} matching"
|
||||
" ('{}') were emitted. The list of emitted warnings"
|
||||
" is: {}.".format(
|
||||
self.expected_warning,
|
||||
self.match_expr,
|
||||
[each.message for each in self],
|
||||
)
|
||||
f"""\
|
||||
DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted.
|
||||
Regex: {self.match_expr}
|
||||
Emitted warnings: {found_str()}"""
|
||||
)
|
||||
|
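A short sketch of the pytest.warns context-manager form that the reworded failure messages above apply to (standard usage, not new in this diff):

    import warnings
    import pytest

    # Passes: a matching warning is emitted inside the block.
    with pytest.warns(UserWarning, match="deprecated"):
        warnings.warn("deprecated call", UserWarning)

    # An empty block would now fail with the multi-line
    # "DID NOT WARN. ... The list of emitted warnings is: [...]" message shown above.
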
|
|
@ -2,7 +2,6 @@
|
|||
import bdb
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
from typing import Callable
|
||||
from typing import cast
|
||||
from typing import Dict
|
||||
|
@ -28,7 +27,6 @@ from _pytest._code.code import TerminalRepr
|
|||
from _pytest.compat import final
|
||||
from _pytest.config.argparsing import Parser
|
||||
from _pytest.deprecated import check_ispytest
|
||||
from _pytest.deprecated import UNITTEST_SKIP_DURING_COLLECTION
|
||||
from _pytest.nodes import Collector
|
||||
from _pytest.nodes import Item
|
||||
from _pytest.nodes import Node
|
||||
|
@ -379,11 +377,6 @@ def pytest_make_collect_report(collector: Collector) -> CollectReport:
|
|||
# Type ignored because unittest is loaded dynamically.
|
||||
skip_exceptions.append(unittest.SkipTest) # type: ignore
|
||||
if isinstance(call.excinfo.value, tuple(skip_exceptions)):
|
||||
if unittest is not None and isinstance(
|
||||
call.excinfo.value, unittest.SkipTest # type: ignore[attr-defined]
|
||||
):
|
||||
warnings.warn(UNITTEST_SKIP_DURING_COLLECTION, stacklevel=2)
|
||||
|
||||
outcome = "skipped"
|
||||
r_ = collector._repr_failure_py(call.excinfo, "line")
|
||||
assert isinstance(r_, ExceptionChainRepr), repr(r_)
|
||||
|
|
|
@@ -663,7 +663,7 @@ class TerminalReporter:
        errors = len(self.stats.get("error", []))
        skipped = len(self.stats.get("skipped", []))
        deselected = len(self.stats.get("deselected", []))
        selected = self._numcollected - errors - skipped - deselected
        selected = self._numcollected - deselected
        line = "collected " if final else "collecting "
        line += (
            str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")

@@ -674,7 +674,7 @@ class TerminalReporter:
            line += " / %d deselected" % deselected
        if skipped:
            line += " / %d skipped" % skipped
        if self._numcollected > selected > 0:
        if self._numcollected > selected:
            line += " / %d selected" % selected
        if self.isatty:
            self.rewrite(line, bold=True, erase=True)

@ -185,6 +185,15 @@ class TestCaseFunction(Function):
|
|||
_excinfo: Optional[List[_pytest._code.ExceptionInfo[BaseException]]] = None
|
||||
_testcase: Optional["unittest.TestCase"] = None
|
||||
|
||||
def _getobj(self):
|
||||
assert self.parent is not None
|
||||
# Unlike a regular Function in a Class, where `item.obj` returns
|
||||
# a *bound* method (attached to an instance), TestCaseFunction's
|
||||
# `obj` returns an *unbound* method (not attached to an instance).
|
||||
# This inconsistency is probably not desirable, but needs some
|
||||
# consideration before changing.
|
||||
return getattr(self.parent.obj, self.originalname) # type: ignore[attr-defined]
|
||||
|
||||
def setup(self) -> None:
|
||||
# A bound method to be called during teardown() if set (see 'runtest()').
|
||||
self._explicit_tearDown: Optional[Callable[[], None]] = None
|
||||
|
|
|
@ -81,6 +81,23 @@ def warning_record_to_str(warning_message: warnings.WarningMessage) -> str:
|
|||
warning_message.lineno,
|
||||
warning_message.line,
|
||||
)
|
||||
if warning_message.source is not None:
|
||||
try:
|
||||
import tracemalloc
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
tb = tracemalloc.get_object_traceback(warning_message.source)
|
||||
if tb is not None:
|
||||
formatted_tb = "\n".join(tb.format())
|
||||
# Use a leading new line to better separate the (large) output
|
||||
# from the traceback to the previous warning text.
|
||||
msg += f"\nObject allocated at:\n{formatted_tb}"
|
||||
else:
|
||||
# No need for a leading new line.
|
||||
url = "https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings"
|
||||
msg += "Enable tracemalloc to get traceback where the object was allocated.\n"
|
||||
msg += f"See {url} for more info."
|
||||
return msg
|
||||
|
||||
|
||||
|
|
|
@ -1238,8 +1238,6 @@ def test_pdb_can_be_rewritten(pytester: Pytester) -> None:
|
|||
" def check():",
|
||||
"> assert 1 == 2",
|
||||
"E assert 1 == 2",
|
||||
"E +1",
|
||||
"E -2",
|
||||
"",
|
||||
"pdb.py:2: AssertionError",
|
||||
"*= 1 failed in *",
|
||||
|
|
|
@ -420,18 +420,20 @@ def test_match_raises_error(pytester: Pytester) -> None:
|
|||
excinfo.match(r'[123]+')
|
||||
"""
|
||||
)
|
||||
result = pytester.runpytest()
|
||||
result = pytester.runpytest("--tb=short")
|
||||
assert result.ret != 0
|
||||
|
||||
exc_msg = "Regex pattern '[[]123[]]+' does not match 'division by zero'."
|
||||
result.stdout.fnmatch_lines([f"E * AssertionError: {exc_msg}"])
|
||||
match = [
|
||||
r"E .* AssertionError: Regex pattern did not match.",
|
||||
r"E .* Regex: '\[123\]\+'",
|
||||
r"E .* Input: 'division by zero'",
|
||||
]
|
||||
result.stdout.re_match_lines(match)
|
||||
result.stdout.no_fnmatch_line("*__tracebackhide__ = True*")
|
||||
|
||||
result = pytester.runpytest("--fulltrace")
|
||||
assert result.ret != 0
|
||||
result.stdout.fnmatch_lines(
|
||||
["*__tracebackhide__ = True*", f"E * AssertionError: {exc_msg}"]
|
||||
)
|
||||
result.stdout.re_match_lines([r".*__tracebackhide__ = True.*", *match])
|
||||
|
||||
|
||||
class TestFormattedExcinfo:
|
||||
|
|
|
@ -1,16 +1,13 @@
|
|||
# flake8: noqa
|
||||
# disable flake check on this file because some constructs are strange
|
||||
# or redundant on purpose and can't be disable on a line-by-line basis
|
||||
import ast
|
||||
import inspect
|
||||
import linecache
|
||||
import sys
|
||||
import textwrap
|
||||
from pathlib import Path
|
||||
from types import CodeType
|
||||
from typing import Any
|
||||
from typing import Dict
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
from _pytest._code import Code
|
||||
|
|
|
@ -86,23 +86,6 @@ def test_private_is_deprecated() -> None:
|
|||
PrivateInit(10, _ispytest=True)
|
||||
|
||||
|
||||
def test_raising_unittest_skiptest_during_collection_is_deprecated(
|
||||
pytester: Pytester,
|
||||
) -> None:
|
||||
pytester.makepyfile(
|
||||
"""
|
||||
import unittest
|
||||
raise unittest.SkipTest()
|
||||
"""
|
||||
)
|
||||
result = pytester.runpytest()
|
||||
result.stdout.fnmatch_lines(
|
||||
[
|
||||
"*PytestRemovedIn8Warning: Raising unittest.SkipTest*",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("hooktype", ["hook", "ihook"])
|
||||
def test_hookproxy_warnings_for_pathlib(tmp_path, hooktype, request):
|
||||
path = legacy_path(tmp_path)
|
||||
|
|
|
@ -2,6 +2,7 @@ import pytest
|
|||
from _pytest._io.saferepr import _pformat_dispatch
|
||||
from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE
|
||||
from _pytest._io.saferepr import saferepr
|
||||
from _pytest._io.saferepr import saferepr_unlimited
|
||||
|
||||
|
||||
def test_simple_repr():
|
||||
|
@ -179,3 +180,23 @@ def test_broken_getattribute():
|
|||
assert saferepr(SomeClass()).startswith(
|
||||
"<[RuntimeError() raised in repr()] SomeClass object at 0x"
|
||||
)
|
||||
|
||||
|
||||
def test_saferepr_unlimited():
|
||||
dict5 = {f"v{i}": i for i in range(5)}
|
||||
assert saferepr_unlimited(dict5) == "{'v0': 0, 'v1': 1, 'v2': 2, 'v3': 3, 'v4': 4}"
|
||||
|
||||
dict_long = {f"v{i}": i for i in range(1_000)}
|
||||
r = saferepr_unlimited(dict_long)
|
||||
assert "..." not in r
|
||||
assert "\n" not in r
|
||||
|
||||
|
||||
def test_saferepr_unlimited_exc():
|
||||
class A:
|
||||
def __repr__(self):
|
||||
raise ValueError(42)
|
||||
|
||||
assert saferepr_unlimited(A()).startswith(
|
||||
"<[ValueError(42) raised in repr()] A object at 0x"
|
||||
)
|
||||
|
|
|
@@ -1,6 +1,6 @@
anyio[curio,trio]==3.5.0
django==4.0.1
pytest-asyncio==0.17.2
django==4.0.3
pytest-asyncio==0.18.2
pytest-bdd==5.0.0
pytest-cov==3.0.0
pytest-django==4.5.2

@@ -11,5 +11,5 @@ pytest-rerunfailures==10.2
pytest-sugar==0.9.4
pytest-trio==0.7.0
pytest-twisted==1.13.4
twisted==21.7.0
twisted==22.2.0
pytest-xvfb==2.0.0

@ -92,9 +92,7 @@ SOME_INT = r"[0-9]+\s*"
|
|||
|
||||
|
||||
class TestApprox:
|
||||
def test_error_messages(self, assert_approx_raises_regex):
|
||||
np = pytest.importorskip("numpy")
|
||||
|
||||
def test_error_messages_native_dtypes(self, assert_approx_raises_regex):
|
||||
assert_approx_raises_regex(
|
||||
2.0,
|
||||
1.0,
|
||||
|
@ -135,6 +133,22 @@ class TestApprox:
|
|||
],
|
||||
)
|
||||
|
||||
# Specific test for comparison with 0.0 (relative diff will be 'inf')
|
||||
assert_approx_raises_regex(
|
||||
[0.0],
|
||||
[1.0],
|
||||
[
|
||||
r" comparison failed. Mismatched elements: 1 / 1:",
|
||||
rf" Max absolute difference: {SOME_FLOAT}",
|
||||
r" Max relative difference: inf",
|
||||
r" Index \| Obtained\s+\| Expected ",
|
||||
rf"\s*0\s*\| {SOME_FLOAT} \| {SOME_FLOAT} ± {SOME_FLOAT}",
|
||||
],
|
||||
)
|
||||
|
||||
def test_error_messages_numpy_dtypes(self, assert_approx_raises_regex):
|
||||
np = pytest.importorskip("numpy")
|
||||
|
||||
a = np.linspace(0, 100, 20)
|
||||
b = np.linspace(0, 100, 20)
|
||||
a[10] += 0.5
|
||||
|
@ -175,18 +189,6 @@ class TestApprox:
|
|||
)
|
||||
|
||||
# Specific test for comparison with 0.0 (relative diff will be 'inf')
|
||||
assert_approx_raises_regex(
|
||||
[0.0],
|
||||
[1.0],
|
||||
[
|
||||
r" comparison failed. Mismatched elements: 1 / 1:",
|
||||
rf" Max absolute difference: {SOME_FLOAT}",
|
||||
r" Max relative difference: inf",
|
||||
r" Index \| Obtained\s+\| Expected ",
|
||||
rf"\s*0\s*\| {SOME_FLOAT} \| {SOME_FLOAT} ± {SOME_FLOAT}",
|
||||
],
|
||||
)
|
||||
|
||||
assert_approx_raises_regex(
|
||||
np.array([0.0]),
|
||||
np.array([1.0]),
|
||||
|
@@ -858,13 +860,21 @@ class TestApprox:
        assert approx(expected, rel=5e-7, abs=0) == actual
        assert approx(expected, rel=5e-8, abs=0) != actual

    def test_generic_sized_iterable_object(self):
        class MySizedIterable:
            def __iter__(self):
                return iter([1, 2, 3, 4])
    def test_generic_ordered_sequence(self):
        class MySequence:
            def __getitem__(self, i):
                return [1, 2, 3, 4][i]

            def __len__(self):
                return 4

        expected = MySizedIterable()
        assert [1, 2, 3, 4] == approx(expected)
        expected = MySequence()
        assert [1, 2, 3, 4] == approx(expected, abs=1e-4)

        expected_repr = "approx([1 ± 1.0e-06, 2 ± 2.0e-06, 3 ± 3.0e-06, 4 ± 4.0e-06])"
        assert repr(approx(expected)) == expected_repr

    def test_allow_ordered_sequences_only(self) -> None:
        """pytest.approx() should raise an error on unordered sequences (#9692)."""
        with pytest.raises(TypeError, match="only supports ordered sequences"):
            assert {1, 2, 3} == approx({1, 2, 3})

@ -106,8 +106,8 @@ class TestMetafunc:
|
|||
with pytest.raises(
|
||||
fail.Exception,
|
||||
match=(
|
||||
r"In func: ids must be list of string/float/int/bool, found:"
|
||||
r" Exc\(from_gen\) \(type: <class .*Exc'>\) at index 2"
|
||||
r"In func: ids contains unsupported value Exc\(from_gen\) \(type: <class .*Exc'>\) at index 2. "
|
||||
r"Supported types are: .*"
|
||||
),
|
||||
):
|
||||
metafunc.parametrize("x", [1, 2, 3], ids=gen()) # type: ignore[arg-type]
|
||||
|
@ -285,7 +285,7 @@ class TestMetafunc:
|
|||
deadline=400.0
|
||||
) # very close to std deadline and CI boxes are not reliable in CPU power
|
||||
def test_idval_hypothesis(self, value) -> None:
|
||||
escaped = IdMaker([], [], None, None, None, None)._idval(value, "a", 6)
|
||||
escaped = IdMaker([], [], None, None, None, None, None)._idval(value, "a", 6)
|
||||
assert isinstance(escaped, str)
|
||||
escaped.encode("ascii")
|
||||
|
||||
|
@ -308,7 +308,8 @@ class TestMetafunc:
|
|||
]
|
||||
for val, expected in values:
|
||||
assert (
|
||||
IdMaker([], [], None, None, None, None)._idval(val, "a", 6) == expected
|
||||
IdMaker([], [], None, None, None, None, None)._idval(val, "a", 6)
|
||||
== expected
|
||||
)
|
||||
|
||||
def test_unicode_idval_with_config(self) -> None:
|
||||
|
@ -337,7 +338,7 @@ class TestMetafunc:
|
|||
("ação", MockConfig({option: False}), "a\\xe7\\xe3o"),
|
||||
]
|
||||
for val, config, expected in values:
|
||||
actual = IdMaker([], [], None, None, config, None)._idval(val, "a", 6)
|
||||
actual = IdMaker([], [], None, None, config, None, None)._idval(val, "a", 6)
|
||||
assert actual == expected
|
||||
|
||||
def test_bytes_idval(self) -> None:
|
||||
|
@ -351,7 +352,8 @@ class TestMetafunc:
|
|||
]
|
||||
for val, expected in values:
|
||||
assert (
|
||||
IdMaker([], [], None, None, None, None)._idval(val, "a", 6) == expected
|
||||
IdMaker([], [], None, None, None, None, None)._idval(val, "a", 6)
|
||||
== expected
|
||||
)
|
||||
|
||||
def test_class_or_function_idval(self) -> None:
|
||||
|
@ -367,7 +369,8 @@ class TestMetafunc:
|
|||
values = [(TestClass, "TestClass"), (test_function, "test_function")]
|
||||
for val, expected in values:
|
||||
assert (
|
||||
IdMaker([], [], None, None, None, None)._idval(val, "a", 6) == expected
|
||||
IdMaker([], [], None, None, None, None, None)._idval(val, "a", 6)
|
||||
== expected
|
||||
)
|
||||
|
||||
def test_notset_idval(self) -> None:
|
||||
|
@ -376,7 +379,9 @@ class TestMetafunc:
|
|||
|
||||
Regression test for #7686.
|
||||
"""
|
||||
assert IdMaker([], [], None, None, None, None)._idval(NOTSET, "a", 0) == "a0"
|
||||
assert (
|
||||
IdMaker([], [], None, None, None, None, None)._idval(NOTSET, "a", 0) == "a0"
|
||||
)
|
||||
|
||||
def test_idmaker_autoname(self) -> None:
|
||||
"""#250"""
|
||||
|
@ -387,6 +392,7 @@ class TestMetafunc:
|
|||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["string-1.0", "st-ring-2.0"]
|
||||
|
||||
|
@ -397,17 +403,18 @@ class TestMetafunc:
|
|||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["a0-1.0", "a1-b1"]
|
||||
# unicode mixing, issue250
|
||||
result = IdMaker(
|
||||
("a", "b"), [pytest.param({}, b"\xc3\xb4")], None, None, None, None
|
||||
("a", "b"), [pytest.param({}, b"\xc3\xb4")], None, None, None, None, None
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["a0-\\xc3\\xb4"]
|
||||
|
||||
def test_idmaker_with_bytes_regex(self) -> None:
|
||||
result = IdMaker(
|
||||
("a"), [pytest.param(re.compile(b"foo"), 1.0)], None, None, None, None
|
||||
("a"), [pytest.param(re.compile(b"foo"), 1.0)], None, None, None, None, None
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["foo"]
|
||||
|
||||
|
@ -433,6 +440,7 @@ class TestMetafunc:
|
|||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == [
|
||||
"1.0--1.1",
|
||||
|
@ -465,6 +473,7 @@ class TestMetafunc:
|
|||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["\\x00-1", "\\x05-2", "\\x00-3", "\\x05-4", "\\t-5", "\\t-6"]
|
||||
|
||||
|
@ -479,6 +488,7 @@ class TestMetafunc:
|
|||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["hello \\x00", "hello \\x05"]
|
||||
|
||||
|
@ -486,7 +496,7 @@ class TestMetafunc:
|
|||
enum = pytest.importorskip("enum")
|
||||
e = enum.Enum("Foo", "one, two")
|
||||
result = IdMaker(
|
||||
("a", "b"), [pytest.param(e.one, e.two)], None, None, None, None
|
||||
("a", "b"), [pytest.param(e.one, e.two)], None, None, None, None, None
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["Foo.one-Foo.two"]
|
||||
|
||||
|
@ -509,6 +519,7 @@ class TestMetafunc:
|
|||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["10.0-IndexError()", "20-KeyError()", "three-b2"]
|
||||
|
||||
|
@ -529,6 +540,7 @@ class TestMetafunc:
|
|||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["a-a0", "a-a1", "a-a2"]
|
||||
|
||||
|
@ -560,7 +572,13 @@ class TestMetafunc:
|
|||
]
|
||||
for config, expected in values:
|
||||
result = IdMaker(
|
||||
("a",), [pytest.param("string")], lambda _: "ação", None, config, None
|
||||
("a",),
|
||||
[pytest.param("string")],
|
||||
lambda _: "ação",
|
||||
None,
|
||||
config,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == [expected]
|
||||
|
||||
|
@ -592,7 +610,7 @@ class TestMetafunc:
|
|||
]
|
||||
for config, expected in values:
|
||||
result = IdMaker(
|
||||
("a",), [pytest.param("string")], None, ["ação"], config, None
|
||||
("a",), [pytest.param("string")], None, ["ação"], config, None, None
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == [expected]
|
||||
|
||||
|
@ -657,6 +675,7 @@ class TestMetafunc:
|
|||
["a", None],
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["a", "3-4"]
|
||||
|
||||
|
@ -668,6 +687,7 @@ class TestMetafunc:
|
|||
["a", None],
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["me", "you"]
|
||||
|
||||
|
@ -679,6 +699,7 @@ class TestMetafunc:
|
|||
["a", "a", "b", "c", "b"],
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
).make_unique_parameterset_ids()
|
||||
assert result == ["a0", "a1", "b0", "c", "b1"]
|
||||
|
||||
|
@ -1318,7 +1339,7 @@ class TestMetafuncFunctional:
|
|||
"""
|
||||
import pytest
|
||||
|
||||
@pytest.mark.parametrize("x, expected", [(1, 2), (3, 4), (5, 6)], ids=(None, 2, type))
|
||||
@pytest.mark.parametrize("x, expected", [(1, 2), (3, 4), (5, 6)], ids=(None, 2, OSError()))
|
||||
def test_ids_numbers(x,expected):
|
||||
assert x * 2 == expected
|
||||
"""
|
||||
|
@ -1326,8 +1347,8 @@ class TestMetafuncFunctional:
|
|||
result = pytester.runpytest()
|
||||
result.stdout.fnmatch_lines(
|
||||
[
|
||||
"In test_ids_numbers: ids must be list of string/float/int/bool,"
|
||||
" found: <class 'type'> (type: <class 'type'>) at index 2"
|
||||
"In test_ids_numbers: ids contains unsupported value OSError() (type: <class 'OSError'>) at index 2. "
|
||||
"Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__."
|
||||
]
|
||||
)
|
||||
|
||||
|
|
|
@@ -191,10 +191,12 @@ class TestRaises:
            int("asdf")

        msg = "with base 16"
        expr = "Regex pattern {!r} does not match \"invalid literal for int() with base 10: 'asdf'\".".format(
            msg
        expr = (
            "Regex pattern did not match.\n"
            f" Regex: {msg!r}\n"
            " Input: \"invalid literal for int() with base 10: 'asdf'\""
        )
        with pytest.raises(AssertionError, match=re.escape(expr)):
        with pytest.raises(AssertionError, match="(?m)" + re.escape(expr)):
            with pytest.raises(ValueError, match=msg):
                int("asdf", base=10)

@ -217,7 +219,7 @@ class TestRaises:
|
|||
with pytest.raises(AssertionError, match="'foo"):
|
||||
raise AssertionError("'bar")
|
||||
(msg,) = excinfo.value.args
|
||||
assert msg == 'Regex pattern "\'foo" does not match "\'bar".'
|
||||
assert msg == '''Regex pattern did not match.\n Regex: "'foo"\n Input: "'bar"'''
|
||||
|
||||
def test_match_failure_exact_string_message(self):
|
||||
message = "Oh here is a message with (42) numbers in parameters"
|
||||
|
@ -226,9 +228,10 @@ class TestRaises:
|
|||
raise AssertionError(message)
|
||||
(msg,) = excinfo.value.args
|
||||
assert msg == (
|
||||
"Regex pattern 'Oh here is a message with (42) numbers in "
|
||||
"parameters' does not match 'Oh here is a message with (42) "
|
||||
"numbers in parameters'. Did you mean to `re.escape()` the regex?"
|
||||
"Regex pattern did not match.\n"
|
||||
" Regex: 'Oh here is a message with (42) numbers in parameters'\n"
|
||||
" Input: 'Oh here is a message with (42) numbers in parameters'\n"
|
||||
" Did you mean to `re.escape()` the regex?"
|
||||
)
|
||||
|
||||
def test_raises_match_wrong_type(self):
|
||||
|
|
|
@ -83,7 +83,7 @@ class TestImportHookInstallation:
|
|||
"E assert {'failed': 1,... 'skipped': 0} == {'failed': 0,... 'skipped': 0}",
|
||||
"E Omitting 1 identical items, use -vv to show",
|
||||
"E Differing items:",
|
||||
"E Use -v to get the full diff",
|
||||
"E Use -v to get more diff",
|
||||
]
|
||||
)
|
||||
# XXX: unstable output.
|
||||
|
@ -376,7 +376,7 @@ class TestAssert_reprcompare:
|
|||
assert diff == [
|
||||
"b'spam' == b'eggs'",
|
||||
"At index 0 diff: b's' != b'e'",
|
||||
"Use -v to get the full diff",
|
||||
"Use -v to get more diff",
|
||||
]
|
||||
|
||||
def test_bytes_diff_verbose(self) -> None:
|
||||
|
@ -444,11 +444,19 @@ class TestAssert_reprcompare:
|
|||
"""
|
||||
expl = callequal(left, right, verbose=0)
|
||||
assert expl is not None
|
||||
assert expl[-1] == "Use -v to get the full diff"
|
||||
assert expl[-1] == "Use -v to get more diff"
|
||||
verbose_expl = callequal(left, right, verbose=1)
|
||||
assert verbose_expl is not None
|
||||
assert "\n".join(verbose_expl).endswith(textwrap.dedent(expected).strip())
|
||||
|
||||
def test_iterable_quiet(self) -> None:
|
||||
expl = callequal([1, 2], [10, 2], verbose=-1)
|
||||
assert expl == [
|
||||
"[1, 2] == [10, 2]",
|
||||
"At index 0 diff: 1 != 10",
|
||||
"Use -v to get more diff",
|
||||
]
|
||||
|
||||
def test_iterable_full_diff_ci(
|
||||
self, monkeypatch: MonkeyPatch, pytester: Pytester
|
||||
) -> None:
|
||||
|
@ -466,7 +474,7 @@ class TestAssert_reprcompare:
|
|||
|
||||
monkeypatch.delenv("CI", raising=False)
|
||||
result = pytester.runpytest()
|
||||
result.stdout.fnmatch_lines(["E Use -v to get the full diff"])
|
||||
result.stdout.fnmatch_lines(["E Use -v to get more diff"])
|
||||
|
||||
def test_list_different_lengths(self) -> None:
|
||||
expl = callequal([0, 1], [0, 1, 2])
|
||||
|
@ -699,32 +707,6 @@ class TestAssert_reprcompare:
|
|||
assert expl is not None
|
||||
assert len(expl) > 1
|
||||
|
||||
def test_repr_verbose(self) -> None:
|
||||
class Nums:
|
||||
def __init__(self, nums):
|
||||
self.nums = nums
|
||||
|
||||
def __repr__(self):
|
||||
return str(self.nums)
|
||||
|
||||
list_x = list(range(5000))
|
||||
list_y = list(range(5000))
|
||||
list_y[len(list_y) // 2] = 3
|
||||
nums_x = Nums(list_x)
|
||||
nums_y = Nums(list_y)
|
||||
|
||||
assert callequal(nums_x, nums_y) is None
|
||||
|
||||
expl = callequal(nums_x, nums_y, verbose=1)
|
||||
assert expl is not None
|
||||
assert "+" + repr(nums_x) in expl
|
||||
assert "-" + repr(nums_y) in expl
|
||||
|
||||
expl = callequal(nums_x, nums_y, verbose=2)
|
||||
assert expl is not None
|
||||
assert "+" + repr(nums_x) in expl
|
||||
assert "-" + repr(nums_y) in expl
|
||||
|
||||
def test_list_bad_repr(self) -> None:
|
||||
class A:
|
||||
def __repr__(self):
|
||||
|
@ -851,8 +833,6 @@ class TestAssert_reprcompare_dataclass:
|
|||
"E ",
|
||||
"E Drill down into differing attribute a:",
|
||||
"E a: 10 != 20",
|
||||
"E +10",
|
||||
"E -20",
|
||||
"E ",
|
||||
"E Drill down into differing attribute b:",
|
||||
"E b: 'ten' != 'xxx'",
|
||||
|
@ -1026,7 +1006,7 @@ class TestAssert_reprcompare_attrsclass:
|
|||
assert lines is None
|
||||
|
||||
def test_attrs_with_custom_eq(self) -> None:
|
||||
@attr.define
|
||||
@attr.define(slots=False)
|
||||
class SimpleDataObject:
|
||||
field_a = attr.ib()
|
||||
|
||||
|
@ -1059,7 +1039,7 @@ class TestAssert_reprcompare_namedtuple:
|
|||
" b: 'b' != 'c'",
|
||||
" - c",
|
||||
" + b",
|
||||
"Use -v to get the full diff",
|
||||
"Use -v to get more diff",
|
||||
]
|
||||
|
||||
def test_comparing_two_different_namedtuple(self) -> None:
|
||||
|
@ -1074,7 +1054,7 @@ class TestAssert_reprcompare_namedtuple:
|
|||
assert lines == [
|
||||
"NT1(a=1, b='b') == NT2(a=2, b='b')",
|
||||
"At index 0 diff: 1 != 2",
|
||||
"Use -v to get the full diff",
|
||||
"Use -v to get more diff",
|
||||
]
|
||||
|
||||
|
||||
|
@ -1648,7 +1628,7 @@ def test_raise_unprintable_assertion_error(pytester: Pytester) -> None:
|
|||
)
|
||||
|
||||
|
||||
def test_raise_assertion_error_raisin_repr(pytester: Pytester) -> None:
|
||||
def test_raise_assertion_error_raising_repr(pytester: Pytester) -> None:
|
||||
pytester.makepyfile(
|
||||
"""
|
||||
class RaisingRepr(object):
|
||||
|
@ -1659,9 +1639,15 @@ def test_raise_assertion_error_raisin_repr(pytester: Pytester) -> None:
|
|||
"""
|
||||
)
|
||||
result = pytester.runpytest()
|
||||
result.stdout.fnmatch_lines(
|
||||
["E AssertionError: <unprintable AssertionError object>"]
|
||||
)
|
||||
if sys.version_info >= (3, 11):
|
||||
# python 3.11 has native support for un-str-able exceptions
|
||||
result.stdout.fnmatch_lines(
|
||||
["E AssertionError: <exception str() failed>"]
|
||||
)
|
||||
else:
|
||||
result.stdout.fnmatch_lines(
|
||||
["E AssertionError: <unprintable AssertionError object>"]
|
||||
)
|
||||
|
||||
|
||||
def test_issue_1944(pytester: Pytester) -> None:
|
||||
|
@ -1709,3 +1695,18 @@ def test_assertion_location_with_coverage(pytester: Pytester) -> None:
|
|||
"*= 1 failed in*",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def test_reprcompare_verbose_long() -> None:
|
||||
a = {f"v{i}": i for i in range(11)}
|
||||
b = a.copy()
|
||||
b["v2"] += 10
|
||||
lines = callop("==", a, b, verbose=2)
|
||||
assert lines is not None
|
||||
assert lines[0] == (
|
||||
"{'v0': 0, 'v1': 1, 'v2': 2, 'v3': 3, 'v4': 4, 'v5': 5, "
|
||||
"'v6': 6, 'v7': 7, 'v8': 8, 'v9': 9, 'v10': 10}"
|
||||
" == "
|
||||
"{'v0': 0, 'v1': 1, 'v2': 12, 'v3': 3, 'v4': 4, 'v5': 5, "
|
||||
"'v6': 6, 'v7': 7, 'v8': 8, 'v9': 9, 'v10': 10}"
|
||||
)
|
||||
|
|
|
@ -13,10 +13,12 @@ from functools import partial
|
|||
from pathlib import Path
|
||||
from typing import cast
|
||||
from typing import Dict
|
||||
from typing import Generator
|
||||
from typing import List
|
||||
from typing import Mapping
|
||||
from typing import Optional
|
||||
from typing import Set
|
||||
from unittest import mock
|
||||
|
||||
import _pytest._code
|
||||
import pytest
|
||||
|
@ -202,16 +204,8 @@ class TestAssertionRewrite:
|
|||
def f4() -> None:
|
||||
assert sys == 42 # type: ignore[comparison-overlap]
|
||||
|
||||
verbose = request.config.getoption("verbose")
|
||||
msg = getmsg(f4, {"sys": sys})
|
||||
if verbose > 0:
|
||||
assert msg == (
|
||||
"assert <module 'sys' (built-in)> == 42\n"
|
||||
" +<module 'sys' (built-in)>\n"
|
||||
" -42"
|
||||
)
|
||||
else:
|
||||
assert msg == "assert sys == 42"
|
||||
assert msg == "assert sys == 42"
|
||||
|
||||
def f5() -> None:
|
||||
assert cls == 42 # type: ignore[name-defined] # noqa: F821
|
||||
|
@ -222,20 +216,7 @@ class TestAssertionRewrite:
|
|||
msg = getmsg(f5, {"cls": X})
|
||||
assert msg is not None
|
||||
lines = msg.splitlines()
|
||||
if verbose > 1:
|
||||
assert lines == [
|
||||
f"assert {X!r} == 42",
|
||||
f" +{X!r}",
|
||||
" -42",
|
||||
]
|
||||
elif verbose > 0:
|
||||
assert lines == [
|
||||
"assert <class 'test_...e.<locals>.X'> == 42",
|
||||
f" +{X!r}",
|
||||
" -42",
|
||||
]
|
||||
else:
|
||||
assert lines == ["assert cls == 42"]
|
||||
assert lines == ["assert cls == 42"]
|
||||
|
||||
def test_assertrepr_compare_same_width(self, request) -> None:
|
||||
"""Should use same width/truncation with same initial width."""
|
||||
|
@ -277,14 +258,11 @@ class TestAssertionRewrite:
|
|||
msg = getmsg(f, {"cls": Y})
|
||||
assert msg is not None
|
||||
lines = msg.splitlines()
|
||||
if request.config.getoption("verbose") > 0:
|
||||
assert lines == ["assert 3 == 2", " +3", " -2"]
|
||||
else:
|
||||
assert lines == [
|
||||
"assert 3 == 2",
|
||||
" + where 3 = Y.foo",
|
||||
" + where Y = cls()",
|
||||
]
|
||||
assert lines == [
|
||||
"assert 3 == 2",
|
||||
" + where 3 = Y.foo",
|
||||
" + where Y = cls()",
|
||||
]
|
||||
|
||||
def test_assert_already_has_message(self) -> None:
|
||||
def f():
|
||||
|
@ -661,10 +639,7 @@ class TestAssertionRewrite:
|
|||
assert len(values) == 11
|
||||
|
||||
msg = getmsg(f)
|
||||
if request.config.getoption("verbose") > 0:
|
||||
assert msg == "assert 10 == 11\n +10\n -11"
|
||||
else:
|
||||
assert msg == "assert 10 == 11\n + where 10 = len([0, 1, 2, 3, 4, 5, ...])"
|
||||
assert msg == "assert 10 == 11\n + where 10 = len([0, 1, 2, 3, 4, 5, ...])"
|
||||
|
||||
def test_custom_reprcompare(self, monkeypatch) -> None:
|
||||
def my_reprcompare1(op, left, right) -> str:
|
||||
|
@ -730,10 +705,7 @@ class TestAssertionRewrite:
|
|||
msg = getmsg(f)
|
||||
assert msg is not None
|
||||
lines = util._format_lines([msg])
|
||||
if request.config.getoption("verbose") > 0:
|
||||
assert lines == ["assert 0 == 1\n +0\n -1"]
|
||||
else:
|
||||
assert lines == ["assert 0 == 1\n + where 1 = \\n{ \\n~ \\n}.a"]
|
||||
assert lines == ["assert 0 == 1\n + where 1 = \\n{ \\n~ \\n}.a"]
|
||||
|
||||
def test_custom_repr_non_ascii(self) -> None:
|
||||
def f() -> None:
|
||||
|
@ -1057,7 +1029,7 @@ class TestAssertionRewriteHookDetails:
|
|||
e = OSError()
|
||||
e.errno = 10
|
||||
raise e
|
||||
yield
|
||||
yield # type:ignore[unreachable]
|
||||
|
||||
monkeypatch.setattr(
|
||||
_pytest.assertion.rewrite, "atomic_write", atomic_write_failed
|
||||
|
@ -1376,7 +1348,7 @@ class TestEarlyRewriteBailout:
|
|||
@pytest.fixture
|
||||
def hook(
|
||||
self, pytestconfig, monkeypatch, pytester: Pytester
|
||||
) -> AssertionRewritingHook:
|
||||
) -> Generator[AssertionRewritingHook, None, None]:
|
||||
"""Returns a patched AssertionRewritingHook instance so we can configure its initial paths and track
|
||||
if PathFinder.find_spec has been called.
|
||||
"""
|
||||
|
@ -1397,11 +1369,11 @@ class TestEarlyRewriteBailout:
|
|||
|
||||
hook = AssertionRewritingHook(pytestconfig)
|
||||
# use default patterns, otherwise we inherit pytest's testing config
|
||||
hook.fnpats[:] = ["test_*.py", "*_test.py"]
|
||||
monkeypatch.setattr(hook, "_find_spec", spy_find_spec)
|
||||
hook.set_session(StubSession()) # type: ignore[arg-type]
|
||||
pytester.syspathinsert()
|
||||
return hook
|
||||
with mock.patch.object(hook, "fnpats", ["test_*.py", "*_test.py"]):
|
||||
monkeypatch.setattr(hook, "_find_spec", spy_find_spec)
|
||||
hook.set_session(StubSession()) # type: ignore[arg-type]
|
||||
pytester.syspathinsert()
|
||||
yield hook
|
||||
|
||||
def test_basic(self, pytester: Pytester, hook: AssertionRewritingHook) -> None:
|
||||
"""
|
||||
|
@ -1451,9 +1423,9 @@ class TestEarlyRewriteBailout:
|
|||
}
|
||||
)
|
||||
pytester.syspathinsert("tests")
|
||||
hook.fnpats[:] = ["tests/**.py"]
|
||||
assert hook.find_spec("file") is not None
|
||||
assert self.find_spec_calls == ["file"]
|
||||
with mock.patch.object(hook, "fnpats", ["tests/**.py"]):
|
||||
assert hook.find_spec("file") is not None
|
||||
assert self.find_spec_calls == ["file"]
|
||||
|
||||
@pytest.mark.skipif(
|
||||
sys.platform.startswith("win32"), reason="cannot remove cwd on Windows"
|
||||
|
|
|
@@ -773,7 +773,7 @@ class TestLastFailed:
        result = pytester.runpytest("--lf", "--lfnf", "none")
        result.stdout.fnmatch_lines(
            [
                "collected 2 items / 2 deselected",
                "collected 2 items / 2 deselected / 0 selected",
                "run-last-failure: no previously failed tests, deselecting all items.",
                "deselected=2",
                "* 2 deselected in *",

@ -651,7 +651,7 @@ class Test_getinitialnodes:
|
|||
for parent in col.listchain():
|
||||
assert parent.config is config
|
||||
|
||||
def test_pkgfile(self, pytester: Pytester) -> None:
|
||||
def test_pkgfile(self, pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
|
||||
"""Verify nesting when a module is within a package.
|
||||
The parent chain should match: Module<x.py> -> Package<subdir> -> Session.
|
||||
Session's parent should always be None.
|
||||
|
@ -660,7 +660,8 @@ class Test_getinitialnodes:
|
|||
subdir = tmp_path.joinpath("subdir")
|
||||
x = ensure_file(subdir / "x.py")
|
||||
ensure_file(subdir / "__init__.py")
|
||||
with subdir.cwd():
|
||||
with monkeypatch.context() as mp:
|
||||
mp.chdir(subdir)
|
||||
config = pytester.parseconfigure(x)
|
||||
col = pytester.getnode(config, x)
|
||||
assert col is not None
|
||||
|
@ -1188,8 +1189,7 @@ def test_collect_with_chdir_during_import(pytester: Pytester) -> None:
|
|||
"""
|
||||
% (str(subdir),)
|
||||
)
|
||||
with pytester.path.cwd():
|
||||
result = pytester.runpytest()
|
||||
result = pytester.runpytest()
|
||||
result.stdout.fnmatch_lines(["*1 passed in*"])
|
||||
assert result.ret == 0
|
||||
|
||||
|
@ -1200,8 +1200,7 @@ def test_collect_with_chdir_during_import(pytester: Pytester) -> None:
|
|||
testpaths = .
|
||||
"""
|
||||
)
|
||||
with pytester.path.cwd():
|
||||
result = pytester.runpytest("--collect-only")
|
||||
result = pytester.runpytest("--collect-only")
|
||||
result.stdout.fnmatch_lines(["collected 1 item"])
|
||||
|
||||
|
||||
|
@ -1224,7 +1223,8 @@ def test_collect_pyargs_with_testpaths(
|
|||
)
|
||||
)
|
||||
monkeypatch.setenv("PYTHONPATH", str(pytester.path), prepend=os.pathsep)
|
||||
with root.cwd():
|
||||
with monkeypatch.context() as mp:
|
||||
mp.chdir(root)
|
||||
result = pytester.runpytest_subprocess()
|
||||
result.stdout.fnmatch_lines(["*1 passed in*"])
|
||||
|
||||
|
@ -1507,6 +1507,35 @@ class TestImportModeImportlib:
|
|||
]
|
||||
)
|
||||
|
||||
def test_using_python_path(self, pytester: Pytester) -> None:
|
||||
"""
|
||||
Dummy modules created by insert_missing_modules should not get in
|
||||
the way of modules that could be imported via python path (#9645).
|
||||
"""
|
||||
pytester.makeini(
|
||||
"""
|
||||
[pytest]
|
||||
pythonpath = .
|
||||
addopts = --import-mode importlib
|
||||
"""
|
||||
)
|
||||
pytester.makepyfile(
|
||||
**{
|
||||
"tests/__init__.py": "",
|
||||
"tests/conftest.py": "",
|
||||
"tests/subpath/__init__.py": "",
|
||||
"tests/subpath/helper.py": "",
|
||||
"tests/subpath/test_something.py": """
|
||||
import tests.subpath.helper
|
||||
|
||||
def test_something():
|
||||
assert True
|
||||
""",
|
||||
}
|
||||
)
|
||||
result = pytester.runpytest()
|
||||
result.stdout.fnmatch_lines("*1 passed in*")
|
||||
|
||||
|
||||
def test_does_not_crash_on_error_from_decorated_function(pytester: Pytester) -> None:
|
||||
"""Regression test for an issue around bad exception formatting due to
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import enum
|
||||
import sys
|
||||
from functools import partial
|
||||
from functools import wraps
|
||||
from typing import TYPE_CHECKING
|
||||
|
@ -91,6 +92,7 @@ def test_get_real_func_partial() -> None:
|
|||
assert get_real_func(partial(foo)) is foo
|
||||
|
||||
|
||||
@pytest.mark.skipif(sys.version_info >= (3, 11), reason="couroutine removed")
|
||||
def test_is_generator_asyncio(pytester: Pytester) -> None:
|
||||
pytester.makepyfile(
|
||||
"""
|
||||
|
|
|
@ -163,7 +163,17 @@ class TestParseIni:
|
|||
pytester.path.joinpath("pytest.ini").write_text("addopts = -x")
|
||||
result = pytester.runpytest()
|
||||
assert result.ret != 0
|
||||
result.stderr.fnmatch_lines(["ERROR: *pytest.ini:1: no section header defined"])
|
||||
result.stderr.fnmatch_lines("ERROR: *pytest.ini:1: no section header defined")
|
||||
|
||||
def test_toml_parse_error(self, pytester: Pytester) -> None:
|
||||
pytester.makepyprojecttoml(
|
||||
"""
|
||||
\\"
|
||||
"""
|
||||
)
|
||||
result = pytester.runpytest()
|
||||
assert result.ret != 0
|
||||
result.stderr.fnmatch_lines("ERROR: *pyproject.toml: Invalid statement*")
|
||||
|
||||
@pytest.mark.xfail(reason="probably not needed")
|
||||
def test_confcutdir(self, pytester: Pytester) -> None:
|
||||
|
@ -1275,7 +1285,7 @@ def test_load_initial_conftest_last_ordering(_config_for_test):
|
|||
("_pytest.config", "nonwrapper"),
|
||||
(m.__module__, "nonwrapper"),
|
||||
("_pytest.legacypath", "nonwrapper"),
|
||||
("_pytest.pythonpath", "nonwrapper"),
|
||||
("_pytest.python_path", "nonwrapper"),
|
||||
("_pytest.capture", "wrapper"),
|
||||
("_pytest.warnings", "wrapper"),
|
||||
]
|
||||
|
|
|
@ -252,6 +252,34 @@ def test_conftest_confcutdir(pytester: Pytester) -> None:
|
|||
result.stdout.no_fnmatch_line("*warning: could not load initial*")
|
||||
|
||||
|
||||
def test_installed_conftest_is_picked_up(pytester: Pytester, tmp_path: Path) -> None:
|
||||
"""When using `--pyargs` to run tests in an installed packages (located e.g.
|
||||
in a site-packages in the PYTHONPATH), conftest files in there are picked
|
||||
up.
|
||||
|
||||
Regression test for #9767.
|
||||
"""
|
||||
# pytester dir - the source tree.
|
||||
# tmp_path - the simulated site-packages dir (not in source tree).
|
||||
|
||||
pytester.syspathinsert(tmp_path)
|
||||
pytester.makepyprojecttoml("[tool.pytest.ini_options]")
|
||||
tmp_path.joinpath("foo").mkdir()
|
||||
tmp_path.joinpath("foo", "__init__.py").touch()
|
||||
tmp_path.joinpath("foo", "conftest.py").write_text(
|
||||
textwrap.dedent(
|
||||
"""\
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def fix(): return None
|
||||
"""
|
||||
)
|
||||
)
|
||||
tmp_path.joinpath("foo", "test_it.py").write_text("def test_it(fix): pass")
|
||||
result = pytester.runpytest("--pyargs", "foo")
|
||||
assert result.ret == 0
|
||||
|
||||
|
||||
def test_conftest_symlink(pytester: Pytester) -> None:
|
||||
"""`conftest.py` discovery follows normal path resolution and does not resolve symlinks."""
|
||||
# Structure:
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import inspect
|
||||
import sys
|
||||
import textwrap
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
|
@ -200,6 +201,7 @@ class TestDoctests:
|
|||
"Traceback (most recent call last):",
|
||||
' File "*/doctest.py", line *, in __run',
|
||||
" *",
|
||||
*((" *^^^^*",) if sys.version_info >= (3, 11) else ()),
|
||||
' File "<doctest test_doctest_unexpected_exception.txt[1]>", line 1, in <module>',
|
||||
"ZeroDivisionError: division by zero",
|
||||
"*/test_doctest_unexpected_exception.txt:2: UnexpectedException",
|
||||
|
@ -801,8 +803,8 @@ class TestDoctests:
|
|||
"""
|
||||
p = pytester.makepyfile(
|
||||
setup="""
|
||||
from setuptools import setup, find_packages
|
||||
if __name__ == '__main__':
|
||||
from setuptools import setup, find_packages
|
||||
setup(name='sample',
|
||||
version='0.0',
|
||||
description='description',
|
||||
|
|
|
@ -231,8 +231,6 @@ TESTCASES = [
|
|||
E ['a']
|
||||
E Drill down into differing attribute a:
|
||||
E a: 1 != 2
|
||||
E +1
|
||||
E -2
|
||||
""",
|
||||
id="Compare data classes",
|
||||
),
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
import argparse
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
|
@ -44,16 +45,32 @@ def test_wrap_session_notify_exception(ret_exc, pytester: Pytester) -> None:
|
|||
assert result.ret == ExitCode.INTERNAL_ERROR
|
||||
assert result.stdout.lines[0] == "INTERNALERROR> Traceback (most recent call last):"
|
||||
|
||||
end_lines = (
|
||||
result.stdout.lines[-4:]
|
||||
if sys.version_info >= (3, 11)
|
||||
else result.stdout.lines[-3:]
|
||||
)
|
||||
|
||||
if exc == SystemExit:
|
||||
assert result.stdout.lines[-3:] == [
|
||||
assert end_lines == [
|
||||
f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
|
||||
'INTERNALERROR> raise SystemExit("boom")',
|
||||
*(
|
||||
("INTERNALERROR> ^^^^^^^^^^^^^^^^^^^^^^^^",)
|
||||
if sys.version_info >= (3, 11)
|
||||
else ()
|
||||
),
|
||||
"INTERNALERROR> SystemExit: boom",
|
||||
]
|
||||
else:
|
||||
assert result.stdout.lines[-3:] == [
|
||||
assert end_lines == [
|
||||
f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
|
||||
'INTERNALERROR> raise ValueError("boom")',
|
||||
*(
|
||||
("INTERNALERROR> ^^^^^^^^^^^^^^^^^^^^^^^^",)
|
||||
if sys.version_info >= (3, 11)
|
||||
else ()
|
||||
),
|
||||
"INTERNALERROR> ValueError: boom",
|
||||
]
|
||||
if returncode is False:
|
||||
|
@ -171,6 +188,12 @@ class TestResolveCollectionArgument:
|
|||
invocation_path, "pkg::foo::bar", as_pypath=True
|
||||
)
|
||||
|
||||
def test_parametrized_name_with_colons(self, invocation_path: Path) -> None:
|
||||
ret = resolve_collection_argument(
|
||||
invocation_path, "src/pkg/test.py::test[a::b]"
|
||||
)
|
||||
assert ret == (invocation_path / "src/pkg/test.py", ["test[a::b]"])
|
||||
|
||||
def test_does_not_exist(self, invocation_path: Path) -> None:
|
||||
"""Given a file/module that does not exist raises UsageError."""
|
||||
with pytest.raises(
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import re
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
from typing import cast
|
||||
from typing import List
|
||||
|
@ -58,30 +60,31 @@ def test_subclassing_both_item_and_collector_deprecated(
|
|||
request, tmp_path: Path
|
||||
) -> None:
|
||||
"""
|
||||
Verifies we warn on diamond inheritance
|
||||
as well as correctly managing legacy inheritance ctors with missing args
|
||||
as found in plugins
|
||||
Verifies we warn on diamond inheritance as well as correctly managing legacy
|
||||
inheritance constructors with missing args as found in plugins.
|
||||
"""
|
||||
|
||||
with pytest.warns(
|
||||
PytestWarning,
|
||||
match=(
|
||||
"(?m)SoWrong is an Item subclass and should not be a collector, however its bases File are collectors.\n"
|
||||
"Please split the Collectors and the Item into separate node types.\n.*"
|
||||
),
|
||||
):
|
||||
# We do not expect any warnings messages to issued during class definition.
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("error")
|
||||
|
||||
class SoWrong(nodes.Item, nodes.File):
|
||||
def __init__(self, fspath, parent):
|
||||
"""Legacy ctor with legacy call # don't wana see"""
|
||||
super().__init__(fspath, parent)
|
||||
|
||||
with pytest.warns(
|
||||
PytestWarning, match=".*SoWrong.* not using a cooperative constructor.*"
|
||||
):
|
||||
with pytest.warns(PytestWarning) as rec:
|
||||
SoWrong.from_parent(
|
||||
request.session, fspath=legacy_path(tmp_path / "broken.txt")
|
||||
)
|
||||
messages = [str(x.message) for x in rec]
|
||||
assert any(
|
||||
re.search(".*SoWrong.* not using a cooperative constructor.*", x)
|
||||
for x in messages
|
||||
)
|
||||
assert any(
|
||||
re.search("(?m)SoWrong .* should not be a collector", x) for x in messages
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
|
|
|
@@ -345,7 +345,7 @@ def test_SkipTest_during_collection(pytester: Pytester) -> None:
        """
    )
    result = pytester.runpytest(p)
    result.assert_outcomes(skipped=1, warnings=1)
    result.assert_outcomes(skipped=1, warnings=0)


def test_SkipTest_in_test(pytester: Pytester) -> None:

@ -562,15 +562,20 @@ class TestImportLibMode:
|
|||
result = module_name_from_path(Path("/home/foo/test_foo.py"), Path("/bar"))
|
||||
assert result == "home.foo.test_foo"
|
||||
|
||||
def test_insert_missing_modules(self) -> None:
|
||||
modules = {"src.tests.foo": ModuleType("src.tests.foo")}
|
||||
insert_missing_modules(modules, "src.tests.foo")
|
||||
assert sorted(modules) == ["src", "src.tests", "src.tests.foo"]
|
||||
def test_insert_missing_modules(
|
||||
self, monkeypatch: MonkeyPatch, tmp_path: Path
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
# Use 'xxx' and 'xxy' as parent names as they are unlikely to exist and
|
||||
# don't end up being imported.
|
||||
modules = {"xxx.tests.foo": ModuleType("xxx.tests.foo")}
|
||||
insert_missing_modules(modules, "xxx.tests.foo")
|
||||
assert sorted(modules) == ["xxx", "xxx.tests", "xxx.tests.foo"]
|
||||
|
||||
mod = ModuleType("mod", doc="My Module")
|
||||
modules = {"src": mod}
|
||||
insert_missing_modules(modules, "src")
|
||||
assert modules == {"src": mod}
|
||||
modules = {"xxy": mod}
|
||||
insert_missing_modules(modules, "xxy")
|
||||
assert modules == {"xxy": mod}
|
||||
|
||||
modules = {}
|
||||
insert_missing_modules(modules, "")
|
||||
|
|
|
@ -618,14 +618,9 @@ def test_linematcher_string_api() -> None:
|
|||
|
||||
|
||||
def test_pytest_addopts_before_pytester(request, monkeypatch: MonkeyPatch) -> None:
|
||||
orig = os.environ.get("PYTEST_ADDOPTS", None)
|
||||
monkeypatch.setenv("PYTEST_ADDOPTS", "--orig-unused")
|
||||
pytester: Pytester = request.getfixturevalue("pytester")
|
||||
_: Pytester = request.getfixturevalue("pytester")
|
||||
assert "PYTEST_ADDOPTS" not in os.environ
|
||||
pytester._finalize()
|
||||
assert os.environ.get("PYTEST_ADDOPTS") == "--orig-unused"
|
||||
monkeypatch.undo()
|
||||
assert os.environ.get("PYTEST_ADDOPTS") == orig
|
||||
|
||||
|
||||
def test_run_stdin(pytester: Pytester) -> None:
|
||||
|
@ -743,8 +738,8 @@ def test_run_result_repr() -> None:
|
|||
|
||||
# known exit code
|
||||
r = pytester_mod.RunResult(1, outlines, errlines, duration=0.5)
|
||||
assert (
|
||||
repr(r) == "<RunResult ret=ExitCode.TESTS_FAILED len(stdout.lines)=3"
|
||||
assert repr(r) == (
|
||||
f"<RunResult ret={str(pytest.ExitCode.TESTS_FAILED)} len(stdout.lines)=3"
|
||||
" len(stderr.lines)=4 duration=0.50s>"
|
||||
)
|
||||
|
||||
|
|
|
@ -81,7 +81,7 @@ def test_no_ini(pytester: Pytester, file_structure) -> None:
|
|||
|
||||
|
||||
def test_clean_up(pytester: Pytester) -> None:
|
||||
"""Test that the pythonpath plugin cleans up after itself."""
|
||||
"""Test that the plugin cleans up after itself."""
|
||||
# This is tough to test behaviorly because the cleanup really runs last.
|
||||
# So the test make several implementation assumptions:
|
||||
# - Cleanup is done in pytest_unconfigure().
|
|
@ -1,4 +1,3 @@
|
|||
import re
|
||||
import warnings
|
||||
from typing import Optional
|
||||
|
||||
|
@ -263,7 +262,7 @@ class TestWarns:
|
|||
with pytest.warns(RuntimeWarning):
|
||||
warnings.warn("user", UserWarning)
|
||||
excinfo.match(
|
||||
r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) were emitted. "
|
||||
r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) were emitted.\n"
|
||||
r"The list of emitted warnings is: \[UserWarning\('user',?\)\]."
|
||||
)
|
||||
|
||||
|
@ -271,15 +270,15 @@ class TestWarns:
|
|||
with pytest.warns(UserWarning):
|
||||
warnings.warn("runtime", RuntimeWarning)
|
||||
excinfo.match(
|
||||
r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) were emitted. "
|
||||
r"The list of emitted warnings is: \[RuntimeWarning\('runtime',?\)\]."
|
||||
r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) were emitted.\n"
|
||||
r"The list of emitted warnings is: \[RuntimeWarning\('runtime',?\)]."
|
||||
)
|
||||
|
||||
with pytest.raises(pytest.fail.Exception) as excinfo:
|
||||
with pytest.warns(UserWarning):
|
||||
pass
|
||||
excinfo.match(
|
||||
r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) were emitted. "
|
||||
r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) were emitted.\n"
|
||||
r"The list of emitted warnings is: \[\]."
|
||||
)
|
||||
|
||||
|
@ -289,18 +288,14 @@ class TestWarns:
|
|||
warnings.warn("runtime", RuntimeWarning)
|
||||
warnings.warn("import", ImportWarning)
|
||||
|
||||
message_template = (
|
||||
"DID NOT WARN. No warnings of type {0} were emitted. "
|
||||
"The list of emitted warnings is: {1}."
|
||||
)
|
||||
excinfo.match(
|
||||
re.escape(
|
||||
message_template.format(
|
||||
warning_classes, [each.message for each in warninfo]
|
||||
)
|
||||
)
|
||||
messages = [each.message for each in warninfo]
|
||||
expected_str = (
|
||||
f"DID NOT WARN. No warnings of type {warning_classes} were emitted.\n"
|
||||
f"The list of emitted warnings is: {messages}."
|
||||
)
|
||||
|
||||
assert str(excinfo.value) == expected_str
|
||||
|
||||
def test_record(self) -> None:
|
||||
with pytest.warns(UserWarning) as record:
|
||||
warnings.warn("user", UserWarning)
|
||||
|
|
|
@ -783,6 +783,33 @@ class TestTerminalFunctional:
        result.stdout.no_fnmatch_line("*= 1 deselected =*")
        assert result.ret == 0

    def test_selected_count_with_error(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            test_selected_count_3="""
                def test_one():
                    pass
                def test_two():
                    pass
                def test_three():
                    pass
            """,
            test_selected_count_error="""
                5/0
                def test_foo():
                    pass
                def test_bar():
                    pass
            """,
        )
        result = pytester.runpytest("-k", "test_t")
        result.stdout.fnmatch_lines(
            [
                "collected 3 items / 1 error / 1 deselected / 2 selected",
                "* ERROR collecting test_selected_count_error.py *",
            ]
        )
        assert result.ret == ExitCode.INTERRUPTED

    def test_no_skip_summary_if_failure(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            """

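For reference, the pytest.ExitCode values asserted here and in the unittest tests below (the enum and its values are pytest's public API; the snippet itself is only illustrative):

import pytest

assert pytest.ExitCode.OK == 0                   # all collected tests passed
assert pytest.ExitCode.TESTS_FAILED == 1         # some tests failed
assert pytest.ExitCode.INTERRUPTED == 2          # run aborted, e.g. by collection errors
assert pytest.ExitCode.INTERNAL_ERROR == 3       # pytest itself crashed
assert pytest.ExitCode.USAGE_ERROR == 4          # bad command line or configuration
assert pytest.ExitCode.NO_TESTS_COLLECTED == 5   # nothing was collected
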
@ -1472,3 +1472,56 @@ def test_do_cleanups_on_teardown_failure(pytester: Pytester) -> None:
    passed, skipped, failed = reprec.countoutcomes()
    assert failed == 2
    assert passed == 1


def test_traceback_pruning(pytester: Pytester) -> None:
    """Regression test for #9610 - doesn't crash during traceback pruning."""
    pytester.makepyfile(
        """
        import unittest

        class MyTestCase(unittest.TestCase):
            def __init__(self, test_method):
                unittest.TestCase.__init__(self, test_method)

        class TestIt(MyTestCase):
            @classmethod
            def tearDownClass(cls) -> None:
                assert False

            def test_it(self):
                pass
        """
    )
    reprec = pytester.inline_run()
    passed, skipped, failed = reprec.countoutcomes()
    assert passed == 1
    assert failed == 1
    assert reprec.ret == 1


def test_raising_unittest_skiptest_during_collection(
    pytester: Pytester,
) -> None:
    pytester.makepyfile(
        """
        import unittest

        class TestIt(unittest.TestCase):
            def test_it(self): pass
            def test_it2(self): pass

        raise unittest.SkipTest()

        class TestIt2(unittest.TestCase):
            def test_it(self): pass
            def test_it2(self): pass
        """
    )
    reprec = pytester.inline_run()
    passed, skipped, failed = reprec.countoutcomes()
    assert passed == 0
    # Unittest reports one fake test for a skipped module.
    assert skipped == 1
    assert failed == 0
    assert reprec.ret == ExitCode.NO_TESTS_COLLECTED

@ -1,4 +1,5 @@
import os
import sys
import warnings
from typing import List
from typing import Optional

@ -774,3 +775,57 @@ class TestStackLevel:
                "*Unknown pytest.mark.unknown*",
            ]
        )


def test_resource_warning(pytester: Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
    # Some platforms (notably PyPy) don't have tracemalloc.
    # We choose to explicitly not skip this in case tracemalloc is not
    # available, using `importorskip("tracemalloc")` for example,
    # because we want to ensure the same code path does not break in those platforms.
    try:
        import tracemalloc  # noqa

        has_tracemalloc = True
    except ImportError:
        has_tracemalloc = False

    # Explicitly disable PYTHONTRACEMALLOC in case pytest's test suite is running
    # with it enabled.
    monkeypatch.delenv("PYTHONTRACEMALLOC", raising=False)

    pytester.makepyfile(
        """
        def open_file(p):
            f = p.open("r")
            assert p.read_text() == "hello"

        def test_resource_warning(tmp_path):
            p = tmp_path.joinpath("foo.txt")
            p.write_text("hello")
            open_file(p)
        """
    )
    result = pytester.run(sys.executable, "-Xdev", "-m", "pytest")
    expected_extra = (
        [
            "*ResourceWarning* unclosed file*",
            "*Enable tracemalloc to get traceback where the object was allocated*",
            "*See https* for more info.",
        ]
        if has_tracemalloc
        else []
    )
    result.stdout.fnmatch_lines([*expected_extra, "*1 passed*"])

    monkeypatch.setenv("PYTHONTRACEMALLOC", "20")

    result = pytester.run(sys.executable, "-Xdev", "-m", "pytest")
    expected_extra = (
        [
            "*ResourceWarning* unclosed file*",
            "*Object allocated at*",
        ]
        if has_tracemalloc
        else []
    )
    result.stdout.fnmatch_lines([*expected_extra, "*1 passed*"])

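A hedged, standalone illustration of the behaviour this test drives pytest through: under `python -X dev` an unclosed file produces a ResourceWarning, and enabling tracemalloc (PYTHONTRACEMALLOC=20 or `-X tracemalloc=20`) adds an "Object allocated at" traceback to it. The file name and helper below are illustrative assumptions, not part of the diff.

# leak_example.py -- run with:  python -X dev -X tracemalloc=20 leak_example.py
import pathlib
import tempfile


def open_without_closing(path: pathlib.Path) -> str:
    f = path.open("r")  # never closed -> ResourceWarning when f is collected
    return path.read_text()


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as d:
        p = pathlib.Path(d) / "foo.txt"
        p.write_text("hello")
        open_without_closing(p)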