Compare commits

..

34 Commits

Author SHA1 Message Date
pytest bot
2f2f1a601e Prepare release version 7.1.2 2022-04-23 11:33:44 +00:00
github-actions[bot]
5c04f3a1a2 [7.1.x] Fix wrong log_file docs (#9879)
Co-authored-by: Zac Hatfield-Dodds <zac.hatfield.dodds@gmail.com>
2022-04-22 18:40:15 +00:00
Bruno Oliveira
078733c005 Merge pull request #9872 from pytest-dev/backport-9871-to-7.1.x 2022-04-21 00:07:25 -03:00
Zac Hatfield-Dodds
3a7ead6bcf [7.1.x] fix: move 'import getpass' statement to try-clause 2022-04-21 02:46:24 +00:00
Bruno Oliveira
6d75333780 [7.1.x] Increase stacklevel to point at user's code (#9870)
Co-authored-by: Hugo van Kemenade <hugovk@users.noreply.github.com>
2022-04-20 14:38:00 -03:00
Hugo van Kemenade
ddbb998aed [7.1.x] Increase stacklevel to point at user's code 2022-04-20 16:09:01 +00:00
Bruno Oliveira
0ec5886ad5 Merge pull request #9855 from pytest-dev/backport-9854-to-7.1.x 2022-04-12 14:05:44 -03:00
Bruno Oliveira
f2469fca37 [7.1.x] Docs: link to easy issues in contributing guide 2022-04-12 16:42:32 +00:00
Bruno Oliveira
94ec0f8ad8 Merge pull request #9846 from pytest-dev/backport-9842-to-7.1.x 2022-04-09 11:25:28 -03:00
Anthony Sottile
5ef96fdb53 [7.1.x] fix comparison of dataclasses with InitVar 2022-04-09 00:48:09 +00:00
Bruno Oliveira
7a501fb313 Merge pull request #9844 from pytest-dev/backport-9843-to-7.1.x 2022-04-08 21:46:44 -03:00
Anthony Sottile
1769c66def [7.1.x] update pre-commit hooks 2022-04-08 20:21:19 -04:00
Bruno Oliveira
840c418de6 [7.1.x] temporarily pin jinja2 version for docs build 2022-04-08 23:35:13 +00:00
Bruno Oliveira
6461e2e385 Merge pull request #9805 from pytest-dev/backport-9804-to-7.1.x 2022-03-21 13:39:05 -03:00
Bruno Oliveira
b55b7f1ad4 [7.1.x] Change directories during some tests in test_collection.py 2022-03-21 16:13:52 +00:00
Bruno Oliveira
d9794ed3cf Merge pull request #9801 from pytest-dev/backport-9800-to-7.1.x 2022-03-21 08:38:29 -03:00
Bruno Oliveira
8b33683cbf [7.1.x] Fix CI for Python 3.11 2022-03-21 11:20:19 +00:00
Bruno Oliveira
1d2e50faa6 Merge pull request #9799 from pytest-dev/backport-9798-to-7.1.x 2022-03-20 22:55:24 -03:00
Bruno Oliveira
6820ab2bd4 Merge pull request #9795 from pytest-dev/backport-9794-to-7.1.x
[7.1.x] Split test/deploy workflows
2022-03-20 22:35:44 -03:00
Kian Eliasi
78356dc353 [7.1.x] Remove unnecessary numpy import 2022-03-21 01:35:22 +00:00
Bruno Oliveira
f1c27608ec [7.1.x] Split test/deploy workflows 2022-03-19 12:45:41 +00:00
Bruno Oliveira
0ceaa57d9d Merge pull request #9790 from pytest-dev/backport-9789-to-7.1.x
[7.1.x] [style]: fix typo in docstring
2022-03-18 19:01:32 -03:00
Bruno Oliveira
93fad3286b [7.1.x] [style]: fix typo in docstring 2022-03-18 21:59:16 +00:00
Ran Benita
b9462ed7d0 Merge pull request #9785 from pytest-dev/release-7.1.1
Prepare release 7.1.1
2022-03-17 23:17:11 +02:00
pytest bot
0ffe9e0742 Prepare release version 7.1.1 2022-03-17 20:21:30 +00:00
Ran Benita
6f2c1ec035 Merge pull request #9784 from pytest-dev/backport-9768-to-7.1.x
[7.1.x] testing: fix tests when run under `-v` or `-vv`
2022-03-17 22:13:28 +02:00
Ran Benita
a65c47a1a4 Merge pull request #9783 from pytest-dev/backport-9780-to-7.1.x
[7.1.x] config: restore pre-pytest 7.1.0 confcutdir exclusion behavior
2022-03-17 18:34:49 +02:00
pre-commit-ci[bot]
30d995ed25 [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
2022-03-17 16:27:25 +00:00
Ran Benita
10a14d1318 [7.1.x] testing: fix tests when run under -v or -vv 2022-03-17 16:26:18 +00:00
pre-commit-ci[bot]
f4cfc596c6 [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
2022-03-17 16:16:12 +00:00
Ran Benita
f1df8074b3 [7.1.x] config: restore pre-pytest 7.1.0 confcutdir exclusion behavior 2022-03-17 16:15:03 +00:00
Ran Benita
7d4d1ecde6 Merge pull request #9758 from pytest-dev/release-7.1.0
Prepare release 7.1.0
2022-03-13 16:58:05 +02:00
pre-commit-ci[bot]
1dbffcc0b4 [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
2022-03-13 13:44:48 +00:00
pytest bot
d53a5fb371 Prepare release version 7.1.0 2022-03-13 13:43:44 +00:00
230 changed files with 6803 additions and 22104 deletions


@@ -9,9 +9,3 @@ updates:
allow:
- dependency-type: direct
- dependency-type: indirect
- package-ecosystem: github-actions
directory: /
schedule:
interval: weekly
time: "03:00"
open-pull-requests-limit: 10


@@ -22,7 +22,7 @@ jobs:
pull-requests: write
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
with:
fetch-depth: 0
persist-credentials: true


@@ -1,86 +1,52 @@
name: deploy
on:
workflow_dispatch:
inputs:
version:
description: 'Release version'
required: true
default: '1.2.3'
push:
tags:
# These tags are protected, see:
# https://github.com/pytest-dev/pytest/settings/tag_protection
- "[0-9]+.[0-9]+.[0-9]+"
- "[0-9]+.[0-9]+.[0-9]+rc[0-9]+"
# Set permissions at the job level.
permissions: {}
jobs:
package:
runs-on: ubuntu-latest
env:
SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.version }}
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Build and Check Package
uses: hynek/build-and-inspect-python-package@v1.5.4
deploy:
if: github.repository == 'pytest-dev/pytest'
needs: [package]
runs-on: ubuntu-latest
environment: deploy
timeout-minutes: 30
permissions:
id-token: write
contents: write
steps:
- uses: actions/checkout@v4
- name: Download Package
uses: actions/download-artifact@v3
with:
name: Packages
path: dist
- name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@v1.8.11
- name: Push tag
run: |
git config user.name "pytest bot"
git config user.email "pytestbot@gmail.com"
git tag --annotate --message=v${{ github.event.inputs.version }} ${{ github.event.inputs.version }} ${{ github.sha }}
git push origin ${{ github.event.inputs.version }}
release-notes:
# todo: generate the content in the build job
# the goal being of using a github action script to push the release data
# after success instead of creating a complete python/tox env
needs: [deploy]
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: write
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
with:
fetch-depth: 0
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: "3.11"
python-version: "3.7"
- name: Install tox
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install --upgrade tox
pip install --upgrade build tox
- name: Build package
run: |
python -m build
- name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@master
with:
user: __token__
password: ${{ secrets.pypi_token }}
- name: Publish GitHub release notes
env:


@@ -27,12 +27,12 @@ jobs:
pull-requests: write
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: "3.8"


@@ -1,23 +0,0 @@
name: close needs-information issues
on:
schedule:
- cron: "30 1 * * *"
workflow_dispatch:
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
steps:
- uses: actions/stale@v9
with:
debug-only: false
days-before-issue-stale: 14
days-before-issue-close: 7
only-labels: "status: needs information"
stale-issue-label: "stale"
stale-issue-message: "This issue is stale because it has been open for 14 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 7 days since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1


@@ -18,28 +18,11 @@ on:
env:
PYTEST_ADDOPTS: "--color=yes"
# Cancel running jobs for the same workflow and branch.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# Set permissions at the job level.
permissions: {}
jobs:
package:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Build and Check Package
uses: hynek/build-and-inspect-python-package@v1.5.4
build:
needs: [package]
runs-on: ${{ matrix.os }}
timeout-minutes: 45
permissions:
@@ -49,41 +32,46 @@ jobs:
fail-fast: false
matrix:
name: [
"windows-py37",
"windows-py37-pluggy",
"windows-py38",
"windows-py38-pluggy",
"windows-py39",
"windows-py310",
"windows-py311",
"windows-py312",
"ubuntu-py37",
"ubuntu-py37-pluggy",
"ubuntu-py37-freeze",
"ubuntu-py38",
"ubuntu-py38-pluggy",
"ubuntu-py38-freeze",
"ubuntu-py39",
"ubuntu-py310",
"ubuntu-py311",
"ubuntu-py312",
"ubuntu-pypy3",
"macos-py37",
"macos-py38",
"macos-py39",
"macos-py310",
"macos-py312",
"docs",
"doctesting",
"plugins",
]
include:
- name: "windows-py37"
python: "3.7"
os: windows-latest
tox_env: "py37-numpy"
- name: "windows-py37-pluggy"
python: "3.7"
os: windows-latest
tox_env: "py37-pluggymain-xdist"
- name: "windows-py38"
python: "3.8"
os: windows-latest
tox_env: "py38-unittestextras"
use_coverage: true
- name: "windows-py38-pluggy"
python: "3.8"
os: windows-latest
tox_env: "py38-pluggymain-pylib-xdist"
- name: "windows-py39"
python: "3.9"
os: windows-latest
@@ -93,27 +81,27 @@ jobs:
os: windows-latest
tox_env: "py310-xdist"
- name: "windows-py311"
python: "3.11"
python: "3.11-dev"
os: windows-latest
tox_env: "py311"
- name: "windows-py312"
python: "3.12-dev"
os: windows-latest
tox_env: "py312"
- name: "ubuntu-py37"
python: "3.7"
os: ubuntu-latest
tox_env: "py37-lsof-numpy-pexpect"
use_coverage: true
- name: "ubuntu-py37-pluggy"
python: "3.7"
os: ubuntu-latest
tox_env: "py37-pluggymain-xdist"
- name: "ubuntu-py37-freeze"
python: "3.7"
os: ubuntu-latest
tox_env: "py37-freeze"
- name: "ubuntu-py38"
python: "3.8"
os: ubuntu-latest
tox_env: "py38-lsof-numpy-pexpect"
use_coverage: true
- name: "ubuntu-py38-pluggy"
python: "3.8"
os: ubuntu-latest
tox_env: "py38-pluggymain-pylib-xdist"
- name: "ubuntu-py38-freeze"
python: "3.8"
os: ubuntu-latest
tox_env: "py38-freeze"
tox_env: "py38-xdist"
- name: "ubuntu-py39"
python: "3.9"
os: ubuntu-latest
@@ -123,66 +111,57 @@ jobs:
os: ubuntu-latest
tox_env: "py310-xdist"
- name: "ubuntu-py311"
python: "3.11"
python: "3.11-dev"
os: ubuntu-latest
tox_env: "py311"
use_coverage: true
- name: "ubuntu-py312"
python: "3.12-dev"
os: ubuntu-latest
tox_env: "py312"
use_coverage: true
- name: "ubuntu-pypy3"
python: "pypy-3.8"
python: "pypy-3.7"
os: ubuntu-latest
tox_env: "pypy3-xdist"
- name: "macos-py37"
python: "3.7"
os: macos-latest
tox_env: "py37-xdist"
- name: "macos-py38"
python: "3.8"
os: macos-latest
tox_env: "py38-xdist"
use_coverage: true
- name: "macos-py39"
python: "3.9"
os: macos-latest
tox_env: "py39-xdist"
use_coverage: true
- name: "macos-py310"
python: "3.10"
os: macos-latest
tox_env: "py310-xdist"
- name: "macos-py312"
python: "3.12-dev"
os: macos-latest
tox_env: "py312-xdist"
- name: "plugins"
python: "3.12"
python: "3.9"
os: ubuntu-latest
tox_env: "plugins"
- name: "docs"
python: "3.7"
os: ubuntu-latest
tox_env: "docs"
- name: "doctesting"
python: "3.8"
python: "3.7"
os: ubuntu-latest
tox_env: "doctesting"
use_coverage: true
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
with:
fetch-depth: 0
persist-credentials: false
- name: Download Package
uses: actions/download-artifact@v3
with:
name: Packages
path: dist
- name: Set up Python ${{ matrix.python }}
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
check-latest: ${{ endsWith(matrix.python, '-dev') }}
- name: Install dependencies
run: |
@@ -191,13 +170,11 @@ jobs:
- name: Test without coverage
if: "! matrix.use_coverage"
shell: bash
run: tox run -e ${{ matrix.tox_env }} --installpkg `find dist/*.tar.gz`
run: "tox -e ${{ matrix.tox_env }}"
- name: Test with coverage
if: "matrix.use_coverage"
shell: bash
run: tox run -e ${{ matrix.tox_env }}-coverage --installpkg `find dist/*.tar.gz`
run: "tox -e ${{ matrix.tox_env }}-coverage"
- name: Generate coverage report
if: "matrix.use_coverage"
@@ -205,8 +182,7 @@ jobs:
- name: Upload coverage to Codecov
if: "matrix.use_coverage"
uses: codecov/codecov-action@v3
continue-on-error: true
uses: codecov/codecov-action@v2
with:
fail_ci_if_error: true
files: ./coverage.xml


@@ -11,7 +11,7 @@ on:
permissions: {}
jobs:
update-plugin-list:
createPullRequest:
if: github.repository_owner == 'pytest-dev'
runs-on: ubuntu-latest
permissions:
@@ -20,33 +20,25 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Setup Python
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: "3.11"
cache: pip
- name: requests-cache
uses: actions/cache@v3
with:
path: ~/.cache/pytest-plugin-list/
key: plugins-http-cache-${{ github.run_id }} # Can use time based key as well
restore-keys: plugins-http-cache-
python-version: 3.8
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install packaging requests tabulate[widechars] tqdm requests-cache platformdirs
pip install packaging requests tabulate[widechars] tqdm
- name: Update Plugin List
run: python scripts/update-plugin-list.py
- name: Create Pull Request
uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38
uses: peter-evans/create-pull-request@2455e1596942c2902952003bbb574afbbe2ab2e6
with:
commit-message: '[automated] Update plugin list'
author: 'pytest bot <pytestbot@users.noreply.github.com>'

.gitignore

@@ -50,7 +50,6 @@ coverage.xml
.project
.settings
.vscode
__pycache__/
# generated by pip
pip-wheel-metadata/


@@ -1,16 +1,16 @@
repos:
- repo: https://github.com/psf/black
rev: 23.12.1
rev: 22.3.0
hooks:
- id: black
args: [--safe, --quiet]
- repo: https://github.com/asottile/blacken-docs
rev: 1.16.0
rev: v1.12.1
hooks:
- id: blacken-docs
additional_dependencies: [black==23.7.0]
additional_dependencies: [black==20.8b1]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
rev: v4.1.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
@@ -20,8 +20,8 @@ repos:
- id: debug-statements
exclude: _pytest/(debugging|hookspec).py
language_version: python3
- repo: https://github.com/PyCQA/autoflake
rev: v2.2.1
- repo: https://github.com/myint/autoflake
rev: v1.4
hooks:
- id: autoflake
name: autoflake
@@ -29,47 +29,46 @@ repos:
language: python
files: \.py$
- repo: https://github.com/PyCQA/flake8
rev: 6.1.0
rev: 4.0.1
hooks:
- id: flake8
language_version: python3
additional_dependencies:
- flake8-typing-imports==1.12.0
- flake8-docstrings==1.5.0
- repo: https://github.com/asottile/reorder-python-imports
rev: v3.12.0
- repo: https://github.com/asottile/reorder_python_imports
rev: v3.0.1
hooks:
- id: reorder-python-imports
args: ['--application-directories=.:src', --py38-plus]
args: ['--application-directories=.:src', --py37-plus]
- repo: https://github.com/asottile/pyupgrade
rev: v3.15.0
rev: v2.31.0
hooks:
- id: pyupgrade
args: [--py38-plus]
args: [--py37-plus]
- repo: https://github.com/asottile/setup-cfg-fmt
rev: v2.5.0
rev: v1.20.0
hooks:
- id: setup-cfg-fmt
args: ["--max-py-version=3.12", "--include-version-classifiers"]
args: [--max-py-version=3.10]
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.10.0
rev: v1.9.0
hooks:
- id: python-use-type-annotations
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.8.0
rev: v0.931
hooks:
- id: mypy
files: ^(src/|testing/)
args: []
additional_dependencies:
- iniconfig>=1.1.0
- py>=1.8.2
- attrs>=19.2.0
- packaging
- tomli
- types-atomicwrites
- types-pkg_resources
# for mypy running on python>=3.11 since exceptiongroup is only a dependency
# on <3.11
- exceptiongroup>=1.0.0rc8
- repo: local
hooks:
- id: rst
@@ -102,7 +101,7 @@ repos:
types: [python]
- id: py-path-deprecated
name: py.path usage is deprecated
exclude: docs|src/_pytest/deprecated.py|testing/deprecated_test.py|src/_pytest/legacypath.py
exclude: docs|src/_pytest/deprecated.py|testing/deprecated_test.py
language: pygrep
entry: \bpy\.path\.local
types: [python]


@@ -2,16 +2,9 @@ version: 2
python:
install:
# Install pytest first, then doc/en/requirements.txt.
# This order is important to honor any pins in doc/en/requirements.txt
# when the pinned library is also a dependency of pytest.
- method: pip
path: .
- requirements: doc/en/requirements.txt
sphinx:
configuration: doc/en/conf.py
fail_on_warning: true
- requirements: doc/en/requirements.txt
- method: pip
path: .
build:
os: ubuntu-20.04

AUTHORS

@@ -8,19 +8,13 @@ Abdeali JK
Abdelrahman Elbehery
Abhijeet Kasurde
Adam Johnson
Adam Stewart
Adam Uhlir
Ahn Ki-Wook
Akhilesh Ramakrishnan
Akiomi Kamakura
Alan Velasco
Alessio Izzo
Alex Jones
Alex Lambson
Alexander Johnson
Alexander King
Alexei Kozlenok
Alice Purcell
Allan Feldman
Aly Sivji
Amir Elkess
@@ -48,26 +42,19 @@ Ariel Pillemer
Armin Rigo
Aron Coyle
Aron Curzon
Arthur Richard
Ashish Kurmi
Aviral Verma
Aviv Palivoda
Babak Keyvani
Barney Gale
Ben Gartner
Ben Webb
Benjamin Peterson
Benjamin Schubert
Bernard Pratz
Bo Wu
Bob Ippolito
Brian Dorsey
Brian Larsen
Brian Maissy
Brian Okken
Brianna Laugher
Bruno Oliveira
Cal Jacobson
Cal Leeming
Carl Friedrich Bolz
Carlos Jenkins
@@ -75,12 +62,9 @@ Ceridwen
Charles Cloud
Charles Machalow
Charnjit SiNGH (CCSJ)
Cheuk Ting Ho
Chris Mahoney
Chris Lamb
Chris NeJame
Chris Rose
Chris Wheeler
Christian Boelsen
Christian Fetzer
Christian Neumüller
@@ -99,8 +83,6 @@ Damian Skrzypczak
Daniel Grana
Daniel Hahler
Daniel Nuri
Daniel Sánchez Castelló
Daniel Valenzuela Zenteno
Daniel Wandschneider
Daniele Procida
Danielle Jenkins
@@ -133,20 +115,15 @@ Eric Hunsberger
Eric Liu
Eric Siegerman
Erik Aronesty
Erik Hasse
Erik M. Bray
Evan Kepner
Evgeny Seliverstov
Fabien Zarifian
Fabio Zadrozny
Felix Hofstätter
Felix Nieuwenhuizen
Feng Ma
Florian Bruhin
Florian Dahlitz
Floris Bruynooghe
Fraser Stark
Gabriel Landau
Gabriel Reis
Garvit Shubham
Gene Wood
@@ -172,12 +149,8 @@ Ian Bicking
Ian Lesperance
Ilya Konstantinov
Ionuț Turturică
Isaac Virshup
Israel Fruchter
Itxaso Aizpurua
Iwan Briquemont
Jaap Broekhuizen
Jake VanderPlas
Jakob van Santen
Jakub Mitoraj
James Bourbeau
@@ -189,11 +162,8 @@ Javier Romero
Jeff Rackauckas
Jeff Widman
Jenni Rinker
Jens Tröger
John Eddie Ayson
John Litborn
John Towler
Jon Parise
Jon Sonesen
Jonas Obrist
Jordan Guymon
@@ -203,20 +173,17 @@ Joseph Hunkeler
Josh Karpel
Joshua Bronson
Jurko Gospodnetić
Justice Ndou
Justyna Janczyszyn
Justice Ndou
Kale Kundert
Kamran Ahmad
Kenny Y
Karl O. Pinc
Karthikeyan Singaravelan
Katarzyna Jachim
Katarzyna Król
Katerina Koukiou
Keri Volans
Kevin C
Kevin Cox
Kevin Hierro Carrasco
Kevin J. Foley
Kian Eliasi
Kian-Meng Ang
@@ -238,14 +205,12 @@ Maho
Maik Figura
Mandeep Bhutani
Manuel Krebber
Marc Mueller
Marc Schlaich
Marcelo Duarte Trevisani
Marcin Bachry
Marco Gorelli
Mark Abramowitz
Mark Dickinson
Marko Pacak
Markus Unterwaditzer
Martijn Faassen
Martin Altmayer
@@ -259,6 +224,7 @@ Matthias Hafner
Maxim Filipenko
Maximilian Cosmo Sitter
mbyt
Mickey Pashov
Michael Aquilina
Michael Birtwell
Michael Droettboom
@@ -266,29 +232,23 @@ Michael Goerz
Michael Krebs
Michael Seifert
Michal Wajszczuk
Michał Górny
Michał Zięba
Mickey Pashov
Mihai Capotă
Mihail Milushev
Mike Hoyle (hoylemd)
Mike Lundy
Milan Lesnek
Miro Hrončok
Nathaniel Compton
Nathaniel Waisbrot
Ned Batchelder
Neil Martin
Neven Mundar
Nicholas Devenish
Nicholas Murphy
Niclas Olofsson
Nicolas Delaby
Nikolay Kondratyev
Nipunn Koorapati
Olga Matoula
Oleg Pidsadnyi
Oleg Sushchenko
Olga Matoula
Oliver Bestwalter
Omar Kohl
Omer Hadari
@@ -296,16 +256,13 @@ Ondřej Súkup
Oscar Benjamin
Parth Patel
Patrick Hayes
Patrick Lannigan
Paul Müller
Paul Reece
Pauli Virtanen
Pavel Karateev
Paweł Adamczak
Pedro Algarvio
Petter Strandmark
Philipp Loose
Pierre Sassoulas
Pieter Mulder
Piotr Banaszkiewicz
Piotr Helm
@@ -315,19 +272,15 @@ Prashant Sharma
Pulkit Goyal
Punyashloka Biswal
Quentin Pradet
q0w
Ralf Schmitt
Ralph Giles
Ram Rachum
Ralph Giles
Ran Benita
Raphael Castaneda
Raphael Pierzina
Rafal Semik
Raquel Alegre
Ravi Chandra
Reagan Lee
Robert Holt
Roberto Aldera
Roberto Polli
Roland Puntaier
Romain Dorgueil
@@ -336,36 +289,25 @@ Ronny Pfannschmidt
Ross Lawley
Ruaridh Williamson
Russel Winder
Ryan Puddephatt
Ryan Wooden
Sadra Barikbin
Saiprasad Kale
Samuel Colvin
Samuel Dion-Girardeau
Samuel Searles-Bryant
Samuel Therrien (Avasam)
Samuele Pedroni
Sanket Duthade
Sankt Petersbug
Saravanan Padmanaban
Sean Malloy
Segev Finer
Serhii Mozghovyi
Seth Junot
Shantanu Jain
Sharad Nair
Shubham Adep
Simon Blanchard
Simon Gomizelj
Simon Holesch
Simon Kerr
Skylar Downes
Srinivas Reddy Thatiparthy
Stefaan Lippens
Stefan Farmbauer
Stefan Scherfke
Stefan Zimmermann
Stefanie Molin
Stefano Taschini
Steffen Allner
Stephan Obermann
@@ -375,38 +317,28 @@ Tadek Teleżyński
Takafumi Arakaki
Taneli Hukkinen
Tanvi Mehta
Tanya Agarwal
Tarcisio Fischer
Tareq Alayan
Tatiana Ovary
Ted Xiao
Terje Runde
Thomas Grainger
Thomas Hisch
Tim Hoffmann
Tim Strazny
TJ Bruno
Tobias Diez
Tom Dalton
Tom Viner
Tomáš Gavenčiak
Tomer Keren
Tony Narlock
Tor Colvin
Trevor Bekolay
Tushar Sadhwani
Tyler Goodlet
Tyler Smart
Tzu-ping Chung
Vasily Kuznetsov
Victor Maryama
Victor Rodriguez
Victor Uriarte
Vidar T. Fauske
Vijay Arora
Virgil Dupras
Vitaly Lashmanov
Vivaan Verma
Vlad Dragos
Vlad Radziuk
Vladyslav Rachek
@@ -419,14 +351,9 @@ Wouter van Ackooy
Xixi Zhao
Xuan Luong
Xuecong Liao
Yannick Péroux
Yoav Caspi
Yuliang Shao
Yusuke Kadowaki
Yuval Shimon
Zac Hatfield-Dodds
Zachary Kneupper
Zachary OBrien
Zhouxin Qiu
Zoltán Máté
Zsolt Cserna


@@ -50,7 +50,7 @@ Fix bugs
--------
Look through the `GitHub issues for bugs <https://github.com/pytest-dev/pytest/labels/type:%20bug>`_.
See also the `"good first issue" issues <https://github.com/pytest-dev/pytest/labels/good%20first%20issue>`_
See also the `"status: easy" issues <https://github.com/pytest-dev/pytest/labels/status%3A%20easy>`_
that are friendly to new contributors.
:ref:`Talk <contact>` to developers to find out how you can fix specific bugs. To indicate that you are going
@@ -197,12 +197,11 @@ Short version
~~~~~~~~~~~~~
#. Fork the repository.
#. Fetch tags from upstream if necessary (if you cloned only main `git fetch --tags https://github.com/pytest-dev/pytest`).
#. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed.
#. Follow `PEP-8 <https://www.python.org/dev/peps/pep-0008/>`_ for naming.
#. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting.
#. Tests are run using ``tox``::
tox -e linting,py39
tox -e linting,py37
The test environments above are usually enough to cover most cases locally.
@@ -224,7 +223,7 @@ changes you want to review and merge. Pull requests are stored on
Once you send a pull request, we can discuss its potential modifications and
even add more commits to it later on. There's an excellent tutorial on how Pull
Requests work in the
`GitHub Help Center <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests>`_.
`GitHub Help Center <https://help.github.com/articles/using-pull-requests/>`_.
Here is a simple overview, with pytest-specific bits:
@@ -237,7 +236,6 @@ Here is a simple overview, with pytest-specific bits:
$ git clone git@github.com:YOUR_GITHUB_USERNAME/pytest.git
$ cd pytest
$ git fetch --tags https://github.com/pytest-dev/pytest
# now, create your own branch off "main":
$ git checkout -b your-bugfix-branch-name main
@@ -246,11 +244,6 @@ Here is a simple overview, with pytest-specific bits:
be released in micro releases whereas features will be released in
minor releases and incompatible changes in major releases.
You will need the tags to test locally, so be sure you have the tags from the main repository. If you suspect you don't, set the main repository as upstream and fetch the tags::
$ git remote add upstream https://github.com/pytest-dev/pytest
$ git fetch upstream --tags
If you need some help with Git, follow this quick start
guide: https://git.wiki.kernel.org/index.php/QuickStart
@@ -274,24 +267,24 @@ Here is a simple overview, with pytest-specific bits:
#. Run all the tests
You need to have Python 3.8 or later available in your system. Now
You need to have Python 3.7 available in your system. Now
running tests is as simple as issuing this command::
$ tox -e linting,py39
$ tox -e linting,py37
This command will run tests via the "tox" tool against Python 3.9
This command will run tests via the "tox" tool against Python 3.7
and also perform "lint" coding-style checks.
#. You can now edit your local working copy and run the tests again as necessary. Please follow `PEP-8 <https://www.python.org/dev/peps/pep-0008/>`_ for naming.
#. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming.
You can pass different options to ``tox``. For example, to run tests on Python 3.9 and pass options to pytest
You can pass different options to ``tox``. For example, to run tests on Python 3.7 and pass options to pytest
(e.g. enter pdb on failure) to pytest you can do::
$ tox -e py39 -- --pdb
$ tox -e py37 -- --pdb
Or to only run tests in a particular test module on Python 3.9::
Or to only run tests in a particular test module on Python 3.7::
$ tox -e py39 -- testing/test_config.py
$ tox -e py37 -- testing/test_config.py
When committing, ``pre-commit`` will re-format the files if necessary.
@@ -387,7 +380,7 @@ them.
Backporting bug fixes for the next patch release
------------------------------------------------
Pytest makes a feature release every few weeks or months. In between, patch releases
Pytest makes feature release every few weeks or months. In between, patch releases
are made to the previous feature release, containing bug fixes only. The bug fixes
usually fix regressions, but may be any change that should reach users before the
next feature release.
@@ -396,7 +389,7 @@ Suppose for example that the latest release was 1.2.3, and you want to include
a bug fix in 1.2.4 (check https://github.com/pytest-dev/pytest/releases for the
actual latest release). The procedure for this is:
#. First, make sure the bug is fixed in the ``main`` branch, with a regular pull
#. First, make sure the bug is fixed the ``main`` branch, with a regular pull
request, as described above. An exception to this is if the bug fix is not
applicable to ``main`` anymore.


@@ -20,7 +20,7 @@
:target: https://codecov.io/gh/pytest-dev/pytest
:alt: Code coverage Status
.. image:: https://github.com/pytest-dev/pytest/actions/workflows/test.yml/badge.svg
.. image:: https://github.com/pytest-dev/pytest/workflows/test/badge.svg
:target: https://github.com/pytest-dev/pytest/actions?query=workflow%3Atest
.. image:: https://results.pre-commit.ci/badge/github/pytest-dev/pytest/main.svg
@@ -100,7 +100,7 @@ Features
- Can run `unittest <https://docs.pytest.org/en/stable/how-to/unittest.html>`_ (or trial),
`nose <https://docs.pytest.org/en/stable/how-to/nose.html>`_ test suites out of the box
- Python 3.8+ or PyPy3
- Python 3.7+ or PyPy3
- Rich plugin architecture, with over 850+ `external plugins <https://docs.pytest.org/en/latest/reference/plugin_list.html>`_ and thriving community


@@ -133,12 +133,14 @@ Releasing
Both automatic and manual processes described above follow the same steps from this point onward.
#. After all tests pass and the PR has been approved, trigger the ``deploy`` job
in https://github.com/pytest-dev/pytest/actions/workflows/deploy.yml, using the ``release-MAJOR.MINOR.PATCH`` branch
as source.
#. After all tests pass and the PR has been approved, tag the release commit
in the ``release-MAJOR.MINOR.PATCH`` branch and push it. This will publish to PyPI::
This job will require approval from ``pytest-dev/core``, after which it will publish to PyPI
and tag the repository.
git fetch upstream
git tag MAJOR.MINOR.PATCH upstream/release-MAJOR.MINOR.PATCH
git push upstream MAJOR.MINOR.PATCH
Wait for the deploy to complete, then make sure it is `available on PyPI <https://pypi.org/project/pytest>`_.
#. Merge the PR. **Make sure it's not squash-merged**, so that the tagged commit ends up in the main branch.


@@ -14,7 +14,7 @@ Each file should be named like ``<ISSUE>.<TYPE>.rst``, where
``<ISSUE>`` is an issue number, and ``<TYPE>`` is one of:
* ``feature``: new user facing features, like new command-line options and new behavior.
* ``improvement``: improvement of existing functionality, usually without requiring user intervention (for example, new fields being written in ``--junit-xml``, improved colors in terminal, etc).
* ``improvement``: improvement of existing functionality, usually without requiring user intervention (for example, new fields being written in ``--junitxml``, improved colors in terminal, etc).
* ``bugfix``: fixes a bug.
* ``doc``: documentation improvement, like rewording an entire session or adding missing docs.
* ``deprecation``: feature deprecation.
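For example (the issue number is purely illustrative), a bug fix for issue ``1234`` would be described in a file named ``1234.bugfix.rst`` containing a single short sentence about the change.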


@@ -17,6 +17,7 @@
<li><a href="{{ pathto('changelog') }}">Changelog</a></li>
<li><a href="{{ pathto('contributing') }}">Contributing</a></li>
<li><a href="{{ pathto('backwards-compatibility') }}">Backwards Compatibility</a></li>
<li><a href="{{ pathto('py27-py34-deprecation') }}">Python 2.7 and 3.4 Support</a></li>
<li><a href="{{ pathto('sponsor') }}">Sponsor</a></li>
<li><a href="{{ pathto('tidelift') }}">pytest for Enterprise</a></li>
<li><a href="{{ pathto('license') }}">License</a></li>
@@ -29,3 +30,5 @@
{%- endif %}
<hr>
<a href="{{ pathto('genindex') }}">Index</a>
<hr>


@@ -6,19 +6,6 @@ Release announcements
:maxdepth: 2
release-8.0.0rc1
release-7.4.4
release-7.4.3
release-7.4.2
release-7.4.1
release-7.4.0
release-7.3.2
release-7.3.1
release-7.3.0
release-7.2.2
release-7.2.1
release-7.2.0
release-7.1.3
release-7.1.2
release-7.1.1
release-7.1.0


@@ -1,28 +0,0 @@
pytest-7.1.3
=======================================
pytest 7.1.3 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Anthony Sottile
* Bruno Oliveira
* Gergely Kalmár
* Nipunn Koorapati
* Pax
* Sviatoslav Sydorenko
* Tim Hoffmann
* Tony Narlock
* Wolfremium
* Zach OBrien
* aizpurua23a
Happy testing,
The pytest Development Team


@@ -1,93 +0,0 @@
pytest-7.2.0
=======================================
The pytest team is proud to announce the 7.2.0 release!
This release contains new features, improvements, and bug fixes,
the full list of changes is available in the changelog:
https://docs.pytest.org/en/stable/changelog.html
For complete documentation, please visit:
https://docs.pytest.org/en/stable/
As usual, you can upgrade from PyPI via:
pip install -U pytest
Thanks to all of the contributors to this release:
* Aaron Berdy
* Adam Turner
* Albert Villanova del Moral
* Alice Purcell
* Anthony Sottile
* Anton Yakutovich
* Babak Keyvani
* Brandon Chinn
* Bruno Oliveira
* Chanvin Xiao
* Cheuk Ting Ho
* Chris Wheeler
* EmptyRabbit
* Ezio Melotti
* Florian Best
* Florian Bruhin
* Fredrik Berndtsson
* Gabriel Landau
* Gergely Kalmár
* Hugo van Kemenade
* James Gerity
* John Litborn
* Jon Parise
* Kevin C
* Kian Eliasi
* MatthewFlamm
* Miro Hrončok
* Nate Meyvis
* Neil Girdhar
* Nhieuvu1802
* Nipunn Koorapati
* Ofek Lev
* Paul Müller
* Paul Reece
* Pax
* Pete Baughman
* Peyman Salehi
* Philipp A
* Ran Benita
* Robert O'Shea
* Ronny Pfannschmidt
* Rowin
* Ruth Comer
* Samuel Colvin
* Samuel Gaist
* Sandro Tosi
* Shantanu
* Simon K
* Stephen Rosen
* Sviatoslav Sydorenko
* Tatiana Ovary
* Thierry Moisan
* Thomas Grainger
* Tim Hoffmann
* Tobias Diez
* Tony Narlock
* Vivaan Verma
* Wolfremium
* Zac Hatfield-Dodds
* Zach OBrien
* aizpurua23a
* gresm
* holesch
* itxasos23
* johnkangw
* skhomuti
* sommersoft
* wodny
* zx.qiu
Happy testing,
The pytest Development Team


@@ -1,25 +0,0 @@
pytest-7.2.1
=======================================
pytest 7.2.1 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Anthony Sottile
* Bruno Oliveira
* Daniel Valenzuela
* Kadino
* Prerak Patel
* Ronny Pfannschmidt
* Santiago Castro
* s-padmanaban
Happy testing,
The pytest Development Team


@@ -1,25 +0,0 @@
pytest-7.2.2
=======================================
pytest 7.2.2 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Bruno Oliveira
* Garvit Shubham
* Mahesh Vashishtha
* Ramsey
* Ronny Pfannschmidt
* Teejay
* q0w
* vin01
Happy testing,
The pytest Development Team


@@ -1,130 +0,0 @@
pytest-7.3.0
=======================================
The pytest team is proud to announce the 7.3.0 release!
This release contains new features, improvements, and bug fixes,
the full list of changes is available in the changelog:
https://docs.pytest.org/en/stable/changelog.html
For complete documentation, please visit:
https://docs.pytest.org/en/stable/
As usual, you can upgrade from PyPI via:
pip install -U pytest
Thanks to all of the contributors to this release:
* Aaron Berdy
* Adam Turner
* Albert Villanova del Moral
* Alessio Izzo
* Alex Hadley
* Alice Purcell
* Anthony Sottile
* Anton Yakutovich
* Ashish Kurmi
* Babak Keyvani
* Billy
* Brandon Chinn
* Bruno Oliveira
* Cal Jacobson
* Chanvin Xiao
* Cheuk Ting Ho
* Chris Wheeler
* Daniel Garcia Moreno
* Daniel Scheffler
* Daniel Valenzuela
* EmptyRabbit
* Ezio Melotti
* Felix Hofstätter
* Florian Best
* Florian Bruhin
* Fredrik Berndtsson
* Gabriel Landau
* Garvit Shubham
* Gergely Kalmár
* HTRafal
* Hugo van Kemenade
* Ilya Konstantinov
* Itxaso Aizpurua
* James Gerity
* Jay
* John Litborn
* Jon Parise
* Jouke Witteveen
* Kadino
* Kevin C
* Kian Eliasi
* Klaus Rettinghaus
* Kodi Arfer
* Mahesh Vashishtha
* Manuel Jacob
* Marko Pacak
* MatthewFlamm
* Miro Hrončok
* Nate Meyvis
* Neil Girdhar
* Nhieuvu1802
* Nipunn Koorapati
* Ofek Lev
* Paul Kehrer
* Paul Müller
* Paul Reece
* Pax
* Pete Baughman
* Peyman Salehi
* Philipp A
* Pierre Sassoulas
* Prerak Patel
* Ramsey
* Ran Benita
* Robert O'Shea
* Ronny Pfannschmidt
* Rowin
* Ruth Comer
* Samuel Colvin
* Samuel Gaist
* Sandro Tosi
* Santiago Castro
* Shantanu
* Simon K
* Stefanie Molin
* Stephen Rosen
* Sviatoslav Sydorenko
* Tatiana Ovary
* Teejay
* Thierry Moisan
* Thomas Grainger
* Tim Hoffmann
* Tobias Diez
* Tony Narlock
* Vivaan Verma
* Wolfremium
* Yannick PÉROUX
* Yusuke Kadowaki
* Zac Hatfield-Dodds
* Zach OBrien
* aizpurua23a
* bitzge
* bluthej
* gresm
* holesch
* itxasos23
* johnkangw
* q0w
* rdb
* s-padmanaban
* skhomuti
* sommersoft
* vin01
* wim glenn
* wodny
* zx.qiu
Happy testing,
The pytest Development Team


@@ -1,18 +0,0 @@
pytest-7.3.1
=======================================
pytest 7.3.1 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Ran Benita
Happy testing,
The pytest Development Team


@@ -1,21 +0,0 @@
pytest-7.3.2
=======================================
pytest 7.3.2 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Adam J. Stewart
* Alessio Izzo
* Bruno Oliveira
* Ran Benita
Happy testing,
The pytest Development Team


@@ -1,49 +0,0 @@
pytest-7.4.0
=======================================
The pytest team is proud to announce the 7.4.0 release!
This release contains new features, improvements, and bug fixes,
the full list of changes is available in the changelog:
https://docs.pytest.org/en/stable/changelog.html
For complete documentation, please visit:
https://docs.pytest.org/en/stable/
As usual, you can upgrade from PyPI via:
pip install -U pytest
Thanks to all of the contributors to this release:
* Adam J. Stewart
* Alessio Izzo
* Alex
* Alex Lambson
* Brian Larsen
* Bruno Oliveira
* Bryan Ricker
* Chris Mahoney
* Facundo Batista
* Florian Bruhin
* Jarrett Keifer
* Kenny Y
* Miro Hrončok
* Ran Benita
* Roberto Aldera
* Ronny Pfannschmidt
* Sergey Kim
* Stefanie Molin
* Vijay Arora
* Ville Skyttä
* Zac Hatfield-Dodds
* bzoracler
* leeyueh
* nondescryptid
* theirix
Happy testing,
The pytest Development Team


@@ -1,20 +0,0 @@
pytest-7.4.1
=======================================
pytest 7.4.1 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Bruno Oliveira
* Florian Bruhin
* Ran Benita
Happy testing,
The pytest Development Team


@@ -1,18 +0,0 @@
pytest-7.4.2
=======================================
pytest 7.4.2 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Bruno Oliveira
Happy testing,
The pytest Development Team


@@ -1,19 +0,0 @@
pytest-7.4.3
=======================================
pytest 7.4.3 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Bruno Oliveira
* Marc Mueller
Happy testing,
The pytest Development Team


@@ -1,20 +0,0 @@
pytest-7.4.4
=======================================
pytest 7.4.4 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/stable/changelog.html.
Thanks to all of the contributors to this release:
* Bruno Oliveira
* Ran Benita
* Zac Hatfield-Dodds
Happy testing,
The pytest Development Team


@@ -1,82 +0,0 @@
pytest-8.0.0rc1
=======================================
The pytest team is proud to announce the 8.0.0rc1 release!
This release contains new features, improvements, bug fixes, and breaking changes, so users
are encouraged to take a look at the CHANGELOG carefully:
https://docs.pytest.org/en/stable/changelog.html
For complete documentation, please visit:
https://docs.pytest.org/en/stable/
As usual, you can upgrade from PyPI via:
pip install -U pytest
Thanks to all of the contributors to this release:
* Akhilesh Ramakrishnan
* Aleksandr Brodin
* Anthony Sottile
* Arthur Richard
* Avasam
* Benjamin Schubert
* Bruno Oliveira
* Carsten Grohmann
* Cheukting
* Chris Mahoney
* Christoph Anton Mitterer
* DetachHead
* Erik Hasse
* Florian Bruhin
* Fraser Stark
* Ha Pam
* Hugo van Kemenade
* Isaac Virshup
* Israel Fruchter
* Jens Tröger
* Jon Parise
* Kenny Y
* Lesnek
* Marc Mueller
* Michał Górny
* Mihail Milushev
* Milan Lesnek
* Miro Hrončok
* Patrick Lannigan
* Ran Benita
* Reagan Lee
* Ronny Pfannschmidt
* Sadra Barikbin
* Sean Malloy
* Sean Patrick Malloy
* Sharad Nair
* Simon Blanchard
* Sourabh Beniwal
* Stefaan Lippens
* Tanya Agarwal
* Thomas Grainger
* Tom Mortimer-Jones
* Tushar Sadhwani
* Tyler Smart
* Uday Kumar
* Warren Markham
* WarrenTheRabbit
* Zac Hatfield-Dodds
* Ziad Kermadi
* akhilramkee
* antosikv
* bowugit
* mickeypash
* neilmartin2000
* pomponchik
* ryanpudd
* touilleWoman
* ubaumann
Happy testing,
The pytest Development Team


@@ -22,7 +22,7 @@ b) transitional: the old and new API don't conflict
We will only start the removal of deprecated functionality in major releases (e.g. if we deprecate something in 3.0 we will start to remove it in 4.0), and keep it around for at least two minor releases (e.g. if we deprecate something in 3.9 and 4.0 is the next release, we start to remove it in 5.0, not in 4.0).
A deprecated feature scheduled to be removed in major version X will use the warning class `PytestRemovedInXWarning` (a subclass of :class:`~pytest.PytestDeprecationWarning`).
A deprecated feature scheduled to be removed in major version X will use the warning class `PytestRemovedInXWarning` (a subclass of :class:`~pytest.PytestDeprecationwarning`).
When the deprecation expires (e.g. 4.0 is released), we won't remove the deprecated functionality immediately, but will use the standard warning filters to turn `PytestRemovedInXWarning` (e.g. `PytestRemovedIn4Warning`) into **errors** by default. This approach makes it explicit that removal is imminent, and still gives you time to turn the deprecated feature into a warning instead of an error so it can be dealt with in your own time. In the next minor release (e.g. 4.1), the feature will be effectively removed.
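A minimal sketch of opting back into a plain warning with the standard ``filterwarnings`` mark; the class below is the :class:`~pytest.PytestDeprecationWarning` base class, standing in for whichever version-specific ``PytestRemovedInXWarning`` subclass applies:

.. code-block:: python

    import pytest


    # Keep the pending removal visible as a warning (not an error) for this
    # test while the deprecated usage is being migrated.
    @pytest.mark.filterwarnings("default::pytest.PytestDeprecationWarning")
    def test_code_that_still_uses_a_deprecated_feature():
        ...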
@@ -77,21 +77,3 @@ Deprecation Roadmap
Features currently deprecated and removed in previous releases can be found in :ref:`deprecations`.
We track future deprecation and removal of features using milestones and the `deprecation <https://github.com/pytest-dev/pytest/issues?q=label%3A%22type%3A+deprecation%22>`_ and `removal <https://github.com/pytest-dev/pytest/labels/type%3A%20removal>`_ labels on GitHub.
Python version support
======================
Released pytest versions support all Python versions that are actively maintained at the time of the release:
============== ===================
pytest version min. Python version
============== ===================
8.0+ 3.8+
7.1+ 3.7+
6.2 - 7.0 3.6+
5.0 - 6.1 3.5+
3.3 - 4.6 2.7, 3.4+
============== ===================
`Status of Python Versions <https://devguide.python.org/versions/>`__.


@@ -18,11 +18,11 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
$ pytest --fixtures -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collected 0 items
cache -- .../_pytest/cacheprovider.py:526
cache -- .../_pytest/cacheprovider.py:510
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
@@ -33,93 +33,39 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
Values can be any object handled by the json stdlib module.
capsysbinary -- .../_pytest/capture.py:1008
Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
The captured output is made available via ``capsysbinary.readouterr()``
method calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``bytes`` objects.
Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
Example:
.. code-block:: python
def test_output(capsysbinary):
print("hello")
captured = capsysbinary.readouterr()
assert captured.out == b"hello\n"
capfd -- .../_pytest/capture.py:1036
Enable text capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
Example:
.. code-block:: python
def test_system_echo(capfd):
os.system('echo "hello"')
captured = capfd.readouterr()
assert captured.out == "hello\n"
capfdbinary -- .../_pytest/capture.py:1064
Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``byte`` objects.
Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
Example:
.. code-block:: python
def test_system_echo(capfdbinary):
os.system('echo "hello"')
captured = capfdbinary.readouterr()
assert captured.out == b"hello\n"
capsys -- .../_pytest/capture.py:980
capsys -- .../_pytest/capture.py:878
Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
The captured output is made available via ``capsys.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
capsysbinary -- .../_pytest/capture.py:895
Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
Example:
The captured output is made available via ``capsysbinary.readouterr()``
method calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``bytes`` objects.
.. code-block:: python
capfd -- .../_pytest/capture.py:912
Enable text capturing of writes to file descriptors ``1`` and ``2``.
def test_output(capsys):
print("hello")
captured = capsys.readouterr()
assert captured.out == "hello\n"
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
doctest_namespace [session scope] -- .../_pytest/doctest.py:743
capfdbinary -- .../_pytest/capture.py:929
Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``byte`` objects.
doctest_namespace [session scope] -- .../_pytest/doctest.py:731
Fixture that returns a :py:class:`dict` that will be injected into the
namespace of doctests.
Usually this fixture is used in conjunction with another ``autouse`` fixture:
.. code-block:: python
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["np"] = numpy
For more details: :ref:`doctest_namespace`.
pytestconfig [session scope] -- .../_pytest/fixtures.py:1365
pytestconfig [session scope] -- .../_pytest/fixtures.py:1334
Session-scoped fixture that returns the session's :class:`pytest.Config`
object.
@@ -163,10 +109,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
record_testsuite_property("ARCH", "PPC")
record_testsuite_property("STORAGE_TYPE", "CEPH")
:param name:
The property name.
:param value:
The property value. Will be converted to a string.
``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped.
.. warning::
@@ -174,10 +117,10 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
`pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See
:issue:`7767` for details.
tmpdir_factory [session scope] -- .../_pytest/legacypath.py:300
tmpdir_factory [session scope] -- .../_pytest/legacypath.py:295
Return a :class:`pytest.TempdirFactory` instance for the test session.
tmpdir -- .../_pytest/legacypath.py:307
tmpdir -- .../_pytest/legacypath.py:302
Return a temporary directory path object which is unique to each test
function invocation, created as a sub directory of the base temporary
directory.
@@ -189,14 +132,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
The returned object is a `legacy_path`_ object.
.. note::
These days, it is preferred to use ``tmp_path``.
:ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
.. _legacy_path: https://py.readthedocs.io/en/latest/path.html
caplog -- .../_pytest/logging.py:593
caplog -- .../_pytest/logging.py:487
Access and control log capturing.
Captured logs are available through the following properties/methods::
@@ -207,49 +145,42 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
* caplog.record_tuples -> list of (logger_name, level, message) tuples
* caplog.clear() -> clear captured records and formatted log output string
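A minimal usage sketch of ``caplog`` (the logger name and message are illustrative):

.. code-block:: python

    import logging


    def test_emits_warning(caplog):
        logging.getLogger("myapp").warning("disk low")
        # record_tuples holds (logger_name, level, message) for each captured record
        assert ("myapp", logging.WARNING, "disk low") in caplog.record_tuples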
monkeypatch -- .../_pytest/monkeypatch.py:30
monkeypatch -- .../_pytest/monkeypatch.py:29
A convenient fixture for monkey-patching.
The fixture provides these methods to modify objects, dictionaries, or
:data:`os.environ`:
The fixture provides these methods to modify objects, dictionaries or
os.environ::
* :meth:`monkeypatch.setattr(obj, name, value, raising=True) <pytest.MonkeyPatch.setattr>`
* :meth:`monkeypatch.delattr(obj, name, raising=True) <pytest.MonkeyPatch.delattr>`
* :meth:`monkeypatch.setitem(mapping, name, value) <pytest.MonkeyPatch.setitem>`
* :meth:`monkeypatch.delitem(obj, name, raising=True) <pytest.MonkeyPatch.delitem>`
* :meth:`monkeypatch.setenv(name, value, prepend=None) <pytest.MonkeyPatch.setenv>`
* :meth:`monkeypatch.delenv(name, raising=True) <pytest.MonkeyPatch.delenv>`
* :meth:`monkeypatch.syspath_prepend(path) <pytest.MonkeyPatch.syspath_prepend>`
* :meth:`monkeypatch.chdir(path) <pytest.MonkeyPatch.chdir>`
* :meth:`monkeypatch.context() <pytest.MonkeyPatch.context>`
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
monkeypatch.setitem(mapping, name, value)
monkeypatch.delitem(obj, name, raising=True)
monkeypatch.setenv(name, value, prepend=None)
monkeypatch.delenv(name, raising=True)
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
All modifications will be undone after the requesting test function or
fixture has finished. The ``raising`` parameter determines if a :class:`KeyError`
or :class:`AttributeError` will be raised if the set/deletion operation does not have the
specified target.
fixture has finished. The ``raising`` parameter determines if a KeyError
or AttributeError will be raised if the set/deletion operation has no target.
To undo modifications done by the fixture in a contained scope,
use :meth:`context() <pytest.MonkeyPatch.context>`.
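A minimal usage sketch of ``monkeypatch`` (the function and environment variable names are illustrative):

.. code-block:: python

    import os


    def get_prefix():
        return os.environ.get("PREFIX", "none")


    def test_get_prefix(monkeypatch):
        # setenv is undone automatically once the test finishes
        monkeypatch.setenv("PREFIX", "custom")
        assert get_prefix() == "custom"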
recwarn -- .../_pytest/recwarn.py:30
recwarn -- .../_pytest/recwarn.py:29
Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.
See https://docs.pytest.org/en/latest/how-to/capture-warnings.html for information
See https://docs.python.org/library/how-to/capture-warnings.html for information
on warning categories.
tmp_path_factory [session scope] -- .../_pytest/tmpdir.py:239
tmp_path_factory [session scope] -- .../_pytest/tmpdir.py:184
Return a :class:`pytest.TempPathFactory` instance for the test session.
tmp_path -- .../_pytest/tmpdir.py:254
tmp_path -- .../_pytest/tmpdir.py:199
Return a temporary directory path object which is unique to each test
function invocation, created as a sub directory of the base temporary
directory.
By default, a new base temporary directory is created each test session,
and old bases are removed after 3 sessions, to aid in debugging.
This behavior can be configured with :confval:`tmp_path_retention_count` and
:confval:`tmp_path_retention_policy`.
If ``--basetemp`` is used then it is cleared each session. See :ref:`base
and old bases are removed after 3 sessions, to aid in debugging. If
``--basetemp`` is used then it is cleared each session. See :ref:`base
temporary directory`.
The returned object is a :class:`pathlib.Path` object.
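A minimal usage sketch of ``tmp_path`` (file name and content are illustrative):

.. code-block:: python

    def test_create_file(tmp_path):
        # tmp_path is a pathlib.Path unique to this test invocation
        p = tmp_path / "hello.txt"
        p.write_text("content")
        assert p.read_text() == "content"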

File diff suppressed because it is too large


@@ -15,10 +15,12 @@
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
import ast
import os
import shutil
import sys
from textwrap import dedent
from typing import List
from typing import TYPE_CHECKING
from _pytest import __version__ as version
@@ -36,7 +38,6 @@ release = ".".join(version.split(".")[:2])
autodoc_member_order = "bysource"
autodoc_typehints = "description"
autodoc_typehints_description_target = "documented"
todo_include_todos = 1
latex_engine = "lualatex"
@@ -161,58 +162,14 @@ linkcheck_workers = 5
_repo = "https://github.com/pytest-dev/pytest"
extlinks = {
"bpo": ("https://bugs.python.org/issue%s", "bpo-%s"),
"pypi": ("https://pypi.org/project/%s/", "%s"),
"issue": (f"{_repo}/issues/%s", "issue #%s"),
"pull": (f"{_repo}/pull/%s", "pull request #%s"),
"user": ("https://github.com/%s", "@%s"),
"bpo": ("https://bugs.python.org/issue%s", "bpo-"),
"pypi": ("https://pypi.org/project/%s/", ""),
"issue": (f"{_repo}/issues/%s", "issue #"),
"pull": (f"{_repo}/pull/%s", "pull request #"),
"user": ("https://github.com/%s", "@"),
}
nitpicky = True
nitpick_ignore = [
# TODO (fix in pluggy?)
("py:class", "HookCaller"),
("py:class", "HookspecMarker"),
("py:exc", "PluginValidationError"),
# Might want to expose/TODO (https://github.com/pytest-dev/pytest/issues/7469)
("py:class", "ExceptionRepr"),
("py:class", "Exit"),
("py:class", "SubRequest"),
("py:class", "SubRequest"),
("py:class", "TerminalReporter"),
("py:class", "_pytest._code.code.TerminalRepr"),
("py:class", "_pytest.fixtures.FixtureFunctionMarker"),
("py:class", "_pytest.logging.LogCaptureHandler"),
("py:class", "_pytest.mark.structures.ParameterSet"),
# Intentionally undocumented/private
("py:class", "_pytest._code.code.Traceback"),
("py:class", "_pytest._py.path.LocalPath"),
("py:class", "_pytest.capture.CaptureResult"),
("py:class", "_pytest.compat.NotSetType"),
("py:class", "_pytest.python.PyCollector"),
("py:class", "_pytest.python.PyobjMixin"),
("py:class", "_pytest.python_api.RaisesContext"),
("py:class", "_pytest.recwarn.WarningsChecker"),
("py:class", "_pytest.reports.BaseReport"),
# Undocumented third parties
("py:class", "_tracing.TagTracerSub"),
("py:class", "warnings.WarningMessage"),
# Undocumented type aliases
("py:class", "LEGACY_PATH"),
("py:class", "_PluggyPlugin"),
# TypeVars
("py:class", "_pytest._code.code.E"),
("py:class", "_pytest.fixtures.FixtureFunction"),
("py:class", "_pytest.nodes._NodeType"),
("py:class", "_pytest.python_api.E"),
("py:class", "_pytest.recwarn.T"),
("py:class", "_pytest.runner.TResult"),
("py:obj", "_pytest.fixtures.FixtureValue"),
("py:obj", "_pytest.stash.T"),
]
# -- Options for HTML output ---------------------------------------------------
sys.path.append(os.path.abspath("_themes"))
@@ -290,7 +247,7 @@ html_sidebars = {
html_domain_indices = True
# If false, no index is generated.
html_use_index = False
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
@@ -363,9 +320,7 @@ latex_domain_indices = False
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("how-to/usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)
]
man_pages = [("usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)]
# -- Options for Epub output ---------------------------------------------------
@@ -383,7 +338,7 @@ epub_copyright = "2013, holger krekel et alii"
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
@@ -435,7 +390,6 @@ intersphinx_mapping = {
"tox": ("https://tox.wiki/en/stable", None),
"virtualenv": ("https://virtualenv.pypa.io/en/stable", None),
"setuptools": ("https://setuptools.pypa.io/en/stable", None),
"packaging": ("https://packaging.python.org/en/latest", None),
}
@@ -463,6 +417,8 @@ def configure_logging(app: "sphinx.application.Sphinx") -> None:
def setup(app: "sphinx.application.Sphinx") -> None:
# from sphinx.ext.autodoc import cut_lines
# app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_crossref_type(
"fixture",
"fixture",
@@ -493,6 +449,25 @@ def setup(app: "sphinx.application.Sphinx") -> None:
configure_logging(app)
# Make Sphinx mark classes with "final" when decorated with @final.
# We need this because we import final from pytest._compat, not from
# typing (for Python < 3.8 compat), so Sphinx doesn't detect it.
# To keep things simple we accept any `@final` decorator.
# Ref: https://github.com/pytest-dev/pytest/pull/7780
import sphinx.pycode.ast
import sphinx.pycode.parser
original_is_final = sphinx.pycode.parser.VariableCommentPicker.is_final
def patched_is_final(self, decorators: List[ast.expr]) -> bool:
    if original_is_final(self, decorators):
        return True
    return any(
        sphinx.pycode.ast.unparse(decorator) == "final" for decorator in decorators
    )
sphinx.pycode.parser.VariableCommentPicker.is_final = patched_is_final
# legacypath.py monkey-patches pytest.Testdir in. Import the file so
# that autodoc can discover references to it.
import _pytest.legacypath # noqa: F401

View File

@@ -85,6 +85,7 @@ Further topics
backwards-compatibility
deprecations
py27-py34-deprecation
contributing
development_guide

View File

@@ -18,113 +18,6 @@ Deprecated Features
Below is a complete list of all pytest features which are considered deprecated. Using those features will issue
:class:`~pytest.PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters <warnings>`.
.. _nose-deprecation:
Support for tests written for nose
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. deprecated:: 7.2
Support for running tests written for `nose <https://nose.readthedocs.io/en/latest/>`__ is now deprecated.
``nose`` has been in maintenance-only mode for years, and maintaining the plugin is not trivial as it spills
over the code base (see :issue:`9886` for more details).
setup/teardown
^^^^^^^^^^^^^^
One thing that might catch users by surprise is that plain ``setup`` and ``teardown`` methods are not pytest native;
they are in fact part of the ``nose`` support.
.. code-block:: python
class Test:
    def setup(self):
        self.resource = make_resource()

    def teardown(self):
        self.resource.close()

    def test_foo(self):
        ...

    def test_bar(self):
        ...
Native pytest support uses ``setup_method`` and ``teardown_method`` (see :ref:`xunit-method-setup`), so the above should be changed to:
.. code-block:: python
class Test:
    def setup_method(self):
        self.resource = make_resource()

    def teardown_method(self):
        self.resource.close()

    def test_foo(self):
        ...

    def test_bar(self):
        ...
This is easy to do in an entire code base by doing a simple find/replace.
@with_setup
^^^^^^^^^^^
Code using `@with_setup <with-setup-nose>`_ such as this:
.. code-block:: python
from nose.tools import with_setup


def setup_some_resource():
    ...


def teardown_some_resource():
    ...


@with_setup(setup_some_resource, teardown_some_resource)
def test_foo():
    ...
Will also need to be ported to a supported pytest style. One way to do it is using a fixture:
.. code-block:: python
import pytest


def setup_some_resource():
    ...


def teardown_some_resource():
    ...


@pytest.fixture
def some_resource():
    setup_some_resource()
    yield
    teardown_some_resource()


def test_foo(some_resource):
    ...
.. _`with-setup-nose`: https://nose.readthedocs.io/en/latest/testing_tools.html?highlight=with_setup#nose.tools.with_setup
.. _instance-collector-deprecation:
The ``pytest.Instance`` collector
@@ -177,7 +70,7 @@ arguments they only pass on to the superclass.
resolved in future versions as we slowly get rid of the :pypi:`py`
dependency (see :issue:`9283` for a longer discussion).
Due to the ongoing migration of methods like :meth:`~pytest.Item.reportinfo`
Due to the ongoing migration of methods like :meth:`~_pytest.Item.reportinfo`
which still is expected to return a ``py.path.local`` object, nodes still have
both ``fspath`` (``py.path.local``) and ``path`` (``pathlib.Path``) attributes,
no matter what argument was used in the constructor. We expect to deprecate the
@@ -185,50 +78,6 @@ no matter what argument was used in the constructor. We expect to deprecate the
.. _legacy-path-hooks-deprecated:
Configuring hook specs/impls using markers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before pluggy, pytest's plugin library, was its own package and had a clear API,
pytest just used ``pytest.mark`` to configure hooks.
The :py:func:`pytest.hookimpl` and :py:func:`pytest.hookspec` decorators
have been available for years and should be used instead.
.. code-block:: python
@pytest.mark.tryfirst
def pytest_runtest_call():
    ...


# or
def pytest_runtest_call():
    ...


pytest_runtest_call.tryfirst = True
should be changed to:
.. code-block:: python
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_call():
    ...
Changed ``hookimpl`` attributes:
* ``tryfirst``
* ``trylast``
* ``optionalhook``
* ``hookwrapper``
Changed ``hookspec`` attributes:
* ``firstresult``
* ``historic``
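The :py:func:`pytest.hookspec` decorator works the same way for the specification-side attributes listed above; a minimal sketch (``pytest_myhook`` is a hypothetical hook specification, used only for illustration):

.. code-block:: python

    import pytest


    # Previously configured by assigning e.g. `pytest_myhook.firstresult = True`:
    @pytest.hookspec(firstresult=True)
    def pytest_myhook(config):
        """Hypothetical hook specification, shown only to illustrate the decorator."""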
``py.path.local`` arguments for hooks replaced with ``pathlib.Path``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
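As a rough sketch of the migration, a hook implementation that accepted the deprecated ``py.path.local`` argument can usually be ported by accepting the corresponding ``pathlib.Path`` argument instead (parameter names differ per hook; ``pytest_collect_file`` is used here only as an example):

.. code-block:: python

    # Deprecated: ``path`` is a ``py.path.local``.
    def pytest_collect_file(path, parent):
        if path.ext == ".yaml":
            ...


    # Preferred: ``file_path`` is a ``pathlib.Path``.
    def pytest_collect_file(file_path, parent):
        if file_path.suffix == ".yaml":
            ...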
@@ -336,7 +185,7 @@ Diamond inheritance between :class:`pytest.Collector` and :class:`pytest.Item`
.. deprecated:: 7.0
Defining a custom pytest node type which is both an :class:`~pytest.Item` and a :class:`~pytest.Collector` (e.g. :class:`~pytest.File`) now issues a warning.
Defining a custom pytest node type which is both an :class:`pytest.Item <Item>` and a :class:`pytest.Collector <Collector>` (e.g. :class:`pytest.File <File>`) now issues a warning.
It was never sanely supported and triggers hard to debug errors.
Some plugins providing linting/code analysis have been using this as a hack.
@@ -348,8 +197,8 @@ Instead, a separate collector node should be used, which collects the item. See
.. _uncooperative-constructors-deprecated:
Constructors of custom :class:`~_pytest.nodes.Node` subclasses should take ``**kwargs``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Constructors of custom :class:`pytest.Node` subclasses should take ``**kwargs``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. deprecated:: 7.0
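A minimal sketch of the cooperative style (the ``CustomItem`` class and its ``spec`` argument are illustrative only): accept ``**kwargs`` and forward them to the superclass, so arguments added by pytest in the future do not break the subclass.

.. code-block:: python

    import pytest


    class CustomItem(pytest.Item):
        def __init__(self, *, spec, **kwargs):
            # Forward everything pytest passes (name, parent, path, ...) upwards.
            super().__init__(**kwargs)
            self.spec = spec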
@@ -380,25 +229,6 @@ conflicts (such as :class:`pytest.File` now taking ``path`` instead of
``fspath``, as :ref:`outlined above <node-ctor-fspath-deprecation>`), a
deprecation warning is now raised.
Applying a mark to a fixture function
-------------------------------------
.. deprecated:: 7.4
Applying a mark to a fixture function never had any effect, but it is a common user error.
.. code-block:: python
@pytest.mark.usefixtures("clean_database")
@pytest.fixture
def user() -> User:
    ...
In this case, users expected the ``usefixtures`` mark to have its intended effect of using the ``clean_database`` fixture when ``user`` was invoked, when in fact it has no effect at all.
Now pytest will issue a warning when it encounters this problem, and will raise an error in future versions.
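If the intent is for ``user`` to depend on ``clean_database``, the usual fix is to request the fixture directly instead of applying a mark (a minimal sketch):

.. code-block:: python

    @pytest.fixture
    def user(clean_database) -> User:
        ...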
Backward compatibilities in ``Parser.addoption``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -422,47 +252,6 @@ or ``pytest.warns(Warning)``.
See :ref:`warns use cases` for examples.
Returning non-None value in test functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. deprecated:: 7.2
A :class:`pytest.PytestReturnNotNoneWarning` is now emitted if a test function returns something other than `None`.
This prevents a common mistake among beginners who expect that returning a `bool` will cause a test to pass or fail, for example:
.. code-block:: python
@pytest.mark.parametrize(
    ["a", "b", "result"],
    [
        [1, 2, 5],
        [2, 3, 8],
        [5, 3, 18],
    ],
)
def test_foo(a, b, result):
    return foo(a, b) == result
Given that pytest ignores the return value, it might be surprising that such a test will never fail.
The proper fix is to change the `return` to an `assert`:
.. code-block:: python
@pytest.mark.parametrize(
    ["a", "b", "result"],
    [
        [1, 2, 5],
        [2, 3, 8],
        [5, 3, 18],
    ],
)
def test_foo(a, b, result):
    assert foo(a, b) == result
The ``--strict`` command-line option
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -486,127 +275,12 @@ The ``yield_fixture`` function/decorator
It has been so for a very long time, so can be search/replaced safely.
Removed Features and Breaking Changes
-------------------------------------
Removed Features
----------------
As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after
an appropriate period of deprecation has passed.
Some breaking changes which could not be deprecated are also listed.
Collection changes in pytest 8
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Added a new :class:`pytest.Directory` base collection node, which all collector nodes for filesystem directories are expected to subclass.
This is analogous to the existing :class:`pytest.File` for file nodes.
Changed :class:`pytest.Package` to be a subclass of :class:`pytest.Directory`.
A ``Package`` represents a filesystem directory which is a Python package,
i.e. contains an ``__init__.py`` file.
:class:`pytest.Package` now only collects files in its own directory; previously it collected recursively.
Sub-directories are collected as sub-collector nodes, thus creating a collection tree which mirrors the filesystem hierarchy.
:attr:`session.name <pytest.Session.name>` is now ``""``; previously it was the rootdir directory name.
This matches :attr:`session.nodeid <_pytest.nodes.Node.nodeid>` which has always been `""`.
Added a new :class:`pytest.Dir` concrete collection node, a subclass of :class:`pytest.Directory`.
This node represents a filesystem directory, which is not a :class:`pytest.Package`,
i.e. does not contain an ``__init__.py`` file.
Similarly to ``Package``, it only collects the files in its own directory,
while collecting sub-directories as sub-collector nodes.
Files and directories are now collected in alphabetical order jointly, unless changed by a plugin.
Previously, files were collected before directories.
The collection tree now contains directories/packages up to the :ref:`rootdir <rootdir>`,
for initial arguments that are found within the rootdir.
For files outside the rootdir, only the immediate directory/package is collected --
note however that collecting from outside the rootdir is discouraged.
As an example, given the following filesystem tree::
myroot/
pytest.ini
top/
├── aaa
│ └── test_aaa.py
├── test_a.py
├── test_b
│ ├── __init__.py
│ └── test_b.py
├── test_c.py
└── zzz
├── __init__.py
└── test_zzz.py
the collection tree, as shown by `pytest --collect-only top/` but with the otherwise-hidden :class:`~pytest.Session` node added for clarity,
is now the following::
<Session>
<Dir myroot>
<Dir top>
<Dir aaa>
<Module test_aaa.py>
<Function test_it>
<Module test_a.py>
<Function test_it>
<Package test_b>
<Module test_b.py>
<Function test_it>
<Module test_c.py>
<Function test_it>
<Package zzz>
<Module test_zzz.py>
<Function test_it>
Previously, it was::
<Session>
<Module top/test_a.py>
<Function test_it>
<Module top/test_c.py>
<Function test_it>
<Module top/aaa/test_aaa.py>
<Function test_it>
<Package test_b>
<Module test_b.py>
<Function test_it>
<Package zzz>
<Module test_zzz.py>
<Function test_it>
Code/plugins which rely on a specific shape of the collection tree might need to be updated.
:class:`pytest.Package` is no longer a :class:`pytest.Module` or :class:`pytest.File`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionchanged:: 8.0
The ``Package`` collector node designates a Python package, that is, a directory with an `__init__.py` file.
Previously ``Package`` was a subtype of ``pytest.Module`` (which represents a single Python module),
the module being the `__init__.py` file.
This has been deemed a design mistake (see :issue:`11137` and :issue:`7777` for details).
The ``path`` property of ``Package`` nodes now points to the package directory instead of the ``__init__.py`` file.
Note that a ``Module`` node for ``__init__.py`` (which is not a ``Package``) may still exist,
if it is picked up during collection (e.g. if you configured :confval:`python_files` to include ``__init__.py`` files).
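Code that relied on ``Package.path`` pointing at the ``__init__.py`` file may need adjusting. For example, a hook inspecting package nodes could look like this minimal sketch (assuming pytest 8 behavior):

.. code-block:: python

    import pytest


    def pytest_collectstart(collector):
        if isinstance(collector, pytest.Package):
            # collector.path is now the package directory itself; the
            # __init__.py file can still be reached explicitly if needed.
            init_file = collector.path / "__init__.py"
            ...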
Collecting ``__init__.py`` files no longer collects package
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionremoved:: 8.0
Running `pytest pkg/__init__.py` now collects the `pkg/__init__.py` file (module) only.
Previously, it collected the entire `pkg` package, including other test files in the directory, but excluding tests in the `__init__.py` file itself
(unless :confval:`python_files` was changed to allow `__init__.py` files).
To collect the entire package, specify just the directory: `pytest pkg`.
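The same distinction applies when invoking pytest programmatically; a minimal sketch (assuming a package named ``pkg``):

.. code-block:: python

    import pytest

    # Collects only the pkg/__init__.py module itself.
    pytest.main(["pkg/__init__.py"])

    # Collects the whole package, including its other test files.
    pytest.main(["pkg"])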
The ``pytest.collect`` module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -730,7 +404,7 @@ By using ``legacy`` you will keep using the legacy/xunit1 format when upgrading
pytest 6.0, where the default format will be ``xunit2``.
In order to let users know about the transition, pytest will issue a warning in case
the ``--junit-xml`` option is given in the command line but ``junit_family`` is not explicitly
the ``--junitxml`` option is given in the command line but ``junit_family`` is not explicitly
configured in ``pytest.ini``.
Services known to support the ``xunit2`` format:
@@ -1186,7 +860,7 @@ that are then turned into proper test methods. Example:
.. code-block:: python
def check(x, y):
assert x**x == y
assert x ** x == y
def test_squared():
@@ -1201,7 +875,7 @@ This form of test function doesn't support fixtures properly, and users should s
@pytest.mark.parametrize("x, y", [(2, 4), (3, 9)])
def test_squared(x, y):
assert x**x == y
assert x ** x == y
.. _internal classes accessed through node deprecated:

View File

@@ -25,7 +25,7 @@ example: specifying and selecting acceptance tests
self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True)
def run(self, *cmd):
"""called by test code to execute an acceptance test."""
""" called by test code to execute an acceptance test. """
self.tmpdir.chdir()
return subprocess.check_output(cmd).decode()

View File

@@ -1 +1 @@
collect_ignore = ["nonpython", "customdirectory"]
collect_ignore = ["nonpython"]

View File

@@ -1,77 +0,0 @@
.. _`custom directory collectors`:
Using a custom directory collector
====================================================
By default, pytest collects directories using :class:`pytest.Package` for directories with ``__init__.py`` files,
and :class:`pytest.Dir` for other directories.
If you want to customize how a directory is collected, you can write your own :class:`pytest.Directory` collector,
and use :hook:`pytest_collect_directory` to hook it up.
.. _`directory manifest plugin`:
A basic example for a directory manifest file
--------------------------------------------------------------
Suppose you want to customize how collection is done on a per-directory basis.
Here is an example ``conftest.py`` plugin that allows directories to contain a ``manifest.json`` file,
which defines how the collection should be done for the directory.
In this example, only a simple list of files is supported;
however, you can imagine adding other keys, such as exclusions and globs.
.. include:: customdirectory/conftest.py
:literal:
You can create a ``manifest.json`` file and some test files:
.. include:: customdirectory/tests/manifest.json
:literal:
.. include:: customdirectory/tests/test_first.py
:literal:
.. include:: customdirectory/tests/test_second.py
:literal:
.. include:: customdirectory/tests/test_third.py
:literal:
And you can now execute the test specification:
.. code-block:: pytest
customdirectory $ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
rootdir: /home/sweet/project/customdirectory
configfile: pytest.ini
collected 2 items
tests/test_first.py . [ 50%]
tests/test_second.py . [100%]
============================ 2 passed in 0.12s =============================
.. regendoc:wipe
Notice how ``test_third.py`` was not executed, because it is not listed in the manifest.
You can verify that your custom collector appears in the collection tree:
.. code-block:: pytest
customdirectory $ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
rootdir: /home/sweet/project/customdirectory
configfile: pytest.ini
collected 2 items
<Dir customdirectory>
<ManifestDirectory tests>
<Module test_first.py>
<Function test_1>
<Module test_second.py>
<Function test_2>
======================== 2 tests collected in 0.12s ========================

View File

@@ -1,28 +0,0 @@
# content of conftest.py
import json

import pytest


class ManifestDirectory(pytest.Directory):
    def collect(self):
        # The standard pytest behavior is to loop over all `test_*.py` files and
        # call `pytest_collect_file` on each file. This collector instead reads
        # the `manifest.json` file and only calls `pytest_collect_file` for the
        # files defined there.
        manifest_path = self.path / "manifest.json"
        manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
        ihook = self.ihook
        for file in manifest["files"]:
            yield from ihook.pytest_collect_file(
                file_path=self.path / file, parent=self
            )


@pytest.hookimpl
def pytest_collect_directory(path, parent):
    # Use our custom collector for directories containing a `manifest.json` file.
    if path.joinpath("manifest.json").is_file():
        return ManifestDirectory.from_parent(parent=parent, path=path)
    # Otherwise fall back to the standard behavior.
    return None

View File

@@ -1,6 +0,0 @@
{
    "files": [
        "test_first.py",
        "test_second.py"
    ]
}

View File

@@ -1,3 +0,0 @@
# content of test_first.py
def test_1():
    pass

View File

@@ -1,3 +0,0 @@
# content of test_second.py
def test_2():
    pass

View File

@@ -1,3 +0,0 @@
# content of test_third.py
def test_3():
    pass

View File

@@ -17,7 +17,7 @@ def b(a, order):
@pytest.fixture
def c(b, order):
def c(a, b, order):
order.append("c")

View File

@@ -32,4 +32,3 @@ The following examples aim at various use cases you might encounter.
special
pythoncollection
nonpython
customdirectory

View File

@@ -45,7 +45,7 @@ You can then restrict a test run to only run tests marked with ``webtest``:
$ pytest -v -m webtest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 4 items / 3 deselected / 1 selected
@@ -60,7 +60,7 @@ Or the inverse, running all tests except the webtest ones:
$ pytest -v -m "not webtest"
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 4 items / 1 deselected / 3 selected
@@ -82,7 +82,7 @@ tests based on their module, class, method, or function name:
$ pytest -v test_server.py::TestClass::test_method
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 1 item
@@ -97,7 +97,7 @@ You can also select on the class:
$ pytest -v test_server.py::TestClass
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 1 item
@@ -112,7 +112,7 @@ Or select multiple nodes:
$ pytest -v test_server.py::TestClass test_server.py::test_send_http
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 2 items
@@ -136,7 +136,7 @@ Or select multiple nodes:
Node IDs for failing tests are displayed in the test summary info
when running pytest with the ``-rf`` option. You can also
construct Node IDs from the output of ``pytest --collect-only``.
construct Node IDs from the output of ``pytest --collectonly``.
Using ``-k expr`` to select tests based on their name
-------------------------------------------------------
@@ -156,7 +156,7 @@ The expression matching is now case-insensitive.
$ pytest -v -k http # running with the above defined example module
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 4 items / 3 deselected / 1 selected
@@ -171,7 +171,7 @@ And you can also run all tests except the ones that match the keyword:
$ pytest -k "not send_http" -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 4 items / 1 deselected / 3 selected
@@ -188,7 +188,7 @@ Or to select "http" and "quick" tests:
$ pytest -k "http or quick" -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 4 items / 2 deselected / 2 selected
@@ -246,9 +246,9 @@ You can ask which markers exist for your test suite - the list includes our just
@pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. DEPRECATED, use @pytest.hookimpl(trylast=True) instead.
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
For an example on how to add and work with markers from a plugin, see
@@ -346,7 +346,7 @@ Custom marker and command line option to control test runs
Plugins can provide custom markers and implement specific behaviour
based on it. This is a self-contained example which adds a command
line option and a parametrized test function marker to run tests
specified via named environments:
specifies via named environments:
.. code-block:: python
@@ -375,7 +375,7 @@ specified via named environments:
envnames = [mark.args[0] for mark in item.iter_markers(name="env")]
if envnames:
if item.config.getoption("-E") not in envnames:
pytest.skip(f"test requires env in {envnames!r}")
pytest.skip("test requires env in {!r}".format(envnames))
A test file using this local plugin:
@@ -397,7 +397,7 @@ the test needs:
$ pytest -E stage2
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -411,7 +411,7 @@ and here is one that specifies exactly the environment needed:
$ pytest -E stage1
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -438,9 +438,9 @@ The ``--markers`` option always gives you a list of available markers:
@pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. DEPRECATED, use @pytest.hookimpl(trylast=True) instead.
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
.. _`passing callables to custom markers`:
@@ -528,7 +528,7 @@ test function. From a conftest file we can read it like this:
def pytest_runtest_setup(item):
for mark in item.iter_markers(name="glob"):
print(f"glob args={mark.args} kwargs={mark.kwargs}")
print("glob args={} kwargs={}".format(mark.args, mark.kwargs))
sys.stdout.flush()
Let's run this without capturing output and see what we get:
@@ -558,7 +558,6 @@ for your particular platform, you could use the following plugin:
# content of conftest.py
#
import sys
import pytest
ALL = set("darwin linux win32".split())
@@ -568,7 +567,7 @@ for your particular platform, you could use the following plugin:
supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers())
plat = sys.platform
if supported_platforms and plat not in supported_platforms:
pytest.skip(f"cannot run on platform {plat}")
pytest.skip("cannot run on platform {}".format(plat))
then tests will be skipped if they were specified for a different platform.
Let's write a little test file to show what this looks like:
@@ -604,14 +603,14 @@ then you will see two tests skipped and two executed tests as expected:
$ pytest -rs # this option reports skip reasons
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 4 items
test_plat.py s.s. [100%]
========================= short test summary info ==========================
SKIPPED [2] conftest.py:13: cannot run on platform linux
SKIPPED [2] conftest.py:12: cannot run on platform linux
======================= 2 passed, 2 skipped in 0.12s =======================
Note that if you specify a platform via the marker command line option like this:
@@ -620,7 +619,7 @@ Note that if you specify a platform via the marker-command line option like this
$ pytest -m linux
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 4 items / 3 deselected / 1 selected
@@ -683,7 +682,7 @@ We can now use the ``-m option`` to select one set:
$ pytest -m interface --tb=short
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 4 items / 2 deselected / 2 selected
@@ -709,7 +708,7 @@ or to select both "event" and "interface" tests:
$ pytest -m "interface or event" --tb=short
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 4 items / 1 deselected / 3 selected

View File

@@ -1,13 +1,14 @@
"""Module containing a parametrized tests testing cross-python serialization
via the pickle module."""
"""
module containing a parametrized tests testing cross-python
serialization via the pickle module.
"""
import shutil
import subprocess
import textwrap
import pytest
pythonlist = ["python3.9", "python3.10", "python3.11"]
pythonlist = ["python3.5", "python3.6", "python3.7"]
@pytest.fixture(params=pythonlist)
@@ -42,7 +43,7 @@ class Python:
)
)
)
subprocess.run((self.pythonpath, str(dumpfile)), check=True)
subprocess.check_call((self.pythonpath, str(dumpfile)))
def load_and_is_true(self, expression):
loadfile = self.picklefile.with_name("load.py")
@@ -62,7 +63,7 @@ class Python:
)
)
print(loadfile)
subprocess.run((self.pythonpath, str(loadfile)), check=True)
subprocess.check_call((self.pythonpath, str(loadfile)))
@pytest.mark.parametrize("obj", [42, {}, {1: 3}])

View File

@@ -9,7 +9,7 @@ Working with non-python tests
A basic example for specifying tests in Yaml files
--------------------------------------------------------------
.. _`pytest-yamlwsgi`: https://pypi.org/project/pytest-yamlwsgi/
.. _`pytest-yamlwsgi`: http://bitbucket.org/aafshar/pytest-yamlwsgi/src/tip/pytest_yamlwsgi.py
Here is an example ``conftest.py`` (extracted from Ali Afshar's special purpose `pytest-yamlwsgi`_ plugin). This ``conftest.py`` will collect ``test*.yaml`` files and will execute the yaml-formatted content as custom tests:
@@ -28,7 +28,7 @@ now execute the test specification:
nonpython $ pytest test_simple.yaml
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project/nonpython
collected 2 items
@@ -64,7 +64,7 @@ consulted when reporting in ``verbose`` mode:
nonpython $ pytest -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project/nonpython
collecting ... collected 2 items
@@ -90,7 +90,7 @@ interesting to just look at the collection tree:
nonpython $ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project/nonpython
collected 2 items

View File

@@ -12,7 +12,7 @@ class YamlFile(pytest.File):
# We need a yaml parser, e.g. PyYAML.
import yaml
raw = yaml.safe_load(self.path.open(encoding="utf-8"))
raw = yaml.safe_load(self.path.open())
for name, spec in sorted(raw.items()):
yield YamlItem.from_parent(self, name=name, spec=spec)
@@ -38,7 +38,6 @@ class YamlItem(pytest.Item):
" no further details known at this point.",
]
)
return super().repr_failure(excinfo)
def reportinfo(self):
return self.path, 0, f"usecase: {self.name}"

View File

@@ -4,6 +4,8 @@
Parametrizing tests
=================================================
.. currentmodule:: _pytest.python
``pytest`` makes it easy to parametrize test functions.
For basic docs, see :ref:`parametrize-basics`.
@@ -158,20 +160,19 @@ objects, they are still using the default pytest representation:
$ pytest test_time.py --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 8 items
<Dir parametrize.rst-189>
<Module test_time.py>
<Function test_timedistance_v0[a0-b0-expected0]>
<Function test_timedistance_v0[a1-b1-expected1]>
<Function test_timedistance_v1[forward]>
<Function test_timedistance_v1[backward]>
<Function test_timedistance_v2[20011212-20011211-expected0]>
<Function test_timedistance_v2[20011211-20011212-expected1]>
<Function test_timedistance_v3[forward]>
<Function test_timedistance_v3[backward]>
<Module test_time.py>
<Function test_timedistance_v0[a0-b0-expected0]>
<Function test_timedistance_v0[a1-b1-expected1]>
<Function test_timedistance_v1[forward]>
<Function test_timedistance_v1[backward]>
<Function test_timedistance_v2[20011212-20011211-expected0]>
<Function test_timedistance_v2[20011211-20011212-expected1]>
<Function test_timedistance_v3[forward]>
<Function test_timedistance_v3[backward]>
======================== 8 tests collected in 0.12s ========================
@@ -184,7 +185,7 @@ A quick port of "testscenarios"
Here is a quick port to run tests configured with :pypi:`testscenarios`,
an add-on from Robert Collins for the standard unittest framework. We
only have to work a bit to construct the correct arguments for pytest's
:py:func:`Metafunc.parametrize <pytest.Metafunc.parametrize>`:
:py:func:`Metafunc.parametrize`:
.. code-block:: python
@@ -221,7 +222,7 @@ this is a fully self-contained example which you can run with:
$ pytest test_scenarios.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 4 items
@@ -235,17 +236,16 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
$ pytest --collect-only test_scenarios.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 4 items
<Dir parametrize.rst-189>
<Module test_scenarios.py>
<Class TestSampleWithScenarios>
<Function test_demo1[basic]>
<Function test_demo2[basic]>
<Function test_demo1[advanced]>
<Function test_demo2[advanced]>
<Module test_scenarios.py>
<Class TestSampleWithScenarios>
<Function test_demo1[basic]>
<Function test_demo2[basic]>
<Function test_demo1[advanced]>
<Function test_demo2[advanced]>
======================== 4 tests collected in 0.12s ========================
@@ -314,14 +314,13 @@ Let's first see how it looks like at collection time:
$ pytest test_backends.py --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
<Dir parametrize.rst-189>
<Module test_backends.py>
<Function test_db_initialized[d1]>
<Function test_db_initialized[d2]>
<Module test_backends.py>
<Function test_db_initialized[d1]>
<Function test_db_initialized[d2]>
======================== 2 tests collected in 0.12s ========================
@@ -413,7 +412,7 @@ The result of this test will be successful:
$ pytest -v test_indirect_list.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 1 item
@@ -484,8 +483,8 @@ argument sets to use for each test function. Let's run it:
FAILED test_parametrize.py::TestClass::test_equals[1-2] - assert 1 == 2
1 failed, 2 passed in 0.12s
Parametrization with multiple fixtures
--------------------------------------
Indirect parametrization with multiple fixtures
--------------------------------------------------------------
Here is a stripped down real-life example of using parametrized
testing for testing serialization of objects between different python
@@ -503,14 +502,15 @@ Running it results in some skips if we don't have all the python interpreters in
.. code-block:: pytest
. $ pytest -rs -q multipython.py
ssssssssssss...ssssssssssss [100%]
sssssssssssssssssssssssssss [100%]
========================= short test summary info ==========================
SKIPPED [12] multipython.py:68: 'python3.9' not found
SKIPPED [12] multipython.py:68: 'python3.11' not found
3 passed, 24 skipped in 0.12s
SKIPPED [9] multipython.py:29: 'python3.5' not found
SKIPPED [9] multipython.py:29: 'python3.6' not found
SKIPPED [9] multipython.py:29: 'python3.7' not found
27 skipped in 0.12s
Parametrization of optional implementations/imports
---------------------------------------------------
Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------
If you want to compare the outcomes of several implementations of a given
API, you can write test functions that receive the already imported implementations
@@ -567,14 +567,14 @@ If you run this with reporting for skips enabled:
$ pytest -rs test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
test_module.py .s [100%]
========================= short test summary info ==========================
SKIPPED [1] test_module.py:3: could not import 'opt2': No module named 'opt2'
SKIPPED [1] conftest.py:12: could not import 'opt2': No module named 'opt2'
======================= 1 passed, 1 skipped in 0.12s =======================
You'll see that we don't have an ``opt2`` module and thus the second test run
@@ -628,7 +628,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker:
$ pytest -v -m basic
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 24 items / 21 deselected / 3 selected
@@ -657,34 +657,52 @@ Use :func:`pytest.raises` with the
:ref:`pytest.mark.parametrize ref` decorator to write parametrized tests
in which some tests raise exceptions and others do not.
``contextlib.nullcontext`` can be used to test cases that are not expected to
raise exceptions but that should result in some value. The value is given as the
``enter_result`` parameter, which will be available as the ``with`` statement's
target (``e`` in the example below).
For example:
It is helpful to define a no-op context manager ``does_not_raise`` to serve
as a complement to ``raises``. For example:
.. code-block:: python
from contextlib import nullcontext
from contextlib import contextmanager
import pytest
@contextmanager
def does_not_raise():
    yield


@pytest.mark.parametrize(
    "example_input,expectation",
    [
        (3, nullcontext(2)),
        (2, nullcontext(3)),
        (1, nullcontext(6)),
        (3, does_not_raise()),
        (2, does_not_raise()),
        (1, does_not_raise()),
        (0, pytest.raises(ZeroDivisionError)),
    ],
)
def test_division(example_input, expectation):
    """Test how much I know division."""
    with expectation as e:
        assert (6 / example_input) == e
    with expectation:
        assert (6 / example_input) is not None
In the example above, the first three test cases should run without any
exceptions, while the fourth should raise a ``ZeroDivisionError`` exception,
which is expected by pytest.
In the example above, the first three test cases should run unexceptionally,
while the fourth should raise ``ZeroDivisionError``.
If you're only supporting Python 3.7+, you can simply use ``nullcontext``
to define ``does_not_raise``:
.. code-block:: python
from contextlib import nullcontext as does_not_raise
Or, if you're supporting Python 3.3+ you can use:
.. code-block:: python
from contextlib import ExitStack as does_not_raise
Or, if desired, you can ``pip install contextlib2`` and use:
.. code-block:: python
from contextlib2 import nullcontext as does_not_raise

View File

@@ -147,16 +147,14 @@ The test collection would look like this:
$ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
configfile: pytest.ini
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project, configfile: pytest.ini
collected 2 items
<Dir pythoncollection.rst-190>
<Module check_myapp.py>
<Class CheckMyApp>
<Function simple_check>
<Function complex_check>
<Module check_myapp.py>
<Class CheckMyApp>
<Function simple_check>
<Function complex_check>
======================== 2 tests collected in 0.12s ========================
@@ -210,18 +208,15 @@ You can always peek at the collection tree without running tests like this:
. $ pytest --collect-only pythoncollection.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
configfile: pytest.ini
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project, configfile: pytest.ini
collected 3 items
<Dir pythoncollection.rst-190>
<Dir CWD>
<Module pythoncollection.py>
<Function test_function>
<Class TestClass>
<Function test_method>
<Function test_anothermethod>
<Module CWD/pythoncollection.py>
<Function test_function>
<Class TestClass>
<Function test_method>
<Function test_anothermethod>
======================== 3 tests collected in 0.12s ========================
@@ -294,9 +289,8 @@ file will be left out:
$ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
configfile: pytest.ini
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project, configfile: pytest.ini
collected 0 items
======================= no tests collected in 0.12s ========================

View File

@@ -9,7 +9,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
assertion $ pytest failure_demo.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project/assertion
collected 44 items
@@ -80,7 +80,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
def test_eq_text(self):
> assert "spam" == "eggs"
E AssertionError: assert 'spam' == 'eggs'
E
E - eggs
E + spam
@@ -92,7 +91,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
def test_eq_similar_text(self):
> assert "foo 1 bar" == "foo 2 bar"
E AssertionError: assert 'foo 1 bar' == 'foo 2 bar'
E
E - foo 2 bar
E ? ^
E + foo 1 bar
@@ -106,7 +104,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
def test_eq_multiline_text(self):
> assert "foo\nspam\nbar" == "foo\neggs\nbar"
E AssertionError: assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
E
E foo
E - eggs
E + spam
@@ -122,7 +119,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
b = "1" * 100 + "b" + "2" * 100
> assert a == b
E AssertionError: assert '111111111111...2222222222222' == '111111111111...2222222222222'
E
E Skipping 90 identical leading characters in diff, use -v to show
E Skipping 91 identical trailing characters in diff, use -v to show
E - 1111111111b222222222
@@ -140,12 +136,12 @@ Here is a nice run of several failures and how ``pytest`` presents things:
b = "1\n" * 100 + "b" + "2\n" * 100
> assert a == b
E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n...n2\n2\n2\n2\n'
E
E Skipping 190 identical leading characters in diff, use -v to show
E Skipping 191 identical trailing characters in diff, use -v to show
E 1
E 1
E 1
E 1
E 1...
E
E ...Full output truncated (7 lines hidden), use '-vv' to show
@@ -158,7 +154,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
def test_eq_list(self):
> assert [0, 1, 2] == [0, 1, 3]
E assert [0, 1, 2] == [0, 1, 3]
E
E At index 2 diff: 2 != 3
E Use -v to get more diff
@@ -172,7 +167,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
b = [0] * 100 + [2] + [3] * 100
> assert a == b
E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...]
E
E At index 100 diff: 1 != 2
E Use -v to get more diff
@@ -184,15 +178,15 @@ Here is a nice run of several failures and how ``pytest`` presents things:
def test_eq_dict(self):
> assert {"a": 0, "b": 1, "c": 0} == {"a": 0, "b": 2, "d": 0}
E AssertionError: assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
E
E Omitting 1 identical items, use -vv to show
E Differing items:
E {'b': 1} != {'b': 2}
E Left contains 1 more item:
E {'c': 0}
E Right contains 1 more item:
E {'d': 0}
E Use -v to get more diff
E {'d': 0}...
E
E ...Full output truncated (2 lines hidden), use '-vv' to show
failure_demo.py:71: AssertionError
_________________ TestSpecialisedExplanations.test_eq_set __________________
@@ -201,16 +195,16 @@ Here is a nice run of several failures and how ``pytest`` presents things:
def test_eq_set(self):
> assert {0, 10, 11, 12} == {0, 20, 21}
E assert {0, 10, 11, 12} == {0, 20, 21}
E
E AssertionError: assert {0, 10, 11, 12} == {0, 20, 21}
E Extra items in the left set:
E 10
E 11
E 12
E Extra items in the right set:
E 20
E 21
E Use -v to get more diff
E 21...
E
E ...Full output truncated (2 lines hidden), use '-vv' to show
failure_demo.py:74: AssertionError
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________
@@ -220,7 +214,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
def test_eq_longer_list(self):
> assert [1, 2] == [1, 2, 3]
E assert [1, 2] == [1, 2, 3]
E
E Right contains one more item: 3
E Use -v to get more diff
@@ -242,15 +235,15 @@ Here is a nice run of several failures and how ``pytest`` presents things:
text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail"
> assert "foo" not in text
E AssertionError: assert 'foo' not in 'some multil...nand a\ntail'
E
E 'foo' is contained here:
E some multiline
E text
E which
E includes foo
E ? +++
E and a
E tail
E and a...
E
E ...Full output truncated (2 lines hidden), use '-vv' to show
failure_demo.py:84: AssertionError
___________ TestSpecialisedExplanations.test_not_in_text_single ____________
@@ -261,7 +254,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
text = "single foo line"
> assert "foo" not in text
E AssertionError: assert 'foo' not in 'single foo line'
E
E 'foo' is contained here:
E single foo line
E ? +++
@@ -275,7 +267,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
text = "head " * 50 + "foo " + "tail " * 20
> assert "foo" not in text
E AssertionError: assert 'foo' not in 'head head h...l tail tail '
E
E 'foo' is contained here:
E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? +++
@@ -289,7 +280,6 @@ Here is a nice run of several failures and how ``pytest`` presents things:
text = "head " * 50 + "f" * 70 + "tail " * 20
> assert "f" * 70 not in text
E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head h...l tail tail '
E
E 'ffffffffffffffffff...fffffffffffffffffff' is contained here:
E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -317,9 +307,9 @@ Here is a nice run of several failures and how ``pytest`` presents things:
E ['b']
E
E Drill down into differing attribute b:
E b: 'b' != 'c'
E - c
E + b
E b: 'b' != 'c'...
E
E ...Full output truncated (3 lines hidden), use '-vv' to show
failure_demo.py:108: AssertionError
________________ TestSpecialisedExplanations.test_eq_attrs _________________
@@ -344,9 +334,9 @@ Here is a nice run of several failures and how ``pytest`` presents things:
E ['b']
E
E Drill down into differing attribute b:
E b: 'b' != 'c'
E - c
E + b
E b: 'b' != 'c'...
E
E ...Full output truncated (3 lines hidden), use '-vv' to show
failure_demo.py:120: AssertionError
______________________________ test_attribute ______________________________
@@ -683,7 +673,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
FAILED failure_demo.py::TestSpecialisedExplanations::test_eq_list - asser...
FAILED failure_demo.py::TestSpecialisedExplanations::test_eq_list_long - ...
FAILED failure_demo.py::TestSpecialisedExplanations::test_eq_dict - Asser...
FAILED failure_demo.py::TestSpecialisedExplanations::test_eq_set - assert...
FAILED failure_demo.py::TestSpecialisedExplanations::test_eq_set - Assert...
FAILED failure_demo.py::TestSpecialisedExplanations::test_eq_longer_list
FAILED failure_demo.py::TestSpecialisedExplanations::test_in_list - asser...
FAILED failure_demo.py::TestSpecialisedExplanations::test_not_in_text_multiline

View File

@@ -168,7 +168,7 @@ Now we'll get feedback on a bad argument:
If you need to provide more detailed error messages, you can use the
``type`` parameter and raise :exc:`pytest.UsageError`:
``type`` parameter and raise ``pytest.UsageError``:
.. code-block:: python
@@ -232,7 +232,7 @@ directory with the above conftest.py:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 0 items
@@ -296,7 +296,7 @@ and when running it will see a skipped "slow" test:
$ pytest -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
@@ -312,7 +312,7 @@ Or run it including the ``slow`` marked test:
$ pytest --runslow
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
@@ -342,7 +342,7 @@ Example:
def checkconfig(x):
__tracebackhide__ = True
if not hasattr(x, "config"):
pytest.fail(f"not configured: {x}")
pytest.fail("not configured: {}".format(x))
def test_something():
@@ -376,7 +376,6 @@ this to make sure unexpected exception types aren't hidden:
.. code-block:: python
import operator
import pytest
@@ -387,7 +386,7 @@ this to make sure unexpected exception types aren't hidden:
def checkconfig(x):
__tracebackhide__ = operator.methodcaller("errisinstance", ConfigException)
if not hasattr(x, "config"):
raise ConfigException(f"not configured: {x}")
raise ConfigException("not configured: {}".format(x))
def test_something():
@@ -456,7 +455,7 @@ which will add the string to the test header accordingly:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
project deps: mylib-1.1
rootdir: /home/sweet/project
collected 0 items
@@ -484,7 +483,7 @@ which will add info only when run with "--v":
$ pytest -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
info1: did you know that ...
did you?
@@ -499,7 +498,7 @@ and nothing when run plainly:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 0 items
@@ -538,7 +537,7 @@ Now we can profile which test functions execute the slowest:
$ pytest --durations=3
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 3 items
@@ -566,7 +565,6 @@ an ``incremental`` marker which is to be used on classes:
# content of conftest.py
from typing import Dict, Tuple
import pytest
# store history of failures per test class name and per index in parametrize (if parametrize used)
@@ -610,7 +608,7 @@ an ``incremental`` marker which is to be used on classes:
test_name = _test_failed_incremental[cls_name].get(parametrize_index, None)
# if name found, test has failed for the combination of class name & test name
if test_name is not None:
pytest.xfail(f"previous test failed ({test_name})")
pytest.xfail("previous test failed ({})".format(test_name))
These two hook implementations work together to abort incremental-marked
@@ -644,7 +642,7 @@ If we run this:
$ pytest -rx
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 4 items
@@ -661,7 +659,8 @@ If we run this:
test_step.py:11: AssertionError
========================= short test summary info ==========================
XFAIL test_step.py::TestUserHandling::test_deletion - reason: previous test failed (test_modification)
XFAIL test_step.py::TestUserHandling::test_deletion
reason: previous test failed (test_modification)
================== 1 failed, 2 passed, 1 xfailed in 0.12s ==================
We'll see that ``test_deletion`` was not executed because ``test_modification``
@@ -691,7 +690,7 @@ Here is an example for making a ``db`` fixture available in a directory:
pass
@pytest.fixture(scope="package")
@pytest.fixture(scope="session")
def db():
return DB()
@@ -726,14 +725,14 @@ We can run this:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 7 items
a/test_db.py F [ 14%]
a/test_db2.py F [ 28%]
b/test_error.py E [ 42%]
test_step.py .Fx. [100%]
test_step.py .Fx. [ 57%]
a/test_db.py F [ 71%]
a/test_db2.py F [ 85%]
b/test_error.py E [100%]
================================== ERRORS ==================================
_______________________ ERROR at setup of test_root ________________________
@@ -745,39 +744,39 @@ We can run this:
/home/sweet/project/b/test_error.py:1
================================= FAILURES =================================
_________________________________ test_a1 __________________________________
db = <conftest.DB object at 0xdeadbeef0002>
def test_a1(db):
> assert 0, db # to show value
E AssertionError: <conftest.DB object at 0xdeadbeef0002>
E assert 0
a/test_db.py:2: AssertionError
_________________________________ test_a2 __________________________________
db = <conftest.DB object at 0xdeadbeef0002>
def test_a2(db):
> assert 0, db # to show value
E AssertionError: <conftest.DB object at 0xdeadbeef0002>
E assert 0
a/test_db2.py:2: AssertionError
____________________ TestUserHandling.test_modification ____________________
self = <test_step.TestUserHandling object at 0xdeadbeef0003>
self = <test_step.TestUserHandling object at 0xdeadbeef0002>
def test_modification(self):
> assert 0
E assert 0
test_step.py:11: AssertionError
_________________________________ test_a1 __________________________________
db = <conftest.DB object at 0xdeadbeef0003>
def test_a1(db):
> assert 0, db # to show value
E AssertionError: <conftest.DB object at 0xdeadbeef0003>
E assert 0
a/test_db.py:2: AssertionError
_________________________________ test_a2 __________________________________
db = <conftest.DB object at 0xdeadbeef0003>
def test_a2(db):
> assert 0, db # to show value
E AssertionError: <conftest.DB object at 0xdeadbeef0003>
E assert 0
a/test_db2.py:2: AssertionError
========================= short test summary info ==========================
FAILED test_step.py::TestUserHandling::test_modification - assert 0
FAILED a/test_db.py::test_a1 - AssertionError: <conftest.DB object at 0x7...
FAILED a/test_db2.py::test_a2 - AssertionError: <conftest.DB object at 0x...
FAILED test_step.py::TestUserHandling::test_modification - assert 0
ERROR b/test_error.py::test_root
============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12s ==============
@@ -803,20 +802,20 @@ case we just write some information out to a ``failures`` file:
# content of conftest.py
import pytest
import os.path
import pytest
@pytest.hookimpl(wrapper=True, tryfirst=True)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
# execute all other hooks to obtain the report object
rep = yield
outcome = yield
rep = outcome.get_result()
# we only look at actual failing test calls, not setup/teardown
if rep.when == "call" and rep.failed:
mode = "a" if os.path.exists("failures") else "w"
with open("failures", mode, encoding="utf-8") as f:
with open("failures", mode) as f:
# let's also access a fixture for the fun of it
if "tmp_path" in item.fixturenames:
extra = " ({})".format(item.funcargs["tmp_path"])
@@ -825,8 +824,6 @@ case we just write some information out to a ``failures`` file:
f.write(rep.nodeid + extra + "\n")
return rep
if you then have failing tests:
@@ -846,7 +843,7 @@ and run them:
$ pytest test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
@@ -893,23 +890,20 @@ here is a little example implemented via a local plugin:
.. code-block:: python
# content of conftest.py
from typing import Dict
import pytest
from pytest import StashKey, CollectReport
phase_report_key = StashKey[Dict[str, CollectReport]]()
@pytest.hookimpl(wrapper=True, tryfirst=True)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
# execute all other hooks to obtain the report object
rep = yield
outcome = yield
rep = outcome.get_result()
# store test results for each phase of a call, which can
# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"
item.stash.setdefault(phase_report_key, {})[rep.when] = rep
return rep
setattr(item, "rep_" + rep.when, rep)
@pytest.fixture
@@ -917,11 +911,11 @@ here is a little example implemented via a local plugin:
yield
# request.node is an "item" because we use the default
# "function" scope
report = request.node.stash[phase_report_key]
if report["setup"].failed:
print("setting up a test failed or skipped", request.node.nodeid)
elif ("call" not in report) or report["call"].failed:
print("executing test failed or skipped", request.node.nodeid)
if request.node.rep_setup.failed:
print("setting up a test failed!", request.node.nodeid)
elif request.node.rep_setup.passed:
if request.node.rep_call.failed:
print("executing test failed", request.node.nodeid)
if you then have failing tests:
@@ -955,12 +949,12 @@ and run it:
$ pytest -s test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 3 items
test_module.py Esetting up a test failed or skipped test_module.py::test_setup_fails
Fexecuting test failed or skipped test_module.py::test_call_fails
test_module.py Esetting up a test failed! test_module.py::test_setup_fails
Fexecuting test failed test_module.py::test_call_fails
F
================================== ERRORS ==================================
@@ -1072,7 +1066,6 @@ like ``pytest-timeout`` they must be imported explicitly and passed on to pytest
# contents of app_main.py
import sys
import pytest_timeout # Third party plugin
if len(sys.argv) > 1 and sys.argv[1] == "--pytest":
@@ -1090,4 +1083,4 @@ application with standard ``pytest`` command-line options:
.. code-block:: bash
./app_main --pytest --verbose --tb=long --junit=xml=results.xml test-suite/
./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/
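The hunk above only shows the first lines of ``app_main.py``; a minimal self-contained sketch of the pattern (the ``pytest.main`` call with an explicit ``plugins`` list is the point, the argument handling around it is an assumption) could be:

.. code-block:: python

    # contents of app_main.py -- a sketch of embedding the pytest runner
    import sys

    import pytest
    import pytest_timeout  # Third party plugin; must be passed on explicitly

    if len(sys.argv) > 1 and sys.argv[1] == "--pytest":
        # forward the remaining command-line arguments to pytest
        sys.exit(pytest.main(sys.argv[2:], plugins=[pytest_timeout]))
    else:
        print("running application logic here")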


@@ -34,7 +34,7 @@ a function/method call.
**Assert** is where we look at that resulting state and check if it looks how
we'd expect after the dust has settled. It's where we gather evidence to say the
behavior does or does not align with what we expect. The ``assert`` in our test
behavior does or does not aligns with what we expect. The ``assert`` in our test
is where we take that measurement/observation and apply our judgement to it. If
something should be green, we'd say ``assert thing == "green"``.


@@ -162,7 +162,7 @@ A note about fixture cleanup
----------------------------
pytest does not do any special processing for :data:`SIGTERM <signal.SIGTERM>` and
``SIGQUIT`` signals (:data:`SIGINT <signal.SIGINT>` is handled naturally
:data:`SIGQUIT <signal.SIGQUIT>` signals (:data:`SIGINT <signal.SIGINT>` is handled naturally
by the Python runtime via :class:`KeyboardInterrupt`), so fixtures that manage external resources which are important
to be cleared when the Python process is terminated (by those signals) might leak resources.
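One common workaround, sketched here rather than taken from the text above, is to translate ``SIGTERM`` into an ordinary exception in ``conftest.py`` so that fixture finalizers still get a chance to run:

.. code-block:: python

    # content of conftest.py -- a sketch; whether this is appropriate depends
    # on what else in your process expects to handle SIGTERM
    import signal


    def _term_handler(signum, frame):
        # raising SystemExit lets pytest unwind and run fixture finalizers
        raise SystemExit(128 + signum)


    signal.signal(signal.SIGTERM, _term_handler)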


@@ -94,7 +94,7 @@ Mark Lapierre discusses the `Pros and Cons of Quarantined Tests <https://dev.to/
CI tools that rerun on failure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Azure Pipelines (the Azure cloud CI/CD tool, formerly Visual Studio Team Services or VSTS) has a feature to `identify flaky tests <https://docs.microsoft.com/en-us/previous-versions/azure/devops/2017/dec-11-vsts?view=tfs-2017#identify-flaky-tests>`_ and rerun failed tests.
Azure Pipelines (the Azure cloud CI/CD tool, formerly Visual Studio Team Services or VSTS) has a feature to `identify flaky tests <https://docs.microsoft.com/en-us/azure/devops/release-notes/2017/dec-11-vsts#identify-flaky-tests>`_ and rerun failed tests.


@@ -12,27 +12,41 @@ For development, we recommend you use :mod:`venv` for virtual environments and
as well as the ``pytest`` package itself.
This ensures your code and dependencies are isolated from your system Python installation.
Create a ``pyproject.toml`` file in the root of your repository as described in
:doc:`packaging:tutorials/packaging-projects`.
The first few lines should look like this:
Next, place a ``pyproject.toml`` file in the root of your package:
.. code-block:: toml
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
requires = ["setuptools>=42", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "PACKAGENAME"
version = "PACKAGEVERSION"
and a ``setup.cfg`` file containing your package's metadata with the following minimum content:
where ``PACKAGENAME`` and ``PACKAGEVERSION`` are the name and version of your package respectively.
.. code-block:: ini
[metadata]
name = PACKAGENAME
[options]
packages = find:
where ``PACKAGENAME`` is the name of your package.
.. note::
If your pip version is older than ``21.3``, you'll also need a ``setup.py`` file:
.. code-block:: python
from setuptools import setup
setup()
You can then install your package in "editable" mode by running from the same directory:
.. code-block:: bash
pip install -e .
pip install -e .
which lets you change your source code (both tests and application) and rerun tests at will.
@@ -51,8 +65,8 @@ Conventions for Python test discovery
* In those directories, search for ``test_*.py`` or ``*_test.py`` files, imported by their `test package name`_.
* From those files, collect test items:
* ``test`` prefixed test functions or methods outside of class.
* ``test`` prefixed test functions or methods inside ``Test`` prefixed test classes (without an ``__init__`` method). Methods decorated with ``@staticmethod`` and ``@classmethod`` are also considered.
* ``test`` prefixed test functions or methods outside of class
* ``test`` prefixed test functions or methods inside ``Test`` prefixed test classes (without an ``__init__`` method)
For examples of how to customize your test discovery, see :doc:`/example/pythoncollection`.
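As a quick illustration of the discovery rules above (a sketch; the names are arbitrary):

.. code-block:: python

    # content of test_discovery.py
    def test_function():  # collected: "test" prefixed function
        assert True


    class TestGroup:  # collected: "Test" prefixed class without __init__
        def test_method(self):  # collected: "test" prefixed method
            assert True


    class Helper:  # not collected: the class name lacks the "Test" prefix
        def test_not_collected(self):
            assert True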
@@ -75,11 +89,11 @@ to keep tests separate from actual application code (often a good idea):
.. code-block:: text
pyproject.toml
src/
mypkg/
__init__.py
app.py
view.py
setup.cfg
mypkg/
__init__.py
app.py
view.py
tests/
test_app.py
test_view.py
@@ -89,57 +103,84 @@ This has the following benefits:
* Your tests can run against an installed version after executing ``pip install .``.
* Your tests can run against the local copy with an editable install after executing ``pip install --editable .``.
For new projects, we recommend to use ``importlib`` :ref:`import mode <import-modes>`
(see which-import-mode_ for a detailed explanation).
To this end, add the following to your ``pyproject.toml``:
.. code-block:: toml
[tool.pytest.ini_options]
addopts = [
"--import-mode=importlib",
]
.. _src-layout:
Generally, but especially if you use the default import mode ``prepend``,
it is **strongly** suggested to use a ``src`` layout.
Here, your application root package resides in a sub-directory of your root,
i.e. ``src/mypkg/`` instead of ``mypkg``.
This layout prevents a lot of common pitfalls and has many benefits,
which are better explained in this excellent `blog post`_ by Ionel Cristian Mărieș.
.. _blog post: https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure>
* If you don't use an editable install and are relying on the fact that Python by default puts the current
directory in ``sys.path`` to import your package, you can execute ``python -m pytest`` to execute the tests against the
local copy directly, without using ``pip``.
.. note::
If you do not use an editable install and use the ``src`` layout as above you need to extend the Python's
search path for module files to execute the tests against the local copy directly. You can do it in an
ad-hoc manner by setting the ``PYTHONPATH`` environment variable:
.. code-block:: bash
PYTHONPATH=src pytest
or in a permanent manner by using the :confval:`pythonpath` configuration variable and adding the
following to your ``pyproject.toml``:
.. code-block:: toml
[tool.pytest.ini_options]
pythonpath = "src"
.. note::
If you do not use an editable install and not use the ``src`` layout (``mypkg`` directly in the root
directory) you can rely on the fact that Python by default puts the current directory in ``sys.path`` to
import your package and run ``python -m pytest`` to execute the tests against the local copy directly.
See :ref:`pytest vs python -m pytest` for more information about the difference between calling ``pytest`` and
``python -m pytest``.
Note that this scheme has a drawback if you are using ``prepend`` :ref:`import mode <import-modes>`
(which is the default): your test files must have **unique names**, because
``pytest`` will import them as *top-level* modules since there are no packages
to derive a full package name from. In other words, the test files in the example above will
be imported as ``test_app`` and ``test_view`` top-level modules by adding ``tests/`` to
``sys.path``.
If you need to have test modules with the same name, you might add ``__init__.py`` files to your
``tests`` folder and subfolders, changing them to packages:
.. code-block:: text
pyproject.toml
setup.cfg
mypkg/
...
tests/
__init__.py
foo/
__init__.py
test_view.py
bar/
__init__.py
test_view.py
Now pytest will load the modules as ``tests.foo.test_view`` and ``tests.bar.test_view``, allowing
you to have modules with the same name. But now this introduces a subtle problem: in order to load
the test modules from the ``tests`` directory, pytest prepends the root of the repository to
``sys.path``, which adds the side-effect that now ``mypkg`` is also importable.
This is problematic if you are using a tool like `tox`_ to test your package in a virtual environment,
because you want to test the *installed* version of your package, not the local code from the repository.
.. _`src-layout`:
In this situation, it is **strongly** suggested to use a ``src`` layout where application root package resides in a
sub-directory of your root:
.. code-block:: text
pyproject.toml
setup.cfg
src/
mypkg/
__init__.py
app.py
view.py
tests/
__init__.py
foo/
__init__.py
test_view.py
bar/
__init__.py
test_view.py
This layout prevents a lot of common pitfalls and has many benefits, which are better explained in this excellent
`blog post by Ionel Cristian Mărieș <https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure>`_.
.. note::
The new ``--import-mode=importlib`` (see :ref:`import-modes`) doesn't have
any of the drawbacks above because ``sys.path`` is not changed when importing
test modules, so users that run
into this issue are strongly encouraged to try it and report if the new option works well for them.
The ``src`` directory layout is still strongly recommended however.
Tests as part of application code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -150,11 +191,12 @@ want to distribute them along with your application:
.. code-block:: text
pyproject.toml
[src/]mypkg/
setup.cfg
mypkg/
__init__.py
app.py
view.py
tests/
test/
__init__.py
test_app.py
test_view.py
@@ -212,56 +254,6 @@ Note that this layout also works in conjunction with the ``src`` layout mentione
much less surprising.
.. _which-import-mode:
Choosing an import mode
^^^^^^^^^^^^^^^^^^^^^^^
For historical reasons, pytest defaults to the ``prepend`` :ref:`import mode <import-modes>`
instead of the ``importlib`` import mode we recommend for new projects.
The reason lies in the way the ``prepend`` mode works:
Since there are no packages to derive a full package name from,
``pytest`` will import your test files as *top-level* modules.
The test files in the first example (:ref:`src layout <src-layout>`) would be imported as
``test_app`` and ``test_view`` top-level modules by adding ``tests/`` to ``sys.path``.
This results in a drawback compared to the import mode ``importlib``:
your test files must have **unique names**.
If you need to have test modules with the same name,
as a workaround you might add ``__init__.py`` files to your ``tests`` folder and subfolders,
changing them to packages:
.. code-block:: text
pyproject.toml
mypkg/
...
tests/
__init__.py
foo/
__init__.py
test_view.py
bar/
__init__.py
test_view.py
Now pytest will load the modules as ``tests.foo.test_view`` and ``tests.bar.test_view``,
allowing you to have modules with the same name.
But now this introduces a subtle problem:
in order to load the test modules from the ``tests`` directory,
pytest prepends the root of the repository to ``sys.path``,
which adds the side-effect that now ``mypkg`` is also importable.
This is problematic if you are using a tool like tox_ to test your package in a virtual environment,
because you want to test the *installed* version of your package,
not the local code from the repository.
The ``importlib`` import mode does not have any of the drawbacks above,
because ``sys.path`` is not changed when importing test modules.
.. _`buildout`: http://www.buildout.org/en/latest/
.. _`use tox`:
@@ -271,8 +263,8 @@ tox
Once you are done with your work and want to make sure that your actual
package passes all tests you may want to look into :doc:`tox <tox:index>`, the
virtualenv test automation tool.
``tox`` helps you to setup virtualenv environments with pre-defined
virtualenv test automation tool and its :doc:`pytest support <tox:example/pytest>`.
tox helps you to setup virtualenv environments with pre-defined
dependencies and then executing a pre-configured test command with
options. It will run tests against the installed package and not
against your source code checkout, helping to detect packaging
@@ -294,20 +286,3 @@ See also `pypa/setuptools#1684 <https://github.com/pypa/setuptools/issues/1684>`
setuptools intends to
`remove the test command <https://github.com/pypa/setuptools/issues/931>`_.
Checking with flake8-pytest-style
---------------------------------
In order to ensure that pytest is being used correctly in your project,
it can be helpful to use the `flake8-pytest-style <https://github.com/m-burst/flake8-pytest-style>`_ flake8 plugin.
flake8-pytest-style checks for common mistakes and coding style violations in pytest code,
such as incorrect use of fixtures, test function names, and markers.
By using this plugin, you can catch these errors early in the development process
and ensure that your pytest code is consistent and easy to maintain.
A list of the lints detected by flake8-pytest-style can be found on its `PyPI page <https://pypi.org/project/flake8-pytest-style/>`_.
.. note::
flake8-pytest-style is not an official pytest project. Some of the rules enforce certain style choices, such as using `@pytest.fixture()` over `@pytest.fixture`, but you can configure the plugin to fit your preferred style.


@@ -16,7 +16,7 @@ import process can be controlled through the ``--import-mode`` command-line flag
these values:
* ``prepend`` (default): the directory path containing each module will be inserted into the *beginning*
of :py:data:`sys.path` if not already there, and then imported with the :func:`importlib.import_module <importlib.import_module>` function.
of :py:data:`sys.path` if not already there, and then imported with the :func:`__import__ <__import__>` builtin.
This requires test module names to be unique when the test directory tree is not arranged in
packages, because the modules will be put in :py:data:`sys.modules` after importing.
@@ -24,7 +24,7 @@ these values:
This is the classic mechanism, dating back from the time Python 2 was still supported.
* ``append``: the directory containing each module is appended to the end of :py:data:`sys.path` if not already
there, and imported with :func:`importlib.import_module <importlib.import_module>`.
there, and imported with ``__import__``.
This makes it easier to run test modules against installed versions of a package even if the
package under test has the same import root. For example:
@@ -43,21 +43,12 @@ these values:
Same as ``prepend``, requires test module names to be unique when the test directory tree is
not arranged in packages, because the modules will be put in :py:data:`sys.modules` after importing.
* ``importlib``: new in pytest-6.0, this mode uses more fine control mechanisms provided by :mod:`importlib` to import test modules. This gives full control over the import process, and doesn't require changing :py:data:`sys.path`.
* ``importlib``: new in pytest-6.0, this mode uses :mod:`importlib` to import test modules. This gives full control over the import process, and doesn't require changing :py:data:`sys.path`.
For this reason this doesn't require test module names to be unique.
One drawback however is that test modules are non-importable by each other. Also, utility
modules in the tests directories are not automatically importable because the tests directory is no longer
added to :py:data:`sys.path`.
Initially we intended to make ``importlib`` the default in future releases, however it is clear now that
it has its own set of drawbacks so the default will remain ``prepend`` for the foreseeable future.
.. seealso::
The :confval:`pythonpath` configuration variable.
For this reason this doesn't require test module names to be unique, but also makes test
modules non-importable by each other.
We intend to make ``importlib`` the default in future releases, depending on feedback.
``prepend`` and ``append`` import modes scenarios
-------------------------------------------------


@@ -11,6 +11,8 @@ funcarg mechanism, see :ref:`historical funcargs and pytest.funcargs`.
If you are new to pytest, then you can simply ignore this
section and read the other sections.
.. currentmodule:: _pytest
Shortcomings of the previous ``pytest_funcarg__`` mechanism
--------------------------------------------------------------
@@ -44,7 +46,7 @@ There are several limitations and difficulties with this approach:
2. parametrizing the "db" resource is not straight forward:
you need to apply a "parametrize" decorator or implement a
:hook:`pytest_generate_tests` hook
:py:func:`~hookspec.pytest_generate_tests` hook
calling :py:func:`~pytest.Metafunc.parametrize` which
performs parametrization at the places where the resource
is used. Moreover, you need to modify the factory to use an
@@ -92,7 +94,7 @@ Direct parametrization of funcarg resource factories
Previously, funcarg factories could not directly cause parametrization.
You needed to specify a ``@parametrize`` decorator on your test function
or implement a :hook:`pytest_generate_tests` hook to perform
or implement a ``pytest_generate_tests`` hook to perform
parametrization, i.e. calling a test multiple times with different value
sets. pytest-2.3 introduces a decorator for use on the factory itself:
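The example that follows this sentence lies outside the hunk; a minimal sketch of the decorator it refers to (the backend names and the ``connect()`` helper are illustrative assumptions) is:

.. code-block:: python

    import pytest


    @pytest.fixture(params=["mysql", "pg"])
    def db(request):
        # connect() is a hypothetical factory; every test using ``db``
        # now runs once per parameter value
        return connect(request.param)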


@@ -9,7 +9,7 @@ Get Started
Install ``pytest``
----------------------------------------
``pytest`` requires: Python 3.8+ or PyPy3.
``pytest`` requires: Python 3.7+ or PyPy3.
1. Run the following command in your command line:
@@ -22,7 +22,7 @@ Install ``pytest``
.. code-block:: bash
$ pytest --version
pytest 8.0.0rc1
pytest 7.1.2
.. _`simpletest`:
@@ -47,7 +47,7 @@ The test
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -97,30 +97,6 @@ Use the :ref:`raises <assertraises>` helper to assert that some code raises an e
with pytest.raises(SystemExit):
f()
You can also use the context provided by :ref:`raises <assertraises>` to
assert that an expected exception is part of a raised :class:`ExceptionGroup`:
.. code-block:: python
# content of test_exceptiongroup.py
import pytest
def f():
raise ExceptionGroup(
"Group message",
[
RuntimeError(),
],
)
def test_exception_in_group():
with pytest.raises(ExceptionGroup) as excinfo:
f()
assert excinfo.group_contains(RuntimeError)
assert not excinfo.group_contains(TypeError)
Execute the test function with “quiet” reporting mode:
.. code-block:: pytest


@@ -112,7 +112,7 @@ More details can be found in the :pull:`original PR <3317>`.
.. note::
in a future major release of pytest we will introduce class based markers,
at which point markers will no longer be limited to instances of :py:class:`~pytest.Mark`.
at which point markers will no longer be limited to instances of :py:class:`~_pytest.mark.Mark`.
cache plugin integrated into the core


@@ -29,7 +29,7 @@ you will see the return value of the function call:
$ pytest test_assert1.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -54,13 +54,14 @@ operators. (See :ref:`tbreportdemo`). This allows you to use the
idiomatic python constructs without boilerplate code while not losing
introspection information.
If a message is specified with the assertion like this:
However, if you specify a message with the assertion like this:
.. code-block:: python
assert a % 2 == 0, "value was odd, should be even"
it is printed alongside the assertion introspection in the traceback.
then no assertion introspection takes places at all and the message
will be simply shown in the traceback.
See :ref:`assert-details` for more information on assertion introspection.
@@ -98,27 +99,6 @@ and if you need to have access to the actual exception info you may use:
the actual exception raised. The main attributes of interest are
``.type``, ``.value`` and ``.traceback``.
Note that ``pytest.raises`` will match the exception type or any subclasses (like the standard ``except`` statement).
If you want to check if a block of code is raising an exact exception type, you need to check that explicitly:
.. code-block:: python
def test_foo_not_implemented():
def foo():
raise NotImplementedError
with pytest.raises(RuntimeError) as excinfo:
foo()
assert excinfo.type is RuntimeError
The :func:`pytest.raises` call will succeed, even though the function raises :class:`NotImplementedError`, because
:class:`NotImplementedError` is a subclass of :class:`RuntimeError`; however the following `assert` statement will
catch the problem.
Matching exception messages
~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can pass a ``match`` keyword parameter to the context-manager to test
that a regular expression matches on the string representation of an exception
(similar to the ``TestCase.assertRaisesRegex`` method from ``unittest``):
@@ -136,113 +116,36 @@ that a regular expression matches on the string representation of an exception
with pytest.raises(ValueError, match=r".* 123 .*"):
myfunc()
Notes:
The regexp parameter of the ``match`` method is matched with the ``re.search``
function, so in the above example ``match='123'`` would have worked as
well.
* The ``match`` parameter is matched with the :func:`re.search`
function, so in the above example ``match='123'`` would have worked as well.
* The ``match`` parameter also matches against `PEP-678 <https://peps.python.org/pep-0678/>`__ ``__notes__``.
.. _`assert-matching-exception-groups`:
Matching exception groups
~~~~~~~~~~~~~~~~~~~~~~~~~
You can also use the :func:`excinfo.group_contains() <pytest.ExceptionInfo.group_contains>`
method to test for exceptions returned as part of an :class:`ExceptionGroup`:
There's an alternate form of the :func:`pytest.raises` function where you pass
a function that will be executed with the given ``*args`` and ``**kwargs`` and
assert that the given exception is raised:
.. code-block:: python
def test_exception_in_group():
with pytest.raises(RuntimeError) as excinfo:
raise ExceptionGroup(
"Group message",
[
RuntimeError("Exception 123 raised"),
],
)
assert excinfo.group_contains(RuntimeError, match=r".* 123 .*")
assert not excinfo.group_contains(TypeError)
The optional ``match`` keyword parameter works the same way as for
:func:`pytest.raises`.
By default ``group_contains()`` will recursively search for a matching
exception at any level of nested ``ExceptionGroup`` instances. You can
specify a ``depth`` keyword parameter if you only want to match an
exception at a specific level; exceptions contained directly in the top
``ExceptionGroup`` would match ``depth=1``.
.. code-block:: python
def test_exception_in_group_at_given_depth():
with pytest.raises(RuntimeError) as excinfo:
raise ExceptionGroup(
"Group message",
[
RuntimeError(),
ExceptionGroup(
"Nested group",
[
TypeError(),
],
),
],
)
assert excinfo.group_contains(RuntimeError, depth=1)
assert excinfo.group_contains(TypeError, depth=2)
assert not excinfo.group_contains(RuntimeError, depth=2)
assert not excinfo.group_contains(TypeError, depth=1)
Alternate form (legacy)
~~~~~~~~~~~~~~~~~~~~~~~
There is an alternate form where you pass
a function that will be executed, along ``*args`` and ``**kwargs``, and :func:`pytest.raises`
will execute the function with the arguments and assert that the given exception is raised:
.. code-block:: python
def func(x):
if x <= 0:
raise ValueError("x needs to be larger than zero")
pytest.raises(ValueError, func, x=-1)
pytest.raises(ExpectedException, func, *args, **kwargs)
The reporter will provide you with helpful output in case of failures such as *no
exception* or *wrong exception*.
This form was the original :func:`pytest.raises` API, developed before the ``with`` statement was
added to the Python language. Nowadays, this form is rarely used, with the context-manager form (using ``with``)
being considered more readable.
Nonetheless, this form is fully supported and not deprecated in any way.
xfail mark and pytest.raises
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is also possible to specify a ``raises`` argument to
:ref:`pytest.mark.xfail <pytest.mark.xfail ref>`, which checks that the test is failing in a more
Note that it is also possible to specify a "raises" argument to
``pytest.mark.xfail``, which checks that the test is failing in a more
specific way than just having any exception raised:
.. code-block:: python
def f():
raise IndexError()
@pytest.mark.xfail(raises=IndexError)
def test_f():
f()
This will only "xfail" if the test fails by raising ``IndexError`` or subclasses.
* Using :ref:`pytest.mark.xfail <pytest.mark.xfail ref>` with the ``raises`` parameter is probably better for something
like documenting unfixed bugs (where the test describes what "should" happen) or bugs in dependencies.
* Using :func:`pytest.raises` is likely to be better for cases where you are
testing exceptions your own code is deliberately raising, which is the majority of cases.
Using :func:`pytest.raises` is likely to be better for cases where you are
testing exceptions your own code is deliberately raising, whereas using
``@pytest.mark.xfail`` with a check function is probably better for something
like documenting unfixed bugs (where the test describes what "should" happen)
or bugs in dependencies.
.. _`assertwarns`:
@@ -280,7 +183,7 @@ if you run this module:
$ pytest test_assert2.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -294,7 +197,6 @@ if you run this module:
set2 = set("8035")
> assert set1 == set2
E AssertionError: assert {'0', '1', '3', '8'} == {'0', '3', '5', '8'}
E
E Extra items in the left set:
E '1'
E Extra items in the right set:
@@ -336,7 +238,7 @@ file which provides an alternative explanation for ``Foo`` objects:
if isinstance(left, Foo) and isinstance(right, Foo) and op == "==":
return [
"Comparing Foo instances:",
f" vals: {left.val} != {right.val}",
" vals: {} != {}".format(left.val, right.val),
]
now, given this test module:
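The test module itself sits outside this hunk; a sketch of what it would contain (the ``Foo`` name comes from the snippet above, the rest is assumed) is:

.. code-block:: python

    # content of test_foocompare.py
    class Foo:
        def __init__(self, val):
            self.val = val

        def __eq__(self, other):
            return self.val == other.val


    def test_compare():
        f1 = Foo(1)
        f2 = Foo(2)
        assert f1 == f2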


@@ -86,7 +86,7 @@ If you then run it with ``--lf``:
$ pytest --lf
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
run-last-failure: rerun previous 2 failures
@@ -132,7 +132,7 @@ of ``FF`` and dots):
$ pytest --ff
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 50 items
run-last-failure: rerun previous 2 failures first
@@ -176,21 +176,14 @@ with more recent files coming first.
Behavior when no tests failed in the last run
---------------------------------------------
The ``--lfnf/--last-failed-no-failures`` option governs the behavior of ``--last-failed``.
Determines whether to execute tests when there are no previously (known)
failures or when no cached ``lastfailed`` data was found.
There are two options:
* ``all``: when there are no known test failures, runs all tests (the full test suite). This is the default.
* ``none``: when there are no known test failures, just emits a message stating this and exit successfully.
Example:
When no tests failed in the last run, or when no cached ``lastfailed`` data was
found, ``pytest`` can be configured either to run all of the tests or no tests,
using the ``--last-failed-no-failures`` option, which takes one of the following values:
.. code-block:: bash
pytest --last-failed --last-failed-no-failures all # runs the full test suite (default behavior)
pytest --last-failed --last-failed-no-failures none # runs no tests and exits successfully
pytest --last-failed --last-failed-no-failures all # run all tests (default behavior)
pytest --last-failed --last-failed-no-failures none # run no tests and exit
The new config.cache object
--------------------------------
@@ -206,6 +199,7 @@ across pytest invocations:
# content of test_caching.py
import pytest
import time
def expensive_computation():
@@ -213,12 +207,12 @@ across pytest invocations:
@pytest.fixture
def mydata(pytestconfig):
val = pytestconfig.cache.get("example/value", None)
def mydata(request):
val = request.config.cache.get("example/value", None)
if val is None:
expensive_computation()
val = 42
pytestconfig.cache.set("example/value", val)
request.config.cache.set("example/value", val)
return val
@@ -240,7 +234,7 @@ If you run this command for the first time, you can see the print statement:
> assert mydata == 23
E assert 42 == 23
test_caching.py:19: AssertionError
test_caching.py:20: AssertionError
-------------------------- Captured stdout setup ---------------------------
running expensive computation...
========================= short test summary info ==========================
@@ -263,7 +257,7 @@ the cache and nothing will be printed:
> assert mydata == 23
E assert 42 == 23
test_caching.py:19: AssertionError
test_caching.py:20: AssertionError
========================= short test summary info ==========================
FAILED test_caching.py::test_function - assert 42 == 23
1 failed in 0.12s
@@ -281,7 +275,7 @@ You can always peek at the content of the cache using the
$ pytest --cache-show
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
cachedir: /home/sweet/project/.pytest_cache
--------------------------- cache values for '*' ---------------------------
@@ -303,7 +297,7 @@ filtering:
$ pytest --cache-show example/*
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
cachedir: /home/sweet/project/.pytest_cache
----------------------- cache values for 'example/*' -----------------------


@@ -83,7 +83,7 @@ of the failing function and hide the other one:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items


@@ -28,7 +28,7 @@ Running pytest now produces this output:
$ pytest test_show_warnings.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -42,8 +42,6 @@ Running pytest now produces this output:
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
======================= 1 passed, 1 warning in 0.12s =======================
.. _`controlling-warnings`:
Controlling warnings
--------------------
@@ -109,18 +107,6 @@ When a warning matches more than one option in the list, the action for the last
is performed.
.. note::
The ``-W`` flag and the ``filterwarnings`` ini option use warning filters that are
similar in structure, but each configuration option interprets its filter
differently. For example, *message* in ``filterwarnings`` is a string containing a
regular expression that the start of the warning message must match,
case-insensitively, while *message* in ``-W`` is a literal string that the start of
the warning message must contain (case-insensitively), ignoring any whitespace at
the start or end of message. Consult the `warning filter`_ documentation for more
details.
.. _`filterwarnings`:
``@pytest.mark.filterwarnings``
@@ -190,14 +176,11 @@ using an external system.
DeprecationWarning and PendingDeprecationWarning
------------------------------------------------
By default pytest will display ``DeprecationWarning`` and ``PendingDeprecationWarning`` warnings from
user code and third-party libraries, as recommended by :pep:`565`.
This helps users keep their code modern and avoid breakages when deprecated warnings are effectively removed.
However, in the specific case where users capture any type of warnings in their test, either with
:func:`pytest.warns`, :func:`pytest.deprecated_call` or using the :ref:`recwarn <recwarn>` fixture,
no warning will be displayed at all.
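For reference, capturing a deprecation explicitly looks like this (a sketch; ``old_api`` is an assumed example function):

.. code-block:: python

    import warnings

    import pytest


    def old_api():
        warnings.warn("use new_api() instead", DeprecationWarning)
        return 1


    def test_old_api_is_deprecated():
        with pytest.deprecated_call():
            old_api()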
Sometimes it is useful to hide some specific deprecation warnings that happen in code that you have no control over
(such as third-party libraries), in which case you might use the warning filters options (ini or marks) to ignore
those warnings.
@@ -214,9 +197,6 @@ For example:
This will ignore all warnings of type ``DeprecationWarning`` where the start of the message matches
the regular expression ``".*U.*mode is deprecated"``.
See :ref:`@pytest.mark.filterwarnings <filterwarnings>` and
:ref:`Controlling warnings <controlling-warnings>` for more examples.
.. note::
If warnings are configured at the interpreter level, using
@@ -265,15 +245,14 @@ when called with a ``17`` argument.
Asserting warnings with the warns function
------------------------------------------
You can check that code raises a particular warning using :func:`pytest.warns`,
which works in a similar manner to :ref:`raises <assertraises>` (except that
:ref:`raises <assertraises>` does not capture all exceptions, only the
``expected_exception``):
which works in a similar manner to :ref:`raises <assertraises>`:
.. code-block:: python
import warnings
import pytest
@@ -281,35 +260,21 @@ which works in a similar manner to :ref:`raises <assertraises>` (except that
with pytest.warns(UserWarning):
warnings.warn("my warning", UserWarning)
The test will fail if the warning in question is not raised. Use the keyword
argument ``match`` to assert that the warning matches a text or regex.
To match a literal string that may contain regular expression metacharacters like ``(`` or ``.``, the pattern can
first be escaped with ``re.escape``.
The test will fail if the warning in question is not raised. The keyword
argument ``match`` to assert that the exception matches a text or regex::
Some examples:
.. code-block:: pycon
>>> with warns(UserWarning, match="must be 0 or None"):
>>> with warns(UserWarning, match='must be 0 or None'):
... warnings.warn("value must be 0 or None", UserWarning)
...
>>> with warns(UserWarning, match=r"must be \d+$"):
>>> with warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("value must be 42", UserWarning)
...
>>> with warns(UserWarning, match=r"must be \d+$"):
>>> with warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("this is not here", UserWarning)
...
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted...
>>> with warns(UserWarning, match=re.escape("issue with foo() func")):
... warnings.warn("issue with foo() func")
...
You can also call :func:`pytest.warns` on a function or code string:
.. code-block:: python
@@ -382,6 +347,8 @@ warnings: a WarningsRecorder instance. To view the recorded warnings, you can
iterate over this instance, call ``len`` on it to get the number of recorded
warnings, or index into it to get a particular recorded warning.
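A short sketch of those access patterns, using the ``recwarn`` fixture:

.. code-block:: python

    import warnings


    def test_hello(recwarn):
        warnings.warn("hello", UserWarning)
        assert len(recwarn) == 1  # call len() to count recorded warnings
        assert recwarn[0].category is UserWarning  # index into the recorder
        w = recwarn.pop(UserWarning)  # or pop a particular warning
        assert str(w.message) == "hello"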
.. currentmodule:: _pytest.warnings
Full API: :class:`~_pytest.recwarn.WarningsRecorder`.
.. _`warns use cases`:
@@ -391,32 +358,20 @@ Additional use cases of warnings in tests
Here are some use cases involving warnings that often come up in tests, and suggestions on how to deal with them:
- To ensure that **at least one** of the indicated warnings is issued, use:
- To ensure that **at least one** warning is emitted, use:
.. code-block:: python
def test_warning():
with pytest.warns((RuntimeWarning, UserWarning)):
...
- To ensure that **only** certain warnings are issued, use:
.. code-block:: python
def test_warning(recwarn):
with pytest.warns():
...
assert len(recwarn) == 1
user_warning = recwarn.pop(UserWarning)
assert issubclass(user_warning.category, UserWarning)
- To ensure that **no** warnings are emitted, use:
.. code-block:: python
def test_warning():
with warnings.catch_warnings():
warnings.simplefilter("error")
...
with warnings.catch_warnings():
warnings.simplefilter("error")
...
- To suppress warnings, use:
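The code for that last bullet is cut off by the hunk boundary; a sketch of the usual pattern is:

.. code-block:: python

    import warnings


    def test_ignore_warnings():
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ...  # code that would otherwise emit warnings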


@@ -30,7 +30,7 @@ then you can just invoke ``pytest`` directly:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -58,7 +58,7 @@ and functions, including from test modules:
$ pytest --doctest-modules
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
@@ -126,17 +126,14 @@ pytest also introduces new options:
in expected doctest output.
* ``NUMBER``: when enabled, floating-point numbers only need to match as far as
the precision you have written in the expected doctest output. The numbers are
compared using :func:`pytest.approx` with relative tolerance equal to the
precision. For example, the following output would only need to match to 2
decimal places when comparing ``3.14`` to
``pytest.approx(math.pi, rel=10**-2)``::
the precision you have written in the expected doctest output. For example,
the following output would only need to match to 2 decimal places::
>>> math.pi
3.14
If you wrote ``3.1416`` then the actual output would need to match to
approximately 4 decimal places; and so on.
If you wrote ``3.1416`` then the actual output would need to match to 4
decimal places; and so on.
This avoids false positives caused by limited floating-point precision, like
this::
@@ -242,6 +239,7 @@ which can then be used in your doctests directly:
>>> len(a)
10
"""
pass
Note that like the normal ``conftest.py``, the fixtures are discovered in the directory tree conftest is in.
Meaning that if you put your doctest with your source code, the relevant conftest.py needs to be in the same directory tree.


@@ -135,6 +135,10 @@ Warning about unraisable exceptions and unhandled thread exceptions
.. versionadded:: 6.2
.. note::
These features only work on Python>=3.8.
Unhandled exceptions are exceptions that are raised in a situation in which
they cannot propagate to a caller. The most common case is an exception raised
in a :meth:`__del__ <object.__del__>` implementation.
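A tiny sketch of the ``__del__`` case (the warning name in the comment assumes current pytest behavior):

.. code-block:: python

    class BrokenDel:
        def __del__(self):
            # this exception has no caller to propagate to
            raise ValueError("__del__ is broken")


    def test_unraisable():
        obj = BrokenDel()
        del obj  # reported by pytest as a PytestUnraisableExceptionWarning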


@@ -398,9 +398,8 @@ access the fixture function:
.. code-block:: python
# content of conftest.py
import smtplib
import pytest
import smtplib
@pytest.fixture(scope="module")
@@ -433,7 +432,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this:
$ pytest test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
@@ -610,10 +609,10 @@ Here's what that might look like:
.. code-block:: python
# content of test_emaillib.py
from emaillib import Email, MailAdminClient
import pytest
from emaillib import Email, MailAdminClient
@pytest.fixture
def mail_admin():
@@ -631,7 +630,6 @@ Here's what that might look like:
def receiving_user(mail_admin):
user = mail_admin.create_user()
yield user
user.clear_mailbox()
mail_admin.delete_user(user)
@@ -685,10 +683,10 @@ Here's how the previous example would look using the ``addfinalizer`` method:
.. code-block:: python
# content of test_emaillib.py
from emaillib import Email, MailAdminClient
import pytest
from emaillib import Email, MailAdminClient
@pytest.fixture
def mail_admin():
@@ -738,87 +736,6 @@ does offer some nuances for when you're in a pinch.
. [100%]
1 passed in 0.12s
Note on finalizer order
""""""""""""""""""""""""
Finalizers are executed in a first-in-last-out order.
For yield fixtures, the first teardown code to run is from the right-most fixture, i.e. the last test parameter.
.. code-block:: python
# content of test_finalizers.py
import pytest
def test_bar(fix_w_yield1, fix_w_yield2):
print("test_bar")
@pytest.fixture
def fix_w_yield1():
yield
print("after_yield_1")
@pytest.fixture
def fix_w_yield2():
yield
print("after_yield_2")
.. code-block:: pytest
$ pytest -s test_finalizers.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
test_finalizers.py test_bar
.after_yield_2
after_yield_1
============================ 1 passed in 0.12s =============================
For finalizers, the first fixture to run is last call to `request.addfinalizer`.
.. code-block:: python
# content of test_finalizers.py
from functools import partial
import pytest
@pytest.fixture
def fix_w_finalizers(request):
request.addfinalizer(partial(print, "finalizer_2"))
request.addfinalizer(partial(print, "finalizer_1"))
def test_bar(fix_w_finalizers):
print("test_bar")
.. code-block:: pytest
$ pytest -s test_finalizers.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
test_finalizers.py test_bar
.finalizer_1
finalizer_2
============================ 1 passed in 0.12s =============================
This is so because yield fixtures use `addfinalizer` behind the scenes: when the fixture executes, `addfinalizer` registers a function that resumes the generator, which in turn calls the teardown code.
.. _`safe teardowns`:
Safe teardowns
@@ -835,10 +752,10 @@ above):
.. code-block:: python
# content of test_emaillib.py
from emaillib import Email, MailAdminClient
import pytest
from emaillib import Email, MailAdminClient
@pytest.fixture
def setup():
@@ -1113,9 +1030,8 @@ read an optional server URL from the test module which uses our fixture:
.. code-block:: python
# content of conftest.py
import smtplib
import pytest
import smtplib
@pytest.fixture(scope="module")
@@ -1123,7 +1039,7 @@ read an optional server URL from the test module which uses our fixture:
server = getattr(request.module, "smtpserver", "smtp.gmail.com")
smtp_connection = smtplib.SMTP(server, 587, timeout=5)
yield smtp_connection
print(f"finalizing {smtp_connection} ({server})")
print("finalizing {} ({})".format(smtp_connection, server))
smtp_connection.close()
We use the ``request.module`` attribute to optionally obtain an
@@ -1237,6 +1153,7 @@ If the data created by the factory requires managing, the fixture can take care
@pytest.fixture
def make_customer_record():
created_records = []
def _make_customer_record(name):
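The rest of that example falls outside the hunk; a complete sketch of the factory-with-cleanup pattern (the record shape and the cleanup call are assumptions) looks like:

.. code-block:: python

    import pytest


    @pytest.fixture
    def make_customer_record():
        created_records = []

        def _make_customer_record(name):
            record = {"name": name, "orders": []}  # assumed record shape
            created_records.append(record)
            return record

        yield _make_customer_record

        for record in created_records:
            record.clear()  # stand-in for whatever cleanup the records need


    def test_customer_records(make_customer_record):
        customer_1 = make_customer_record("Lisa")
        customer_2 = make_customer_record("Mike")
        assert customer_1["name"] == "Lisa"
        assert customer_2["name"] == "Mike"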
@@ -1271,21 +1188,20 @@ configured in multiple ways.
Extending the previous example, we can flag the fixture to create two
``smtp_connection`` fixture instances which will cause all tests using the fixture
to run twice. The fixture function gets access to each parameter
through the special :py:class:`request <pytest.FixtureRequest>` object:
through the special :py:class:`request <FixtureRequest>` object:
.. code-block:: python
# content of conftest.py
import smtplib
import pytest
import smtplib
@pytest.fixture(scope="module", params=["smtp.gmail.com", "mail.python.org"])
def smtp_connection(request):
smtp_connection = smtplib.SMTP(request.param, 587, timeout=5)
yield smtp_connection
print(f"finalizing {smtp_connection}")
print("finalizing {}".format(smtp_connection))
smtp_connection.close()
The main change is the declaration of ``params`` with
@@ -1414,30 +1330,27 @@ Running the above tests results in the following test IDs being used:
$ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 12 items
collected 11 items
<Dir fixtures.rst-208>
<Module test_anothersmtp.py>
<Function test_showhelo[smtp.gmail.com]>
<Function test_showhelo[mail.python.org]>
<Module test_emaillib.py>
<Function test_email_received>
<Module test_finalizers.py>
<Function test_bar>
<Module test_ids.py>
<Function test_a[spam]>
<Function test_a[ham]>
<Function test_b[eggs]>
<Function test_b[1]>
<Module test_module.py>
<Function test_ehlo[smtp.gmail.com]>
<Function test_noop[smtp.gmail.com]>
<Function test_ehlo[mail.python.org]>
<Function test_noop[mail.python.org]>
<Module test_anothersmtp.py>
<Function test_showhelo[smtp.gmail.com]>
<Function test_showhelo[mail.python.org]>
<Module test_emaillib.py>
<Function test_email_received>
<Module test_ids.py>
<Function test_a[spam]>
<Function test_a[ham]>
<Function test_b[eggs]>
<Function test_b[1]>
<Module test_module.py>
<Function test_ehlo[smtp.gmail.com]>
<Function test_noop[smtp.gmail.com]>
<Function test_ehlo[mail.python.org]>
<Function test_noop[mail.python.org]>
======================= 12 tests collected in 0.12s ========================
======================= 11 tests collected in 0.12s ========================
.. _`fixture-parametrize-marks`:
@@ -1469,7 +1382,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:
$ pytest test_fixture_marks.py -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 3 items
@@ -1519,7 +1432,7 @@ Here we declare an ``app`` fixture which receives the previously defined
$ pytest -v test_appsetup.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 2 items
@@ -1590,7 +1503,7 @@ to show the setup/teardown flow:
def test_2(otherarg, modarg):
print(f" RUN test2 with otherarg {otherarg} and modarg {modarg}")
print(" RUN test2 with otherarg {} and modarg {}".format(otherarg, modarg))
Let's run the tests in verbose mode and with looking at the print-output:
@@ -1599,7 +1512,7 @@ Let's run the tests in verbose mode and with looking at the print-output:
$ pytest -v -s test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: /home/sweet/project
collecting ... collected 8 items
@@ -1691,7 +1604,6 @@ and declare its use in a test module via a ``usefixtures`` marker:
# content of test_setenv.py
import os
import pytest
@@ -1699,7 +1611,7 @@ and declare its use in a test module via a ``usefixtures`` marker:
class TestDirectoryInit:
def test_cwd_starts_empty(self):
assert os.listdir(os.getcwd()) == []
with open("myfile", "w", encoding="utf-8") as f:
with open("myfile", "w") as f:
f.write("hello")
def test_cwd_again_starts_empty(self):
@@ -1753,7 +1665,8 @@ into an ini-file:
def my_fixture_that_sadly_wont_use_my_other_fixture():
...
This generates a deprecation warning, and will become an error in Pytest 8.
Currently this will not generate any error or warning, but this is intended
to be handled by :issue:`3664`.
.. _`override fixtures`:
@@ -1771,6 +1684,8 @@ Given the tests file structure is:
::
tests/
__init__.py
conftest.py
# content of tests/conftest.py
import pytest
@@ -1785,6 +1700,8 @@ Given the tests file structure is:
assert username == 'username'
subfolder/
__init__.py
conftest.py
# content of tests/subfolder/conftest.py
import pytest
@@ -1793,8 +1710,8 @@ Given the tests file structure is:
def username(username):
return 'overridden-' + username
test_something_else.py
# content of tests/subfolder/test_something_else.py
test_something.py
# content of tests/subfolder/test_something.py
def test_username(username):
assert username == 'overridden-username'
@@ -1810,6 +1727,8 @@ Given the tests file structure is:
::
tests/
__init__.py
conftest.py
# content of tests/conftest.py
import pytest
@@ -1851,6 +1770,8 @@ Given the tests file structure is:
::
tests/
__init__.py
conftest.py
# content of tests/conftest.py
import pytest
@@ -1887,6 +1808,8 @@ Given the tests file structure is:
::
tests/
__init__.py
conftest.py
# content of tests/conftest.py
import pytest


@@ -55,13 +55,6 @@ These options can also be customized through ``pytest.ini`` file:
log_format = %(asctime)s %(levelname)s %(message)s
log_date_format = %Y-%m-%d %H:%M:%S
Specific loggers can be disabled via ``--log-disable={logger_name}``.
This argument can be passed multiple times:
.. code-block:: bash
pytest --log-disable=main --log-disable=testing
Further it is possible to disable reporting of captured content (stdout,
stderr and logs) on failed tests completely with:
@@ -80,6 +73,7 @@ messages. This is supported by the ``caplog`` fixture:
def test_foo(caplog):
caplog.set_level(logging.INFO)
pass
By default the level is set on the root logger,
however as a convenience it is also possible to set the log level of any
@@ -89,6 +83,7 @@ logger:
def test_foo(caplog):
caplog.set_level(logging.CRITICAL, logger="root.baz")
pass
The log levels set are restored automatically at the end of the test.
@@ -166,19 +161,14 @@ the records for the ``setup`` and ``call`` stages during teardown like so:
x.message for x in caplog.get_records(when) if x.levelno == logging.WARNING
]
if messages:
pytest.fail(f"warning messages encountered during testing: {messages}")
pytest.fail(
"warning messages encountered during testing: {}".format(messages)
)
The full API is available at :class:`pytest.LogCaptureFixture`.
.. warning::
The ``caplog`` fixture adds a handler to the root logger to capture logs. If the root logger is
modified during a test, for example with ``logging.config.dictConfig``, this handler may be
removed and cause no logs to be captured. To avoid this, ensure that any root logger configuration
only adds to the existing handlers.
.. _live_logs:
@@ -190,8 +180,8 @@ logging records as they are emitted directly into the console.
You can specify the logging level for which log records with equal or higher
level are printed to the console by passing ``--log-cli-level``. This setting
accepts the logging level names or numeric values as seen in
:ref:`logging's documentation <python:levels>`.
accepts the logging level names as seen in python's documentation or an integer
as the logging level num.
Additionally, you can also specify ``--log-cli-format`` and
``--log-cli-date-format`` which mirror and default to ``--log-format`` and
@@ -212,8 +202,9 @@ Note that relative paths for the log-file location, whether passed on the CLI or
config file, are always resolved relative to the current working directory.
You can also specify the logging level for the log file by passing
``--log-file-level``. This setting accepts the logging level names or numeric
values as seen in :ref:`logging's documentation <python:levels>`.
``--log-file-level``. This setting accepts the logging level names as seen in
python's documentation(ie, uppercased level names) or an integer as the logging
level num.
Additionally, you can also specify ``--log-file-format`` and
``--log-file-date-format`` which are equal to ``--log-format`` and
@@ -241,7 +232,7 @@ through ``add_color_level()``. Example:
.. code-block:: python
@pytest.hookimpl(trylast=True)
@pytest.hookimpl
def pytest_configure(config):
logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
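The call that actually registers the color sits below the hunk boundary; a sketch of how the example continues (the level/color pair is illustrative) is:

.. code-block:: python

    import logging

    import pytest


    @pytest.hookimpl(trylast=True)
    def pytest_configure(config):
        logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
        # change the color used for INFO-level records in live logs
        logging_plugin.log_cli_handler.formatter.add_color_level(logging.INFO, "cyan")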


@@ -3,7 +3,7 @@
How to monkeypatch/mock modules and environments
================================================================
.. currentmodule:: pytest
.. currentmodule:: _pytest.monkeypatch
Sometimes tests need to invoke functionality which depends
on global settings or which invokes code which cannot be easily
@@ -14,16 +14,17 @@ environment variable, or to modify ``sys.path`` for importing.
The ``monkeypatch`` fixture provides these helper methods for safely patching and mocking
functionality in tests:
* :meth:`monkeypatch.setattr(obj, name, value, raising=True) <pytest.MonkeyPatch.setattr>`
* :meth:`monkeypatch.delattr(obj, name, raising=True) <pytest.MonkeyPatch.delattr>`
* :meth:`monkeypatch.setitem(mapping, name, value) <pytest.MonkeyPatch.setitem>`
* :meth:`monkeypatch.delitem(obj, name, raising=True) <pytest.MonkeyPatch.delitem>`
* :meth:`monkeypatch.setenv(name, value, prepend=None) <pytest.MonkeyPatch.setenv>`
* :meth:`monkeypatch.delenv(name, raising=True) <pytest.MonkeyPatch.delenv>`
* :meth:`monkeypatch.syspath_prepend(path) <pytest.MonkeyPatch.syspath_prepend>`
* :meth:`monkeypatch.chdir(path) <pytest.MonkeyPatch.chdir>`
* :meth:`monkeypatch.context() <pytest.MonkeyPatch.context>`
.. code-block:: python
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.setattr("somemodule.obj.name", value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
monkeypatch.setitem(mapping, name, value)
monkeypatch.delitem(obj, name, raising=True)
monkeypatch.setenv(name, value, prepend=None)
monkeypatch.delenv(name, raising=True)
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
All modifications will be undone after the requesting
test function or fixture has finished. The ``raising``
@@ -54,16 +55,13 @@ during a test.
5. Use :py:meth:`monkeypatch.syspath_prepend <MonkeyPatch.syspath_prepend>` to modify ``sys.path`` which will also
call ``pkg_resources.fixup_namespace_packages`` and :py:func:`importlib.invalidate_caches`.
6. Use :py:meth:`monkeypatch.context <MonkeyPatch.context>` to apply patches only in a specific scope, which can help
control teardown of complex fixtures or patches to the stdlib.
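For item 6, a minimal sketch of scoped patching with ``monkeypatch.context`` (``functools.partial`` is just an arbitrary target for illustration):

.. code-block:: python

    import functools


    def test_context_scoped_patch(monkeypatch):
        with monkeypatch.context() as m:
            m.setattr(functools, "partial", 3)
            assert functools.partial == 3
        # the patch is already undone here, before the test finishes
        assert callable(functools.partial)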
See the `monkeypatch blog post`_ for some introduction material
and a discussion of its motivation.
.. _`monkeypatch blog post`: https://tetamap.wordpress.com//2009/03/03/monkeypatching-in-unit-tests-done-right/
Monkeypatching functions
------------------------
Simple example: monkeypatching functions
----------------------------------------
Consider a scenario where you are working with user directories. In the context of
testing, you do not want your test to depend on the running user. ``monkeypatch``
@@ -135,10 +133,10 @@ This can be done in our test file by defining a class to represent ``r``.
# this is the previous code block example
import app
# custom class to be the mock return value
# will override the requests.Response returned from requests.get
class MockResponse:
# mock json() method always returns a specific testing dictionary
@staticmethod
def json():
@@ -146,6 +144,7 @@ This can be done in our test file by defining a class to represent ``r``.
def test_get_json(monkeypatch):
# Any arguments may be passed and mock_get() will always return our
# mocked object, which only has the .json() method.
def mock_get(*args, **kwargs):
@@ -180,7 +179,6 @@ This mock can be shared across tests using a ``fixture``:
# app.py that includes the get_json() function
import app
# custom class to be the mock return value of requests.get()
class MockResponse:
@staticmethod
@@ -358,6 +356,7 @@ For testing purposes we can patch the ``DEFAULT_CONFIG`` dictionary to specific
def test_connection(monkeypatch):
# Patch the values of DEFAULT_CONFIG to specific
# testing values only for this test.
monkeypatch.setitem(app.DEFAULT_CONFIG, "user", "test_user")
@@ -382,6 +381,7 @@ You can use the :py:meth:`monkeypatch.delitem <MonkeyPatch.delitem>` to remove v
def test_missing_user(monkeypatch):
# patch the DEFAULT_CONFIG to be missing the 'user' key
monkeypatch.delitem(app.DEFAULT_CONFIG, "user", raising=False)
@@ -402,7 +402,6 @@ separate fixtures for each potential mock and reference them in the needed tests
# app.py with the connection string function
import app
# all of the mocks are moved into separated fixtures
@pytest.fixture
def mock_test_user(monkeypatch):
@@ -424,6 +423,7 @@ separate fixtures for each potential mock and reference them in the needed tests
# tests reference only the fixture mocks that are needed
def test_connection(mock_test_user, mock_test_database):
expected = "User Id=test_user; Location=test_db;"
result = app.create_connection_string()
@@ -431,11 +431,12 @@ separate fixtures for each potential mock and reference them in the needed tests
def test_missing_user(mock_missing_default_user):
with pytest.raises(KeyError):
_ = app.create_connection_string()
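Piecing the snippets above together, the fixtures referenced by these tests look roughly like this sketch (``app`` is the example module with ``DEFAULT_CONFIG`` and ``create_connection_string()``):

.. code-block:: python

    import pytest

    import app


    @pytest.fixture
    def mock_test_user(monkeypatch):
        """Set the DEFAULT_CONFIG user to test_user."""
        monkeypatch.setitem(app.DEFAULT_CONFIG, "user", "test_user")


    @pytest.fixture
    def mock_missing_default_user(monkeypatch):
        """Remove the user key from DEFAULT_CONFIG."""
        monkeypatch.delitem(app.DEFAULT_CONFIG, "user", raising=False)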
.. currentmodule:: pytest
.. currentmodule:: _pytest.monkeypatch
API Reference
-------------


@@ -5,9 +5,6 @@ How to run tests written for nose
``pytest`` has basic support for running tests written for nose_.
.. warning::
This functionality has been deprecated and is likely to be removed in ``pytest 8.x``.
.. _nosestyle:
Usage
@@ -26,8 +23,8 @@ make use of pytest's capabilities.
Supported nose Idioms
----------------------
* ``setup()`` and ``teardown()`` at module/class/method level: any function or method called ``setup`` will be called during the setup phase for each test, same for ``teardown``.
* ``SkipTest`` exceptions and markers
* setup and teardown at module/class/method level
* SkipTest exceptions and markers
* setup/teardown decorators
* ``__test__`` attribute on modules/classes/functions
* general usage of nose utilities
@@ -47,7 +44,8 @@ Unsupported idioms / known issues
- nose imports test modules with the same import path (e.g.
``tests.test_mode``) but different file system paths
(e.g. ``tests/test_mode.py`` and ``other/tests/test_mode.py``)
by extending sys.path/import semantics. pytest does not do that. Note that
by extending sys.path/import semantics. pytest does not do that
but there is discussion in :issue:`268` for adding some support. Note that
`nose2 choose to avoid this sys.path/import hackery <https://nose2.readthedocs.io/en/latest/differences.html#test-discovery-and-loading>`_.
If you place a conftest.py file in the root directory of your project
@@ -65,34 +63,16 @@ Unsupported idioms / known issues
- no nose-configuration is recognized.
- ``yield``-based methods are
- ``yield``-based methods are unsupported as of pytest 4.1.0. They are
fundamentally incompatible with pytest because they don't support fixtures
properly since collection and test execution are separated.
Here is a table comparing the default supported naming conventions for both
nose and pytest.
========= ========================== ======= =====
what default naming convention pytest nose
========= ========================== ======= =====
module ``test*.py``
module ``test_*.py`` ✅ ✅
module ``*_test.py``
module ``*_tests.py``
class ``*(unittest.TestCase)`` ✅ ✅
method ``test_*`` ✅ ✅
class ``Test*``
method ``test_*``
function ``test_*``
========= ========================== ======= =====
Migrating from nose to pytest
------------------------------
`nose2pytest <https://github.com/pytest-dev/nose2pytest>`_ is a Python script
and pytest plugin to help convert Nose-based tests into pytest-based tests.
Specifically, the script transforms ``nose.tools.assert_*`` function calls into
Specifically, the script transforms nose.tools.assert_* function calls into
raw assert statements, while preserving format of original arguments
as much as possible.
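For instance, a before/after sketch of the kind of rewrite nose2pytest performs (the ``add`` function is a made-up stand-in):

.. code-block:: python

    def add(x, y):
        return x + y


    def test_add():
        # nose style, before conversion:
        #     nose.tools.assert_equal(add(2, 3), 5)
        # raw assert produced by nose2pytest:
        assert add(2, 3) == 5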


@@ -12,15 +12,8 @@ Examples for modifying traceback printing:
.. code-block:: bash
pytest --showlocals # show local variables in tracebacks
pytest -l # show local variables (shortcut)
pytest --no-showlocals # hide local variables (if addopts enables them)
pytest --capture=fd # default, capture at the file descriptor level
pytest --capture=sys # capture at the sys level
pytest --capture=no # don't capture
pytest -s # don't capture (shortcut)
pytest --capture=tee-sys # capture to logs but also output to sys level streams
pytest --showlocals # show local variables in tracebacks
pytest -l # show local variables (shortcut)
pytest --tb=auto # (default) 'long' tracebacks for the first and last
# entry, but 'short' style for the other entries
@@ -42,16 +35,6 @@ option you make sure a trace is shown.
Verbosity
--------------------------------------------------
Examples for modifying printing verbosity:
.. code-block:: bash
pytest --quiet # quiet - less verbose - mode
pytest -q # quiet - less verbose - mode (shortcut)
pytest -v # increase verbosity, display individual test names
pytest -vv # more verbose, display more details from the test output
pytest -vvv # not a standard option, but may be used for even more detail in certain setups
The ``-v`` flag controls the verbosity of pytest output in various aspects: test session progress, assertion
details when tests fail, fixtures details with ``--fixtures``, etc.
@@ -100,7 +83,6 @@ Executing pytest normally gives us this output (we are skipping the header to fo
fruits2 = ["banana", "apple", "orange", "melon", "kiwi"]
> assert fruits1 == fruits2
E AssertionError: assert ['banana', 'a...elon', 'kiwi'] == ['banana', 'a...elon', 'kiwi']
E
E At index 2 diff: 'grapes' != 'orange'
E Use -v to get more diff
@@ -112,7 +94,6 @@ Executing pytest normally gives us this output (we are skipping the header to fo
number_to_text2 = {str(x * 10): x * 10 for x in range(5)}
> assert number_to_text1 == number_to_text2
E AssertionError: assert {'0': 0, '1':..., '3': 3, ...} == {'0': 0, '10'...'30': 30, ...}
E
E Omitting 1 identical items, use -vv to show
E Left contains 4 more items:
E {'1': 1, '2': 2, '3': 3, '4': 4}
@@ -164,15 +145,12 @@ Now we can increase pytest's verbosity:
fruits2 = ["banana", "apple", "orange", "melon", "kiwi"]
> assert fruits1 == fruits2
E AssertionError: assert ['banana', 'a...elon', 'kiwi'] == ['banana', 'a...elon', 'kiwi']
E
E At index 2 diff: 'grapes' != 'orange'
E
E Full diff:
E [
E 'banana',
E 'apple',...
E
E ...Full output truncated (7 lines hidden), use '-vv' to show
E - ['banana', 'apple', 'orange', 'melon', 'kiwi']
E ? ^ ^^
E + ['banana', 'apple', 'grapes', 'melon', 'kiwi']
E ? ^ ^ +
test_verbosity_example.py:8: AssertionError
____________________________ test_numbers_fail _____________________________
@@ -182,15 +160,15 @@ Now we can increase pytest's verbosity:
number_to_text2 = {str(x * 10): x * 10 for x in range(5)}
> assert number_to_text1 == number_to_text2
E AssertionError: assert {'0': 0, '1':..., '3': 3, ...} == {'0': 0, '10'...'30': 30, ...}
E
E Omitting 1 identical items, use -vv to show
E Left contains 4 more items:
E {'1': 1, '2': 2, '3': 3, '4': 4}
E Right contains 4 more items:
E {'10': 10, '20': 20, '30': 30, '40': 40}
E ...
E Full diff:
E - {'0': 0, '10': 10, '20': 20, '30': 30, '40': 40}...
E
E ...Full output truncated (16 lines hidden), use '-vv' to show
E ...Full output truncated (3 lines hidden), use '-vv' to show
test_verbosity_example.py:14: AssertionError
___________________________ test_long_text_fail ____________________________
@@ -236,20 +214,12 @@ Now if we increase verbosity even more:
fruits2 = ["banana", "apple", "orange", "melon", "kiwi"]
> assert fruits1 == fruits2
E AssertionError: assert ['banana', 'apple', 'grapes', 'melon', 'kiwi'] == ['banana', 'apple', 'orange', 'melon', 'kiwi']
E
E At index 2 diff: 'grapes' != 'orange'
E
E Full diff:
E [
E 'banana',
E 'apple',
E - 'orange',
E ? ^ ^^
E + 'grapes',
E ? ^ ^ +
E 'melon',
E 'kiwi',
E ]
E - ['banana', 'apple', 'orange', 'melon', 'kiwi']
E ? ^ ^^
E + ['banana', 'apple', 'grapes', 'melon', 'kiwi']
E ? ^ ^ +
test_verbosity_example.py:8: AssertionError
____________________________ test_numbers_fail _____________________________
@@ -259,30 +229,16 @@ Now if we increase verbosity even more:
number_to_text2 = {str(x * 10): x * 10 for x in range(5)}
> assert number_to_text1 == number_to_text2
E AssertionError: assert {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4} == {'0': 0, '10': 10, '20': 20, '30': 30, '40': 40}
E
E Common items:
E {'0': 0}
E Left contains 4 more items:
E {'1': 1, '2': 2, '3': 3, '4': 4}
E Right contains 4 more items:
E {'10': 10, '20': 20, '30': 30, '40': 40}
E
E Full diff:
E {
E '0': 0,
E - '10': 10,
E ? - -
E + '1': 1,
E - '20': 20,
E ? - -
E + '2': 2,
E - '30': 30,
E ? - -
E + '3': 3,
E - '40': 40,
E ? - -
E + '4': 4,
E }
E - {'0': 0, '10': 10, '20': 20, '30': 30, '40': 40}
E ? - - - - - - - -
E + {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4}
test_verbosity_example.py:14: AssertionError
___________________________ test_long_text_fail ____________________________
@@ -313,20 +269,6 @@ situations, for example you are shown even fixtures that start with ``_`` if you
Using higher verbosity levels (``-vvv``, ``-vvvv``, ...) is supported, but has no effect in pytest itself at the moment;
however, some plugins might make use of higher verbosity.
.. _`pytest.fine_grained_verbosity`:
Fine-grained verbosity
~~~~~~~~~~~~~~~~~~~~~~
In addition to specifying the application wide verbosity level, it is possible to control specific aspects independently.
This is done by setting a verbosity level in the configuration file for the specific aspect of the output.
:confval:`verbosity_assertions`: Controls how verbose the assertion output should be when pytest is executed. Running
``pytest --no-header`` with a value of ``2`` would have the same output as the previous example, but each test inside
the file is shown by a single character in the output.
(Note: currently this is the only option available, but more might be added in the future).
.. _`pytest.detailed_failed_tests_usage`:
Producing a detailed summary report
@@ -381,7 +323,7 @@ Example:
$ pytest -ra
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 6 items
@@ -406,7 +348,8 @@ Example:
test_example.py:14: AssertionError
========================= short test summary info ==========================
SKIPPED [1] test_example.py:22: skipping this test
XFAIL test_example.py::test_xfail - reason: xfailing this test
XFAIL test_example.py::test_xfail
reason: xfailing this test
XPASS test_example.py::test_xpass always xfail
ERROR test_example.py::test_error - assert 0
FAILED test_example.py::test_fail - assert 0
@@ -437,7 +380,7 @@ More than one character can be used, so for example to only see failed and skipp
$ pytest -rfs
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 6 items
@@ -472,7 +415,7 @@ captured output:
$ pytest -rpP
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 6 items
@@ -535,7 +478,7 @@ integration servers, use this invocation:
.. code-block:: bash
pytest --junit-xml=path
pytest --junitxml=path
to create an XML file at ``path``.


@@ -56,7 +56,7 @@ them in turn:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 3 items
@@ -167,7 +167,7 @@ Let's run this:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 3 items


@@ -51,9 +51,6 @@ Here is a little annotated list for some popular plugins:
* :pypi:`pytest-flakes`:
check source code with pyflakes.
* :pypi:`allure-pytest`:
report test results via `allure-framework <https://github.com/allure-framework/>`_.
To see a complete list of all plugins with their latest testing
status against different pytest and Python versions, please visit
:ref:`plugin-list`.


@@ -69,7 +69,6 @@ It is also possible to skip the whole module using
.. code-block:: python
import sys
import pytest
if not sys.platform.startswith("win"):
@@ -410,7 +409,6 @@ test instances when using parametrize:
.. code-block:: python
import sys
import pytest


@@ -24,8 +24,8 @@ created in the `base temporary directory`_.
d = tmp_path / "sub"
d.mkdir()
p = d / "hello.txt"
p.write_text(CONTENT, encoding="utf-8")
assert p.read_text(encoding="utf-8") == CONTENT
p.write_text(CONTENT)
assert p.read_text() == CONTENT
assert len(list(tmp_path.iterdir())) == 1
assert 0
@@ -36,7 +36,7 @@ Running this would result in a passed test except for the last
$ pytest test_tmp_path.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -51,8 +51,8 @@ Running this would result in a passed test except for the last
d = tmp_path / "sub"
d.mkdir()
p = d / "hello.txt"
p.write_text(CONTENT, encoding="utf-8")
assert p.read_text(encoding="utf-8") == CONTENT
p.write_text(CONTENT)
assert p.read_text() == CONTENT
assert len(list(tmp_path.iterdir())) == 1
> assert 0
E assert 0
@@ -104,21 +104,8 @@ The ``tmpdir`` and ``tmpdir_factory`` fixtures
The ``tmpdir`` and ``tmpdir_factory`` fixtures are similar to ``tmp_path``
and ``tmp_path_factory``, but use/return legacy `py.path.local`_ objects
rather than standard :class:`pathlib.Path` objects.
.. note::
These days, it is preferred to use ``tmp_path`` and ``tmp_path_factory``.
In order to help modernize old code bases, one can run pytest with the legacypath
plugin disabled:
.. code-block:: bash
pytest -p no:legacypath
This will trigger errors on tests using the legacy paths.
It can also be permanently set as part of the :confval:`addopts` parameter in the
config file.
rather than standard :class:`pathlib.Path` objects. These days, prefer to
use ``tmp_path`` and ``tmp_path_factory``.
See :fixture:`tmpdir <tmpdir>` :fixture:`tmpdir_factory <tmpdir_factory>`
API for details.
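For completeness, a sketch of the preferred session-scoped counterpart, ``tmp_path_factory`` (directory and file names are arbitrary):

.. code-block:: python

    def test_factory_created_dir(tmp_path_factory):
        # create a fresh directory under the base temporary directory
        data_dir = tmp_path_factory.mktemp("data")
        target = data_dir / "hello.txt"
        target.write_text("hello", encoding="utf-8")
        assert target.read_text(encoding="utf-8") == "hello"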
@@ -131,12 +118,10 @@ The default base temporary directory
Temporary directories are by default created as sub-directories of
the system temporary directory. The base name will be ``pytest-NUM`` where
``NUM`` will be incremented with each test run.
By default, entries older than 3 temporary directories will be removed.
This behavior can be configured with :confval:`tmp_path_retention_count` and
:confval:`tmp_path_retention_policy`.
``NUM`` will be incremented with each test run. Moreover, entries older
than 3 temporary directories will be removed.
Using the ``--basetemp``
The number of entries currently cannot be changed, but using the ``--basetemp``
option will remove the directory before every run, effectively meaning the temporary directories
of only the most recent run will be kept.


@@ -27,15 +27,12 @@ Almost all ``unittest`` features are supported:
* ``setUpClass/tearDownClass``;
* ``setUpModule/tearDownModule``;
.. _`pytest-subtests`: https://github.com/pytest-dev/pytest-subtests
.. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol
Additionally, :ref:`subtests <python:subtests>` are supported by the
`pytest-subtests`_ plugin.
Up to this point pytest does not have support for the following features:
* `load_tests protocol`_;
* :ref:`subtests <python:subtests>`;
Benefits out of the box
-----------------------
@@ -118,7 +115,6 @@ fixture definition:
# content of test_unittest_db.py
import unittest
import pytest
@@ -140,7 +136,7 @@ the ``self.db`` values in the traceback:
$ pytest test_unittest_db.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 2 items
@@ -157,7 +153,7 @@ the ``self.db`` values in the traceback:
E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef0001>
E assert 0
test_unittest_db.py:11: AssertionError
test_unittest_db.py:10: AssertionError
___________________________ MyTest.test_method2 ____________________________
self = <test_unittest_db.MyTest testMethod=test_method2>
@@ -167,7 +163,7 @@ the ``self.db`` values in the traceback:
E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef0001>
E assert 0
test_unittest_db.py:14: AssertionError
test_unittest_db.py:13: AssertionError
========================= short test summary info ==========================
FAILED test_unittest_db.py::MyTest::test_method1 - AssertionError: <conft...
FAILED test_unittest_db.py::MyTest::test_method2 - AssertionError: <conft...
@@ -198,19 +194,19 @@ creation of a per-test temporary directory:
.. code-block:: python
# content of test_unittest_cleandir.py
import unittest
import os
import pytest
import unittest
class MyTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def initdir(self, tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path) # change to pytest-provided temporary directory
tmp_path.joinpath("samplefile.ini").write_text("# testdata", encoding="utf-8")
tmp_path.joinpath("samplefile.ini").write_text("# testdata")
def test_method(self):
with open("samplefile.ini", encoding="utf-8") as f:
with open("samplefile.ini") as f:
s = f.read()
assert "testdata" in s


@@ -35,43 +35,31 @@ Pytest supports several ways to run and select tests from the command-line.
.. code-block:: bash
pytest -k 'MyClass and not method'
pytest -k "MyClass and not method"
This will run tests which contain names that match the given *string expression* (case-insensitive),
which can include Python operators that use filenames, class names and function names as variables.
The example above will run ``TestMyClass.test_something`` but not ``TestMyClass.test_method_simple``.
Use ``""`` instead of ``''`` in expression when running this on Windows
.. _nodeids:
**Run tests by collection arguments**
**Run tests by node ids**
Pass the module filename relative to the working directory, followed by specifiers like the class name and function name
separated by ``::`` characters, and parameters from parameterization enclosed in ``[]``.
Each collected test is assigned a unique ``nodeid`` which consist of the module filename followed
by specifiers like class names, function names and parameters from parametrization, separated by ``::`` characters.
To run a specific test within a module:
.. code-block:: bash
pytest tests/test_mod.py::test_func
pytest test_mod.py::test_func
To run all tests in a class:
Another example specifying a test method in the command line:
.. code-block:: bash
pytest tests/test_mod.py::TestClass
Specifying a specific test method:
.. code-block:: bash
pytest tests/test_mod.py::TestClass::test_method
Specifying a specific parametrization of a test:
.. code-block:: bash
pytest tests/test_mod.py::test_func[x1,y2]
pytest test_mod.py::TestClass::test_method
**Run tests by marker expressions**
@@ -184,8 +172,7 @@ You can invoke ``pytest`` from Python code directly:
this acts as if you would call "pytest" from the command line.
It will not raise :class:`SystemExit` but return the :ref:`exit code <exit-codes>` instead.
If you don't pass it any arguments, ``main`` reads the arguments from the command line arguments of the process (:data:`sys.argv`), which may be undesirable.
You can pass in options and arguments explicitly:
You can pass in options and arguments:
.. code-block:: python
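    # for example (a sketch; the -x flag and the test directory name are illustrative):
    retcode = pytest.main(["-x", "mytestdir"])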
@@ -196,9 +183,8 @@ You can specify additional plugins to ``pytest.main``:
.. code-block:: python
# content of myinvoke.py
import sys
import pytest
import sys
class MyPlugin:


@@ -56,17 +56,23 @@ The remaining hook functions will not be called in this case.
.. _`hookwrapper`:
hook wrappers: executing around other hooks
hookwrapper: executing around other hooks
-------------------------------------------------
.. currentmodule:: _pytest.core
pytest plugins can implement hook wrappers which wrap the execution
of other hook implementations. A hook wrapper is a generator function
which yields exactly once. When pytest invokes hooks it first executes
hook wrappers and passes the same arguments as to the regular hooks.
At the yield point of the hook wrapper pytest will execute the next hook
implementations and return their result to the yield point, or will
propagate an exception if they raised.
implementations and return their result to the yield point in the form of
a :py:class:`Result <pluggy._Result>` instance which encapsulates a result or
exception info. The yield point itself will thus typically not raise
exceptions (unless there are bugs).
Here is an example definition of a hook wrapper:
@@ -75,35 +81,26 @@ Here is an example definition of a hook wrapper:
import pytest
@pytest.hookimpl(wrapper=True)
@pytest.hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
do_something_before_next_hook_executes()
# If the outcome is an exception, will raise the exception.
res = yield
outcome = yield
# outcome.excinfo may be None or a (cls, val, tb) tuple
new_res = post_process_result(res)
res = outcome.get_result() # will raise if outcome was exception
# Override the return value to the plugin system.
return new_res
post_process_result(res)
The hook wrapper needs to return a result for the hook, or raise an exception.
outcome.force_result(new_res) # to override the return value to the plugin system
In many cases, the wrapper only needs to perform tracing or other side effects
around the actual hook implementations, in which case it can return the result
value of the ``yield``. The simplest (though useless) hook wrapper is
``return (yield)``.
In other cases, the wrapper wants to adjust or adapt the result, in which case
it can return a new value. If the result of the underlying hook is a mutable
object, the wrapper may modify that result, but it's probably better to avoid it.
If the hook implementation failed with an exception, the wrapper can handle that
exception using a ``try-except-finally`` around the ``yield``, by propagating it,
suppressing it, or raising a different exception entirely.
Note that hook wrappers don't return results themselves, they merely
perform tracing or other side effects around the actual hook implementations.
If the result of the underlying hook is a mutable object, they may modify
that result but it's probably better to avoid it.
For more information, consult the
:ref:`pluggy documentation about hook wrappers <pluggy:hookwrappers>`.
:ref:`pluggy documentation about hookwrappers <pluggy:hookwrappers>`.
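Putting those pieces together, a self-contained sketch of a wrapper in the ``hookwrapper=True`` style (the timing around the test call is purely illustrative):

.. code-block:: python

    import time

    import pytest


    @pytest.hookimpl(hookwrapper=True)
    def pytest_pyfunc_call(pyfuncitem):
        start = time.monotonic()
        outcome = yield  # all other pytest_pyfunc_call implementations run here
        # outcome.excinfo is None unless one of them raised
        print(f"{pyfuncitem.name} took {time.monotonic() - start:.3f}s")
        outcome.get_result()  # re-raises if the underlying hook failed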
.. _plugin-hookorder:
@@ -133,14 +130,11 @@ after others, i.e. the position in the ``N``-sized list of functions:
# Plugin 3
@pytest.hookimpl(wrapper=True)
@pytest.hookimpl(hookwrapper=True)
def pytest_collection_modifyitems(items):
# will execute even before the tryfirst one above!
try:
return (yield)
finally:
# will execute after all non-wrappers executed
...
outcome = yield
# will execute after all non-hookwrappers executed
Here is the order of execution:
@@ -155,13 +149,13 @@ Here is the order of execution:
Plugin1).
4. Plugin3's pytest_collection_modifyitems then executing the code after the yield
point. The yield receives the result from calling the non-wrappers, or raises
an exception if the non-wrappers raised.
point. The yield receives a :py:class:`Result <pluggy._Result>` instance which encapsulates
the result from calling the non-wrappers. Wrappers shall not modify the result.
It's possible to use ``tryfirst`` and ``trylast`` also on hook wrappers
in which case it will influence the ordering of hook wrappers among each other.
It's possible to use ``tryfirst`` and ``trylast`` also in conjunction with
``hookwrapper=True`` in which case it will influence the ordering of hookwrappers
among each other.
.. _`declaringhooks`:
Declaring new hooks
------------------------
@@ -171,11 +165,13 @@ Declaring new hooks
This is a quick overview on how to add new hooks and how they work in general, but a more complete
overview can be found in `the pluggy documentation <https://pluggy.readthedocs.io/en/latest/>`__.
.. currentmodule:: _pytest.hookspec
Plugins and ``conftest.py`` files may declare new hooks that can then be
implemented by other plugins in order to alter behaviour or interact with
the new plugin:
.. autofunction:: _pytest.hookspec.pytest_addhooks
.. autofunction:: pytest_addhooks
:noindex:
Hooks are usually declared as do-nothing functions that contain only
@@ -198,7 +194,7 @@ class or module can then be passed to the ``pluginmanager`` using the ``pytest_a
.. code-block:: python
def pytest_addhooks(pluginmanager):
"""This example assumes the hooks are grouped in the 'sample_hook' module."""
""" This example assumes the hooks are grouped in the 'sample_hook' module. """
from my_app.tests import sample_hook
pluginmanager.add_hookspecs(sample_hook)
@@ -253,19 +249,18 @@ and use pytest_addoption as follows:
# contents of hooks.py
# Use firstresult=True because we only want one plugin to define this
# default value
@hookspec(firstresult=True)
def pytest_config_file_default_value():
"""Return the default value for the config file command line option."""
""" Return the default value for the config file command line option. """
# contents of myplugin.py
def pytest_addhooks(pluginmanager):
"""This example assumes the hooks are grouped in the 'hooks' module."""
""" This example assumes the hooks are grouped in the 'hooks' module. """
from . import hooks
pluginmanager.add_hookspecs(hooks)
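To round the example off, a sketch of how ``myplugin.py`` might then consume the hook when registering its option (the ``--config-file`` option name is illustrative):

.. code-block:: python

    def pytest_addoption(parser, pluginmanager):
        default_value = pluginmanager.hook.pytest_config_file_default_value()
        parser.addoption(
            "--config-file",
            help="Config file to use, defaults to %(default)s",
            default=default_value,
        )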


@@ -147,32 +147,29 @@ Making your plugin installable by others
If you want to make your plugin externally available, you
may define a so-called entry point for your distribution so
that ``pytest`` finds your plugin module. Entry points are
a feature that is provided by :std:doc:`setuptools <setuptools:index>`.
that ``pytest`` finds your plugin module. Entry points are
a feature that is provided by :std:doc:`setuptools:index`. pytest looks up
the ``pytest11`` entrypoint to discover its
plugins and you can thus make your plugin available by defining
it in your setuptools-invocation:
pytest looks up the ``pytest11`` entrypoint to discover its
plugins, thus you can make your plugin available by defining
it in your ``pyproject.toml`` file.
.. sourcecode:: python
.. sourcecode:: toml
# sample ./setup.py file
from setuptools import setup
# sample ./pyproject.toml file
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "myproject"
classifiers = [
"Framework :: Pytest",
]
[project.entry-points.pytest11]
myproject = "myproject.pluginmodule"
setup(
name="myproject",
packages=["myproject"],
# the following makes a plugin available to pytest
entry_points={"pytest11": ["name_of_plugin = myproject.pluginmodule"]},
# custom PyPI classifier for pytest plugins
classifiers=["Framework :: Pytest"],
)
If a package is installed this way, ``pytest`` will load
``myproject.pluginmodule`` as a plugin which can define
:ref:`hooks <hook-reference>`. Confirm registration with ``pytest --trace-config``
:ref:`hooks <hook-reference>`.
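As an illustration, such a plugin module only needs to define hook functions or fixtures; a minimal sketch (the ``--name`` option is made up to match the ``hello`` example further below):

.. code-block:: python

    # myproject/pluginmodule.py
    def pytest_addoption(parser):
        parser.addoption(
            "--name", action="store", default="World", help="default name for hello()"
        )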
.. note::
@@ -370,7 +367,7 @@ string value of ``Hello World!`` if we do not supply a value or ``Hello
def _hello(name=None):
if not name:
name = request.config.getoption("name")
return f"Hello {name}!"
return "Hello {name}!".format(name=name)
return _hello
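Assuming the factory above is exposed as a fixture named ``hello``, a test using it could look like this sketch:

.. code-block:: python

    def test_hello(hello):
        assert hello() == "Hello World!"
        assert hello("pytest") == "Hello pytest!"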
@@ -448,9 +445,8 @@ in our ``pytest.ini`` to tell pytest where to look for example files.
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
configfile: pytest.ini
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project, configfile: pytest.ini
collected 2 items
test_example.py .. [100%]


@@ -32,7 +32,7 @@ which will usually be called once for all the functions:
.. code-block:: python
def setup_module(module):
"""setup any state specific to the execution of the given module."""
""" setup any state specific to the execution of the given module."""
def teardown_module(module):
@@ -63,8 +63,6 @@ and after all test methods of the class are called:
setup_class.
"""
.. _xunit-method-setup:
Method and function level setup/teardown
-----------------------------------------------


@@ -2,10 +2,16 @@
.. sidebar:: Next Open Trainings
- `Professional Testing with Python <https://python-academy.com/courses/python_course_testing.html>`_, via `Python Academy <https://www.python-academy.com/>`_, **March 5th to 7th 2024** (3 day in-depth training), **Leipzig, Germany / Remote**
- `PyConDE <https://2022.pycon.de/program/W93DBJ/>`__, April 11th 2022 (3h), Berlin, Germany
- `PyConIT <https://pycon.it/en/talk/pytest-simple-rapid-and-fun-testing-with-python>`__, June 3rd 2022 (4h), Florence, Italy
- `Professional Testing with Python <https://python-academy.com/courses/python_course_testing.html>`_, via `Python Academy <https://www.python-academy.com/>`_, March 7th to 9th 2023 (3 day in-depth training), Remote and Leipzig, Germany
Also see :doc:`previous talks and blogposts <talks>`.
..
- `Europython <https://ep2022.europython.eu/>`__, July 11th to 17th (3h), Dublin, Ireland
- `CH Open Workshoptage <https://workshoptage.ch/>`__ (German), September 6th to 8th (1 day), Bern, Switzerland
.. _features:
pytest: helps you write better programs
@@ -17,10 +23,12 @@ The ``pytest`` framework makes it easy to write small, readable tests, and can
scale to support complex functional testing for applications and libraries.
``pytest`` requires: Python 3.8+ or PyPy3.
``pytest`` requires: Python 3.7+ or PyPy3.
**PyPI package name**: :pypi:`pytest`
**Documentation as PDF**: `download latest <https://media.readthedocs.org/pdf/pytest/latest/pytest.pdf>`_
A quick example
---------------
@@ -42,7 +50,7 @@ To execute it:
$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-8.x.y, pluggy-1.x.y
platform linux -- Python 3.x.y, pytest-7.x.y, pluggy-1.x.y
rootdir: /home/sweet/project
collected 1 item
@@ -76,7 +84,7 @@ Features
- Can run :ref:`unittest <unittest>` (including trial) and :ref:`nose <noseintegration>` test suites out of the box
- Python 3.8+ or PyPy 3
- Python 3.7+ or PyPy 3
- Rich plugin architecture, with over 800+ :ref:`external plugins <plugin-list>` and thriving community
@@ -96,6 +104,11 @@ Bugs/Requests
Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.
Changelog
---------
Consult the :ref:`Changelog <changelog>` page for fixes and enhancements of each version.
Support pytest
--------------
@@ -128,3 +141,13 @@ Security
pytest has never been associated with a security vulnerability, but in any case, to report a
security vulnerability please use the `Tidelift security contact <https://tidelift.com/security>`_.
Tidelift will coordinate the fix and disclosure.
License
-------
Copyright Holger Krekel and others, 2004.
Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
.. _`MIT`: https://github.com/pytest-dev/pytest/blob/main/LICENSE


@@ -0,0 +1,99 @@
Python 2.7 and 3.4 support
==========================
It is demanding on the maintainers of an open source project to support many Python versions, as
there's extra cost of keeping code compatible between all versions, while holding back on
features only made possible on newer Python versions.
In case of Python 2 and 3, the difference between the languages makes it even more prominent,
because many new Python 3 features cannot be used in a Python 2/3 compatible code base.
Python 2.7 EOL has been reached :pep:`in 2020 <0373#maintenance-releases>`, with
the last release made in April, 2020.
Python 3.4 EOL has been reached :pep:`in 2019 <0429#release-schedule>`, with the last release made in March, 2019.
For those reasons, in June 2019 it was decided that the **pytest 4.6** series will be the last to support Python 2.7 and 3.4.
What this means for general users
---------------------------------
Thanks to the `python_requires`_ setuptools option,
Python 2.7 and Python 3.4 users using a modern pip version
will install the last pytest 4.6.X version automatically even if 5.0 or later versions
are available on PyPI.
Users should ensure they are using the latest pip and setuptools versions for this to work.
Maintenance of 4.6.X versions
-----------------------------
Until January 2020, the pytest core team ported many bug-fixes from the main release into the
``4.6.x`` branch, with several 4.6.X releases being made along the year.
From now on, the core team will **no longer actively backport patches**, but the ``4.6.x``
branch will continue to exist so the community itself can contribute patches.
The core team will be happy to accept those patches, and make new 4.6.X releases **until mid-2020**
(but consider that date as a ballpark, after that date the team might still decide to make new releases
for critical bugs).
.. _`python_requires`: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
Technical aspects
~~~~~~~~~~~~~~~~~
(This section is a transcript from :issue:`5275`).
In this section we describe the technical aspects of the Python 2.7 and 3.4 support plan.
.. _what goes into 4.6.x releases:
What goes into 4.6.X releases
+++++++++++++++++++++++++++++
New 4.6.X releases will contain bug fixes only.
When will 4.6.X releases happen
+++++++++++++++++++++++++++++++
New 4.6.X releases will happen after we have a few bugs in place to release, or if a few weeks have
passed (say a single bug has been fixed a month after the latest 4.6.X release).
No hard rules here, just ballpark.
Who will handle applying bug fixes
++++++++++++++++++++++++++++++++++
We core maintainers expect that people still using Python 2.7/3.4 and being affected by
bugs to step up and provide patches and/or port bug fixes from the active branches.
We will be happy to guide users interested in doing so, so please don't hesitate to ask.
**Backporting changes into 4.6**
Please follow these instructions:
#. ``git fetch --all --prune``
#. ``git checkout origin/4.6.x -b backport-XXXX`` # use the PR number here
#. Locate the merge commit on the PR, in the *merged* message, for example:
nicoddemus merged commit 0f8b462 into pytest-dev:features
#. ``git cherry-pick -m1 REVISION`` # use the revision you found above (``0f8b462``).
#. Open a PR targeting ``4.6.x``:
* Prefix the message with ``[4.6]`` so it is an obvious backport
* Delete the PR body, it usually contains a duplicate commit message.
**Providing new PRs to 4.6**
Fresh pull requests to ``4.6.x`` will be accepted provided that
the equivalent code in the active branches does not contain that bug (for example, a bug is specific
to Python 2 only).
Bug fixes that also happen in the mainstream version should be first fixed
there, and then backported as per instructions above.


@@ -29,11 +29,9 @@ pytest.ini
``pytest.ini`` files take precedence over other files, even when empty.
Alternatively, the hidden version ``.pytest.ini`` can be used.
.. code-block:: ini
# pytest.ini or .pytest.ini
# pytest.ini
[pytest]
minversion = 6.0
addopts = -ra -q
@@ -90,7 +88,7 @@ and can also be used to hold pytest configuration if they have a ``[pytest]`` se
setup.cfg
~~~~~~~~~
``setup.cfg`` files are general purpose configuration files, used originally by ``distutils`` (now deprecated) and `setuptools <https://setuptools.pypa.io/en/latest/userguide/declarative_config.html>`__, and can also be used to hold pytest configuration
``setup.cfg`` files are general purpose configuration files, used originally by :doc:`distutils <distutils/configfile>`, and can also be used to hold pytest configuration
if they have a ``[tool:pytest]`` section.
.. code-block:: ini


@@ -11,6 +11,9 @@ Fixtures reference
.. seealso:: :ref:`about-fixtures`
.. seealso:: :ref:`how-to-fixtures`
.. currentmodule:: _pytest.python
.. _`Dependency injection`: https://en.wikipedia.org/wiki/Dependency_injection
@@ -73,13 +76,15 @@ Built-in fixtures
:class:`pathlib.Path` objects.
:fixture:`tmpdir`
Provide a `py.path.local <https://py.readthedocs.io/en/latest/path.html>`_ object to a temporary
Provide a :class:`py.path.local` object to a temporary
directory which is unique to each test function;
replaced by :fixture:`tmp_path`.
.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
:fixture:`tmpdir_factory`
Make session-scoped temporary directories and return
``py.path.local`` objects;
:class:`py.path.local` objects;
replaced by :fixture:`tmp_path_factory`.
@@ -93,7 +98,7 @@ Fixture availability is determined from the perspective of the test. A fixture
is only available for tests to request if they are in the scope that fixture is
defined in. If a fixture is defined inside a class, it can only be requested by
tests inside that class. But if a fixture is defined inside the global scope of
the module, then every test in that module, even if it's defined inside a class,
the module, than every test in that module, even if it's defined inside a class,
can request it.
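For instance, a minimal sketch of that module-level visibility (fixture and class names are arbitrary):

.. code-block:: python

    import pytest


    @pytest.fixture
    def order():
        return []


    class TestVisibility:
        def test_module_fixture_is_visible(self, order):
            # a fixture defined at module scope can be requested from inside a class
            order.append("first")
            assert order == ["first"]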
Similarly, a test can also only be affected by an autouse fixture if that test
@@ -330,7 +335,7 @@ For example:
.. literalinclude:: /example/fixtures/test_fixtures_order_dependencies.py
If we map out what depends on what, we get something that looks like this:
If we map out what depends on what, we get something that look like this:
.. image:: /example/fixtures/test_fixtures_order_dependencies.*
:align: center


@@ -8,8 +8,8 @@ Reference guides
.. toctree::
:maxdepth: 1
reference
fixtures
customize
exit-codes
plugin_list
customize
reference
exit-codes

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,11 +1,10 @@
pallets-sphinx-themes
pluggy>=1.2.0
pygments-pytest>=2.3.0
pluggy>=1.0
pygments-pytest>=2.2.0
sphinx-removed-in>=0.2.0
sphinx>=5,<8
sphinx>=3.1,<4
sphinxcontrib-trio
sphinxcontrib-svg2pdfconverter
# Pin packaging because it no longer handles 'latest' version, which
# is the version that is assigned to the docs.
# See https://github.com/pytest-dev/pytest/pull/10578#issuecomment-1348249045.
packaging <22
# XXX: sphinx<4 is broken with latest jinja2
jinja2<3.1


@@ -17,8 +17,6 @@ Books
Talks and blog postings
---------------------------------------------
- Training: `pytest - simple, rapid and fun testing with Python <https://www.youtube.com/watch?v=ofPHJrAOaTE>`_, Florian Bruhin, PyConDE 2022
- `pytest: Simple, rapid and fun testing with Python, <https://youtu.be/cSJ-X3TbQ1c?t=15752>`_ (@ 4:22:32), Florian Bruhin, WeAreDevelopers World Congress 2021
- Webinar: `pytest: Test Driven Development für Python (German) <https://bruhin.software/ins-pytest/>`_, Florian Bruhin, via mylearning.ch, 2020


@@ -0,0 +1,12 @@
import sys
from distutils.core import setup
if __name__ == "__main__":
if "sdist" not in sys.argv[1:]:
raise ValueError("please use 'pytest' pypi package instead of 'py.test'")
setup(
name="py.test",
version="0.0",
description="please use 'pytest' for installation",
)


@@ -1,7 +1,9 @@
[build-system]
requires = [
# sync with setup.py until we discard non-pep-517/518
"setuptools>=45.0",
"setuptools-scm[toml]>=6.2.3",
"wheel",
]
build-backend = "setuptools.build_meta"
@@ -16,12 +18,7 @@ python_classes = ["Test", "Acceptance"]
python_functions = ["test"]
# NOTE: "doc" is not included here, but gets tested explicitly via "doctesting".
testpaths = ["testing"]
norecursedirs = [
"testing/example_scripts",
".*",
"build",
"dist",
]
norecursedirs = ["testing/example_scripts"]
xfail_strict = true
filterwarnings = [
"error",
@@ -41,9 +38,6 @@ filterwarnings = [
# Those are caught/handled by pyupgrade, and not easy to filter with the
# module being the filename (with .py removed).
"default:invalid escape sequence:DeprecationWarning",
# ignore not yet fixed warnings for hook markers
"default:.*not marked using pytest.hook.*",
"ignore:.*not marked using pytest.hook.*::xdist.*",
# ignore use of unregistered marks, because we use many to test the implementation
"ignore::_pytest.warning_types.PytestUnknownMarkWarning",
# https://github.com/benjaminp/six/issues/341
@@ -117,9 +111,4 @@ template = "changelog/_template.rst"
showcontent = true
[tool.black]
target-version = ['py38']
# check-wheel-contents is executed by the build-and-inspect-python-package action.
[tool.check-wheel-contents]
# W009: Wheel contains multiple toplevel library entries
ignore = "W009"
target-version = ['py37']


@@ -31,22 +31,10 @@ class InvalidFeatureRelease(Exception):
SLUG = "pytest-dev/pytest"
PR_BODY = """\
Created by the [prepare release pr]\
(https://github.com/pytest-dev/pytest/actions/workflows/prepare-release-pr.yml) workflow.
Created automatically from manual trigger.
Once all builds pass and it has been **approved** by one or more maintainers, start the \
[deploy](https://github.com/pytest-dev/pytest/actions/workflows/deploy.yml) workflow, using these parameters:
* `Use workflow from`: `release-{version}`.
* `Release version`: `{version}`.
Or execute on the command line:
```console
gh workflow run deploy.yml -r release-{version} -f version={version}
```
After the workflow has been approved by a core maintainer, the package will be uploaded to PyPI automatically.
Once all builds pass and it has been **approved** by one or more maintainers, the build
can be released by pushing a tag `{version}` to this repository.
"""


@@ -7,9 +7,7 @@ def main():
Platform agnostic wrapper script for towncrier.
Fixes the issue (#7251) where windows users are unable to natively run tox -e docs to build pytest docs.
"""
with open(
"doc/en/_changelog_towncrier_draft.rst", "w", encoding="utf-8"
) as draft_file:
with open("doc/en/_changelog_towncrier_draft.rst", "w") as draft_file:
return call(("towncrier", "--draft"), stdout=draft_file)


@@ -5,40 +5,19 @@ from textwrap import dedent
from textwrap import indent
import packaging.version
import platformdirs
import requests
import tabulate
import wcwidth
from requests_cache import CachedResponse
from requests_cache import CachedSession
from requests_cache import OriginalResponse
from requests_cache import SQLiteCache
from tqdm import tqdm
FILE_HEAD = r"""
.. Note this file is autogenerated by scripts/update-plugin-list.py - usually weekly via github action
.. _plugin-list:
Pytest Plugin List
==================
Below is an automated compilation of ``pytest`` plugins available on `PyPI <https://pypi.org>`_.
It includes PyPI projects whose names begin with "pytest-" and a handful of manually selected projects.
Packages classified as inactive are excluded.
For detailed insights into how this list is generated,
please refer to `the update script <https://github.com/pytest-dev/pytest/blob/main/scripts/update-plugin-list.py>`_.
.. warning::
Please be aware that this list is not a curated collection of projects
and does not undergo a systematic review process.
It serves purely as an informational resource to aid in the discovery of ``pytest`` plugins.
Do not presume any endorsement from the ``pytest`` project or its developers,
and always conduct your own quality assessment before incorporating any of these plugins into your own projects.
Plugin List
===========
PyPI projects that match "pytest-\*" are considered plugins and are listed
automatically. Packages classified as inactive are excluded.
.. The following conditional uses a different format for this list when
creating a PDF, because otherwise the table gets far too wide for the
@@ -54,11 +33,6 @@ DEVELOPMENT_STATUS_CLASSIFIERS = (
"Development Status :: 6 - Mature",
"Development Status :: 7 - Inactive",
)
ADDITIONAL_PROJECTS = { # set of additional projects to consider as plugins
"logassert",
"nuts",
"flask_fixture",
}
def escape_rst(text: str) -> str:
@@ -74,50 +48,22 @@ def escape_rst(text: str) -> str:
return text
def project_response_with_refresh(
session: CachedSession, name: str, last_serial: int
) -> OriginalResponse | CachedResponse:
"""Get a http cached pypi project
force refresh in case of last serial mismatch
"""
response = session.get(f"https://pypi.org/pypi/{name}/json")
if int(response.headers.get("X-PyPI-Last-Serial", -1)) != last_serial:
response = session.get(f"https://pypi.org/pypi/{name}/json", refresh=True)
return response
def get_session() -> CachedSession:
"""Configures the requests-cache session"""
cache_path = platformdirs.user_cache_path("pytest-plugin-list")
cache_path.mkdir(exist_ok=True, parents=True)
cache_file = cache_path.joinpath("http_cache.sqlite3")
return CachedSession(backend=SQLiteCache(cache_file))
def pytest_plugin_projects_from_pypi(session: CachedSession) -> dict[str, int]:
response = session.get(
"https://pypi.org/simple",
headers={"Accept": "application/vnd.pypi.simple.v1+json"},
refresh=True,
)
return {
name: p["_last-serial"]
for p in response.json()["projects"]
if (name := p["name"]).startswith("pytest-") or name in ADDITIONAL_PROJECTS
}
def iter_plugins():
session = get_session()
name_2_serial = pytest_plugin_projects_from_pypi(session)
regex = r">([\d\w-]*)</a>"
response = requests.get("https://pypi.org/simple")
for name, last_serial in tqdm(name_2_serial.items(), smoothing=0):
response = project_response_with_refresh(session, name, last_serial)
matches = list(
match
for match in re.finditer(regex, response.text)
if match.groups()[0].startswith("pytest-")
)
for match in tqdm(matches, smoothing=0):
name = match.groups()[0]
response = requests.get(f"https://pypi.org/pypi/{name}/json")
if response.status_code == 404:
# Some packages, like pytest-azurepipelines42, are included in https://pypi.org/simple
# but return 404 on the JSON API. Skip.
# Some packages, like pytest-azurepipelines42, are included in https://pypi.org/simple but
# return 404 on the JSON API. Skip.
continue
response.raise_for_status()
info = response.json()["info"]
@@ -132,23 +78,11 @@ def iter_plugins():
requires = "N/A"
if info["requires_dist"]:
for requirement in info["requires_dist"]:
if re.match(r"pytest(?![-.\w])", requirement):
if requirement == "pytest" or "pytest " in requirement:
requires = requirement
break
def version_sort_key(version_string):
"""
Return the sort key for the given version string
returned by the API.
"""
try:
return packaging.version.parse(version_string)
except packaging.version.InvalidVersion:
# Use a hard-coded pre-release version.
return packaging.version.Version("0.0.0alpha")
releases = response.json()["releases"]
for release in sorted(releases, key=version_sort_key, reverse=True):
for release in sorted(releases, key=packaging.version.parse, reverse=True):
if releases[release]:
release_date = datetime.date.fromisoformat(
releases[release][-1]["upload_time_iso_8601"].split("T")[0]
@@ -156,9 +90,7 @@ def iter_plugins():
last_release = release_date.strftime("%b %d, %Y")
break
name = f':pypi:`{info["name"]}`'
summary = ""
if info["summary"]:
summary = escape_rst(info["summary"].replace("\n", ""))
summary = escape_rst(info["summary"].replace("\n", ""))
yield {
"name": name,
"summary": summary.strip(),
@@ -185,12 +117,12 @@ def plugin_definitions(plugins):
def main():
plugins = [*iter_plugins()]
plugins = list(iter_plugins())
reference_dir = pathlib.Path("doc", "en", "reference")
plugin_list = reference_dir / "plugin_list.rst"
with plugin_list.open("w", encoding="UTF-8") as f:
with plugin_list.open("w") as f:
f.write(FILE_HEAD)
f.write(f"This list contains {len(plugins)} plugins.\n\n")
f.write(".. only:: not latex\n\n")


@@ -6,7 +6,7 @@ long_description_content_type = text/x-rst
url = https://docs.pytest.org/en/latest/
author = Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others
license = MIT
license_files = LICENSE
license_file = LICENSE
platforms = unix, linux, osx, cygwin, win32
classifiers =
Development Status :: 6 - Mature
@@ -17,11 +17,10 @@ classifiers =
Operating System :: POSIX
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3.12
Topic :: Software Development :: Libraries
Topic :: Software Development :: Testing
Topic :: Utilities
@@ -37,20 +36,21 @@ packages =
_pytest
_pytest._code
_pytest._io
_pytest._py
_pytest.assertion
_pytest.config
_pytest.mark
pytest
py_modules = py
install_requires =
attrs>=19.2.0
iniconfig
packaging
pluggy>=1.3.0,<2.0
pluggy>=0.12,<2.0
py>=1.8.2
tomli>=1.0.0
atomicwrites>=1.0;sys_platform=="win32"
colorama;sys_platform=="win32"
exceptiongroup>=1.0.0rc8;python_version<"3.11"
tomli>=1.0.0;python_version<"3.11"
python_requires = >=3.8
importlib-metadata>=0.12;python_version<"3.8"
python_requires = >=3.7
package_dir =
=src
setup_requires =
@@ -66,13 +66,11 @@ console_scripts =
[options.extras_require]
testing =
argcomplete
attrs>=19.2.0
hypothesis>=3.56
mock
nose
pygments>=2.7.2
requests
setuptools
xmlschema
[options.package_data]
@@ -96,6 +94,7 @@ mypy_path = src
check_untyped_defs = True
disallow_any_generics = True
ignore_missing_imports = True
no_implicit_optional = True
show_error_codes = True
strict_equality = True
warn_redundant_casts = True

setup.py (new file)

@@ -0,0 +1,4 @@
from setuptools import setup
if __name__ == "__main__":
setup()


@@ -78,15 +78,15 @@ class FastFilesCompleter:
def __call__(self, prefix: str, **kwargs: Any) -> List[str]:
# Only called on non option completions.
if os.sep in prefix[1:]:
prefix_dir = len(os.path.dirname(prefix) + os.sep)
if os.path.sep in prefix[1:]:
prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
else:
prefix_dir = 0
completion = []
globbed = []
if "*" not in prefix and "?" not in prefix:
# We are on unix, otherwise no bash.
if not prefix or prefix[-1] == os.sep:
if not prefix or prefix[-1] == os.path.sep:
globbed.extend(glob(prefix + ".*"))
prefix += "*"
globbed.extend(glob(prefix))

Some files were not shown because too many files have changed in this diff.