Compare commits
408 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5b499bafb2 | ||
|
|
62c0d82d64 | ||
|
|
d526053af3 | ||
|
|
2c7614a0e1 | ||
|
|
b9a8465ce4 | ||
|
|
1cc974c95d | ||
|
|
c03e46f1ad | ||
|
|
f2d87dcf6c | ||
|
|
914441557c | ||
|
|
8aba863a63 | ||
|
|
aa79b1c00c | ||
|
|
117f52dcf3 | ||
|
|
9191857b5f | ||
|
|
7718d8c972 | ||
|
|
7a96f3f970 | ||
|
|
2fbea0e5e4 | ||
|
|
4910036b76 | ||
|
|
0b039b14aa | ||
|
|
7807c263bc | ||
|
|
b71f873189 | ||
|
|
a19ae2af22 | ||
|
|
0274c08b8a | ||
|
|
829941a061 | ||
|
|
2e345fd277 | ||
|
|
400393cfe4 | ||
|
|
459c5f4e49 | ||
|
|
f06ae5297b | ||
|
|
30de66944d | ||
|
|
02c737fe4e | ||
|
|
01655b114e | ||
|
|
a92ac0d4f6 | ||
|
|
802c77ad2f | ||
|
|
acb62ba619 | ||
|
|
df0cff18ac | ||
|
|
46a0888352 | ||
|
|
34b4e21606 | ||
|
|
a886015bfd | ||
|
|
09dee292ca | ||
|
|
2301fa61de | ||
|
|
d3549df5b9 | ||
|
|
b85d98edbb | ||
|
|
f4b1c1184f | ||
|
|
86a4eb6008 | ||
|
|
013d0e66c7 | ||
|
|
554bff8cc1 | ||
|
|
d2f74d342e | ||
|
|
430de12f35 | ||
|
|
d5eed3bb9c | ||
|
|
4b104ba222 | ||
|
|
c765b83a2a | ||
|
|
443af11861 | ||
|
|
4e02248b84 | ||
|
|
43a499e6fa | ||
|
|
e2fa2b621c | ||
|
|
0fc11b6f3c | ||
|
|
d2c1a04532 | ||
|
|
b8e65d03bf | ||
|
|
f37ea715d8 | ||
|
|
45d36ddb47 | ||
|
|
355954df5d | ||
|
|
a93c50ccb9 | ||
|
|
1cae76b0fe | ||
|
|
1b7597ac91 | ||
|
|
21680ffa77 | ||
|
|
8076f48eae | ||
|
|
0ae27714d1 | ||
|
|
92432ac45c | ||
|
|
937f945946 | ||
|
|
829a5986e8 | ||
|
|
54dbfb5167 | ||
|
|
70f0b77c72 | ||
|
|
2a8b463b38 | ||
|
|
12bf458719 | ||
|
|
114dba56f8 | ||
|
|
abb853f482 | ||
|
|
8208a376cc | ||
|
|
f078984c2e | ||
|
|
dba62f8a46 | ||
|
|
f7bf914108 | ||
|
|
917195ea8e | ||
|
|
e7cd00ac92 | ||
|
|
693c3b7f61 | ||
|
|
fb3ae5eaa9 | ||
|
|
c8d23c206b | ||
|
|
c5de8e8c50 | ||
|
|
f360147758 | ||
|
|
65bd1b8a93 | ||
|
|
882f3a4cd7 | ||
|
|
56e430f74e | ||
|
|
654d8da9f7 | ||
|
|
ace3a02cd4 | ||
|
|
f013a5e8c1 | ||
|
|
fbd8ff9502 | ||
|
|
737a1bf947 | ||
|
|
843ca03770 | ||
|
|
5cbc06a453 | ||
|
|
da23aa3419 | ||
|
|
28bf3816e7 | ||
|
|
d6ce2e5858 | ||
|
|
5ac498ea96 | ||
|
|
6765aca0d1 | ||
|
|
72fc43952b | ||
|
|
6896dbc5ca | ||
|
|
49c6aebbc7 | ||
|
|
fb12d2a612 | ||
|
|
8e51563384 | ||
|
|
61dfd0a94f | ||
|
|
f9cafd1c94 | ||
|
|
b10f28949d | ||
|
|
b0f090890c | ||
|
|
f0a4a13e48 | ||
|
|
ff80464b47 | ||
|
|
480dd9e6d6 | ||
|
|
ceb4f3f701 | ||
|
|
ea3ebec117 | ||
|
|
bf3b26b3f7 | ||
|
|
84569ca4da | ||
|
|
af21e6b45c | ||
|
|
5c5966f62d | ||
|
|
31b1c4ca0c | ||
|
|
61b76c7f5f | ||
|
|
c35544a0f7 | ||
|
|
5bc3ad8e27 | ||
|
|
a930ca0b45 | ||
|
|
d4b85da8c7 | ||
|
|
0025e4408f | ||
|
|
81cc73103a | ||
|
|
97d8e9fbec | ||
|
|
220a2a1bc9 | ||
|
|
2e2e895b4b | ||
|
|
bce45052a6 | ||
|
|
2f48ae4e66 | ||
|
|
6061ecf95a | ||
|
|
e032904413 | ||
|
|
ece774f0eb | ||
|
|
bc49d6ff99 | ||
|
|
2b9ca34280 | ||
|
|
e38561037d | ||
|
|
0a57124063 | ||
|
|
13f02af97d | ||
|
|
f2ed796c41 | ||
|
|
b3f8fabac8 | ||
|
|
22d91a3c3a | ||
|
|
322a0f0a33 | ||
|
|
58149459a5 | ||
|
|
852fb6a4ae | ||
|
|
437d6452c1 | ||
|
|
e37ff3042e | ||
|
|
ecd2de25a1 | ||
|
|
c607697400 | ||
|
|
0996f3dbc5 | ||
|
|
a0dbf2ab99 | ||
|
|
ddbe733666 | ||
|
|
470e686a70 | ||
|
|
765f75a8f1 | ||
|
|
6b5152ae13 | ||
|
|
10ca84ffc5 | ||
|
|
e393a73890 | ||
|
|
bed3918cbc | ||
|
|
31dfbb4668 | ||
|
|
b4d75ad31d | ||
|
|
ec6d0fa4d7 | ||
|
|
fa8a658458 | ||
|
|
66f20b6f5e | ||
|
|
6ba5e3c071 | ||
|
|
b900b4155f | ||
|
|
364ae5d723 | ||
|
|
84c7fef836 | ||
|
|
aaea4e52ef | ||
|
|
0cacdadc97 | ||
|
|
6c56070df1 | ||
|
|
f9f41e69a8 | ||
|
|
b2ce6f3200 | ||
|
|
e0b584d048 | ||
|
|
a0ff5deabf | ||
|
|
97b85a17ae | ||
|
|
de7ba5958b | ||
|
|
8a498700da | ||
|
|
45c894b73f | ||
|
|
65342db7a4 | ||
|
|
d391274f39 | ||
|
|
c5fa1d1c3e | ||
|
|
a304dbb519 | ||
|
|
d19df5efa2 | ||
|
|
6663cb054c | ||
|
|
b27e40cbf1 | ||
|
|
ee52a8a5f8 | ||
|
|
8f04bd003c | ||
|
|
93fd9debe3 | ||
|
|
ff428bfee1 | ||
|
|
0f7c7a99bf | ||
|
|
041ea3704b | ||
|
|
d94b4b031f | ||
|
|
e253029ad0 | ||
|
|
43617a8c47 | ||
|
|
56bf7446f6 | ||
|
|
8e42c5b7db | ||
|
|
0571e1ee8e | ||
|
|
b0a6161d41 | ||
|
|
06fa2bc0b8 | ||
|
|
8abd4aec6e | ||
|
|
858010e214 | ||
|
|
96424272a1 | ||
|
|
e44a2ef653 | ||
|
|
c6e3ff3ce5 | ||
|
|
a31098a74e | ||
|
|
7e8044f9b8 | ||
|
|
b81173ea0c | ||
|
|
c8f7e50c47 | ||
|
|
c0e53a61e6 | ||
|
|
494ac28a32 | ||
|
|
dc75b6af47 | ||
|
|
7573747cda | ||
|
|
2db05b6582 | ||
|
|
4318698bae | ||
|
|
e668aaf885 | ||
|
|
58e6a09db4 | ||
|
|
6718a2f028 | ||
|
|
c081c01eb1 | ||
|
|
f8e1d58e8f | ||
|
|
18024467ff | ||
|
|
2ad36b1402 | ||
|
|
6ca3e1e425 | ||
|
|
ecd072ea94 | ||
|
|
dda21935a7 | ||
|
|
cc464f6b96 | ||
|
|
6a43c8cd94 | ||
|
|
63fe547d9f | ||
|
|
b709e61892 | ||
|
|
465b2d998a | ||
|
|
184ef92f0b | ||
|
|
73bbff2b74 | ||
|
|
4ccaa987d4 | ||
|
|
3a4a815c41 | ||
|
|
dae455e8a3 | ||
|
|
0594dba5ce | ||
|
|
f1183c2422 | ||
|
|
685ca96c71 | ||
|
|
ccf6c3cb46 | ||
|
|
ceca35b94a | ||
|
|
803d68847b | ||
|
|
0bd02cd1bc | ||
|
|
0b8b006db4 | ||
|
|
73b74c74c9 | ||
|
|
4d782dc13f | ||
|
|
e1756fc631 | ||
|
|
5d7686951c | ||
|
|
80c5f6e609 | ||
|
|
0b47e51d08 | ||
|
|
5eeb5ee960 | ||
|
|
ed2b715f4c | ||
|
|
7e08e09473 | ||
|
|
2051e30b9b | ||
|
|
f339147d12 | ||
|
|
c04767f946 | ||
|
|
6d040370ed | ||
|
|
4a2fdce62b | ||
|
|
32a5e80a6d | ||
|
|
0e8a8f94f6 | ||
|
|
8f23e19bcb | ||
|
|
f5c1f3df71 | ||
|
|
9f66102869 | ||
|
|
865e84d206 | ||
|
|
3f5622c577 | ||
|
|
1fb3f63f35 | ||
|
|
fc2ad1dbed | ||
|
|
fb6dad60a0 | ||
|
|
02053bf556 | ||
|
|
ff5317a7f3 | ||
|
|
766fc23151 | ||
|
|
08734bdd18 | ||
|
|
915ecb0dac | ||
|
|
53cd7fd2ea | ||
|
|
8532e991a5 | ||
|
|
ebc0cea226 | ||
|
|
65133018f3 | ||
|
|
ac7ee40aaf | ||
|
|
308b733b9d | ||
|
|
76c6ed8b05 | ||
|
|
7f519f8ab7 | ||
|
|
0bf363472e | ||
|
|
e3e57a755b | ||
|
|
79d5fc3a0b | ||
|
|
322d686ab4 | ||
|
|
f75f7c1925 | ||
|
|
698c4e75fd | ||
|
|
4749dca764 | ||
|
|
649d23c8a8 | ||
|
|
1460ad6027 | ||
|
|
78cd1a07d0 | ||
|
|
c3178a176d | ||
|
|
df1d1105b0 | ||
|
|
2b11b2c093 | ||
|
|
adb8edbae1 | ||
|
|
3cff5e252d | ||
|
|
7412df0920 | ||
|
|
990133f804 | ||
|
|
df68808d29 | ||
|
|
b64d9402ca | ||
|
|
8d3a5dcd1b | ||
|
|
24bd51bda0 | ||
|
|
9374114370 | ||
|
|
eb13530560 | ||
|
|
5530d3e15d | ||
|
|
ea79eb5c3f | ||
|
|
c8b904a406 | ||
|
|
d45fa7b212 | ||
|
|
852ca7ad59 | ||
|
|
45ba736c81 | ||
|
|
c453fe7053 | ||
|
|
cc005af47e | ||
|
|
20c624efcf | ||
|
|
b2be6c1a30 | ||
|
|
c43a9c83ee | ||
|
|
e804e419bc | ||
|
|
992e7f7771 | ||
|
|
bd1a2e6435 | ||
|
|
1d137fd2fe | ||
|
|
f1f1862b19 | ||
|
|
6f0a5789fb | ||
|
|
cc78a533ae | ||
|
|
fd0b3e2e8b | ||
|
|
f3dbe5a308 | ||
|
|
1da8ce65a6 | ||
|
|
1dd5f088fa | ||
|
|
6a73714b00 | ||
|
|
6371243c10 | ||
|
|
19035f4b55 | ||
|
|
48ed437e70 | ||
|
|
14d3d9187f | ||
|
|
a37d1df089 | ||
|
|
97cd5f0deb | ||
|
|
8b2fcf517c | ||
|
|
42e60d935a | ||
|
|
b3759372ad | ||
|
|
f5d2b199e2 | ||
|
|
12133d4eb7 | ||
|
|
148f2fc72c | ||
|
|
5d9d12a6be | ||
|
|
dde27a2305 | ||
|
|
10fa66e5b5 | ||
|
|
5e26304d81 | ||
|
|
d0860a339b | ||
|
|
fcbfdef11b | ||
|
|
b84f826fc8 | ||
|
|
ec46864922 | ||
|
|
c36a90531a | ||
|
|
3fa329c9e9 | ||
|
|
a70e5f119e | ||
|
|
b6b7185b7b | ||
|
|
4fb7a91a5e | ||
|
|
ff5e98c654 | ||
|
|
2662c400ba | ||
|
|
d8d835c1f5 | ||
|
|
06029d11d3 | ||
|
|
4c0ba6017d | ||
|
|
c70ecd49ca | ||
|
|
50edab8004 | ||
|
|
b4b9f788af | ||
|
|
a7e49e6c07 | ||
|
|
2b1ae8a66d | ||
|
|
2ebb69b50a | ||
|
|
4fca86e2af | ||
|
|
9ad00714ba | ||
|
|
b549438423 | ||
|
|
377888140f | ||
|
|
df377b589f | ||
|
|
87ce586d29 | ||
|
|
f599172add | ||
|
|
e20b39d928 | ||
|
|
4f33f46a02 | ||
|
|
159704421e | ||
|
|
0f965e57a2 | ||
|
|
8ad99c5cab | ||
|
|
da3f836ee3 | ||
|
|
37ecca3ba9 | ||
|
|
3d0ecd03ed | ||
|
|
eb5b2e0db5 | ||
|
|
47d92a0d96 | ||
|
|
7f1bf44aa8 | ||
|
|
5fec793bc7 | ||
|
|
e04936fc29 | ||
|
|
0d4636b056 | ||
|
|
a65edf6711 | ||
|
|
e88aa957ae | ||
|
|
8907fedc79 | ||
|
|
1f5a61e4ef | ||
|
|
1410d3dc9a | ||
|
|
7538aa7bb9 | ||
|
|
db34bf01b6 | ||
|
|
9434541090 | ||
|
|
cc90bcce4c | ||
|
|
15ea5cef46 | ||
|
|
32a8d503a2 | ||
|
|
8c734dfc2f | ||
|
|
08ded2927a | ||
|
|
1c9dcf1f39 | ||
|
|
cab4069f42 | ||
|
|
4f6c67658c | ||
|
|
cda9ce198a | ||
|
|
9121138a1b | ||
|
|
407d74be27 | ||
|
|
deade370b9 | ||
|
|
00810b9b2a | ||
|
|
bcc08ffe4d | ||
|
|
ba1fc02a9b | ||
|
|
38d687f7c7 | ||
|
|
adebfd0a84 |
4
.github/FUNDING.yml
vendored
Normal file
4
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
# info:
|
||||
# * https://help.github.com/en/articles/displaying-a-sponsor-button-in-your-repository
|
||||
# * https://tidelift.com/subscription/how-to-connect-tidelift-with-github
|
||||
tidelift: pypi/pytest
|
||||
4
.github/PULL_REQUEST_TEMPLATE.md
vendored
4
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,11 +1,10 @@
|
||||
<!--
|
||||
Thanks for submitting a PR, your contribution is really appreciated!
|
||||
|
||||
Here's a quick checklist that should be present in PRs.
|
||||
Here is a quick checklist that should be present in PRs.
|
||||
(please delete this text from the final description, this is just a guideline)
|
||||
-->
|
||||
|
||||
- [ ] Create a new changelog file in the `changelog` folder, with a name like `<ISSUE NUMBER>.<TYPE>.rst`. See [changelog/README.rst](https://github.com/pytest-dev/pytest/blob/master/changelog/README.rst) for details.
|
||||
- [ ] Target the `master` branch for bug fixes, documentation updates and trivial changes.
|
||||
- [ ] Target the `features` branch for new features and removals/deprecations.
|
||||
- [ ] Include documentation when adding new features.
|
||||
@@ -13,4 +12,5 @@ Here's a quick checklist that should be present in PRs.
|
||||
|
||||
Unless your change is trivial or a small documentation fix (e.g., a typo or reword of a small section) please:
|
||||
|
||||
- [ ] Create a new changelog file in the `changelog` folder, with a name like `<ISSUE NUMBER>.<TYPE>.rst`. See [changelog/README.rst](https://github.com/pytest-dev/pytest/blob/master/changelog/README.rst) for details.
|
||||
- [ ] Add yourself to `AUTHORS` in alphabetical order;
|
||||
|
||||
@@ -13,10 +13,11 @@ repos:
|
||||
additional_dependencies: [black==19.3b0]
|
||||
language_version: python3
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v2.1.0
|
||||
rev: v2.2.2
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: end-of-file-fixer
|
||||
- id: fix-encoding-pragma
|
||||
- id: check-yaml
|
||||
- id: debug-statements
|
||||
exclude: _pytest/debugging.py
|
||||
@@ -45,7 +46,7 @@ repos:
|
||||
- id: rst
|
||||
name: rst
|
||||
entry: rst-lint --encoding utf-8
|
||||
files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst|changelog/.*)$
|
||||
files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst|TIDELIFT.rst|changelog/.*)$
|
||||
language: python
|
||||
additional_dependencies: [pygments, restructuredtext_lint]
|
||||
- id: changelogs-rst
|
||||
|
||||
25
.travis.yml
25
.travis.yml
@@ -13,6 +13,10 @@ env:
|
||||
global:
|
||||
- PYTEST_ADDOPTS=-vv
|
||||
|
||||
# setuptools-scm needs all tags in order to obtain a proper version
|
||||
git:
|
||||
depth: false
|
||||
|
||||
install:
|
||||
- python -m pip install --upgrade --pre tox
|
||||
|
||||
@@ -20,9 +24,6 @@ jobs:
|
||||
include:
|
||||
# OSX tests - first (in test stage), since they are the slower ones.
|
||||
- &test-macos
|
||||
# NOTE: (tests with) pexpect appear to be buggy on Travis,
|
||||
# at least with coverage.
|
||||
# Log: https://travis-ci.org/pytest-dev/pytest/jobs/500358864
|
||||
os: osx
|
||||
osx_image: xcode10.1
|
||||
language: generic
|
||||
@@ -33,7 +34,7 @@ jobs:
|
||||
- python -V
|
||||
- test $(python -c 'import sys; print("%d%d" % sys.version_info[0:2])') = 27
|
||||
- <<: *test-macos
|
||||
env: TOXENV=py37-xdist
|
||||
env: TOXENV=py37-pexpect,py37-xdist PYTEST_COVERAGE=1
|
||||
before_install:
|
||||
- which python3
|
||||
- python3 -V
|
||||
@@ -98,8 +99,17 @@ jobs:
|
||||
|
||||
- stage: deploy
|
||||
python: '3.6'
|
||||
install: pip install -U setuptools setuptools_scm
|
||||
install: pip install -U setuptools setuptools_scm tox
|
||||
script: skip
|
||||
# token to upload github release notes: GH_RELEASE_NOTES_TOKEN
|
||||
env:
|
||||
- secure: "OjOeL7/0JUDkV00SsTs732e8vQjHynpbG9FKTNtZZJ+1Zn4Cib+hAlwmlBnvVukML0X60YpcfjnC4quDOIGLPsh5zeXnvJmYtAIIUNQXjWz8NhcGYrhyzuP1rqV22U68RTCdmOq3lMYU/W2acwHP7T49PwJtOiUM5kF120UAQ0Zi5EmkqkIvH8oM5mO9Dlver+/U7Htpz9rhKrHBXQNCMZI6yj2aUyukqB2PN2fjAlDbCF//+FmvYw9NjT4GeFOSkTCf4ER9yfqs7yglRfwiLtOCZ2qKQhWZNsSJDB89rxIRXWavJUjJKeY2EW2/NkomYJDpqJLIF4JeFRw/HhA47CYPeo6BJqyyNV+0CovL1frpWfi9UQw2cMbgFUkUIUk3F6DD59PHNIOX2R/HX56dQsw7WKl3QuHlCOkICXYg8F7Ta684IoKjeTX03/6QNOkURfDBwfGszY0FpbxrjCSWKom6RyZdyidnESaxv9RzjcIRZVh1rp8KMrwS1OrwRSdG0zjlsPr49hWMenN/8fKgcHTV4/r1Tj6mip0dorSRCrgUNIeRBKgmui6FS8642ab5JNKOxMteVPVR2sFuhjOQ0Jy+PmvceYY9ZMWc3+/B/KVh0dZ3hwvLGZep/vxDS2PwCA5/xw31714vT5LxidKo8yECjBynMU/wUTTS695D3NY="
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
# required by publish_gh_release_notes
|
||||
- pandoc
|
||||
after_deploy: tox -e publish_gh_release_notes
|
||||
deploy:
|
||||
provider: pypi
|
||||
user: nicoddemus
|
||||
@@ -115,6 +125,9 @@ matrix:
|
||||
allow_failures:
|
||||
- python: '3.8-dev'
|
||||
env: TOXENV=py38-xdist
|
||||
# Temporary (https://github.com/pytest-dev/pytest/pull/5334).
|
||||
- env: TOXENV=pypy3-xdist
|
||||
python: 'pypy3'
|
||||
|
||||
before_script:
|
||||
- |
|
||||
@@ -130,7 +143,7 @@ before_script:
|
||||
export _PYTEST_TOX_EXTRA_DEP=coverage-enable-subprocess
|
||||
fi
|
||||
|
||||
script: tox --recreate
|
||||
script: tox
|
||||
|
||||
after_success:
|
||||
- |
|
||||
|
||||
5
AUTHORS
5
AUTHORS
@@ -9,6 +9,7 @@ Abhijeet Kasurde
|
||||
Adam Johnson
|
||||
Adam Uhlir
|
||||
Ahn Ki-Wook
|
||||
Akiomi Kamakura
|
||||
Alan Velasco
|
||||
Alexander Johnson
|
||||
Alexei Kozlenok
|
||||
@@ -86,6 +87,7 @@ Endre Galaczi
|
||||
Eric Hunsberger
|
||||
Eric Siegerman
|
||||
Erik M. Bray
|
||||
Evan Kepner
|
||||
Fabien Zarifian
|
||||
Fabio Zadrozny
|
||||
Feng Ma
|
||||
@@ -133,6 +135,7 @@ Kale Kundert
|
||||
Katarzyna Jachim
|
||||
Katerina Koukiou
|
||||
Kevin Cox
|
||||
Kevin J. Foley
|
||||
Kodi B. Arfer
|
||||
Kostis Anagnostopoulos
|
||||
Kristoffer Nordström
|
||||
@@ -194,6 +197,7 @@ Paweł Adamczak
|
||||
Pedro Algarvio
|
||||
Pieter Mulder
|
||||
Piotr Banaszkiewicz
|
||||
Pulkit Goyal
|
||||
Punyashloka Biswal
|
||||
Quentin Pradet
|
||||
Ralf Schmitt
|
||||
@@ -211,6 +215,7 @@ Ross Lawley
|
||||
Russel Winder
|
||||
Ryan Wooden
|
||||
Samuel Dion-Girardeau
|
||||
Samuel Searles-Bryant
|
||||
Samuele Pedroni
|
||||
Sankt Petersbug
|
||||
Segev Finer
|
||||
|
||||
328
CHANGELOG.rst
328
CHANGELOG.rst
@@ -18,6 +18,328 @@ with advance notice in the **Deprecations** section of releases.
|
||||
|
||||
.. towncrier release notes start
|
||||
|
||||
pytest 4.6.6 (2019-10-11)
|
||||
=========================
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- `#5523 <https://github.com/pytest-dev/pytest/issues/5523>`_: Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+.
|
||||
|
||||
|
||||
- `#5537 <https://github.com/pytest-dev/pytest/issues/5537>`_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
|
||||
standard library on Python 3.8+.
|
||||
|
||||
|
||||
- `#5806 <https://github.com/pytest-dev/pytest/issues/5806>`_: Fix "lexer" being used when uploading to bpaste.net from ``--pastebin`` to "text".
|
||||
|
||||
|
||||
- `#5902 <https://github.com/pytest-dev/pytest/issues/5902>`_: Fix warnings about deprecated ``cmp`` attribute in ``attrs>=19.2``.
|
||||
|
||||
|
||||
|
||||
Trivial/Internal Changes
|
||||
------------------------
|
||||
|
||||
- `#5801 <https://github.com/pytest-dev/pytest/issues/5801>`_: Fixes python version checks (detected by ``flake8-2020``) in case python4 becomes a thing.
|
||||
|
||||
|
||||
pytest 4.6.5 (2019-08-05)
|
||||
=========================
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- `#4344 <https://github.com/pytest-dev/pytest/issues/4344>`_: Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.
|
||||
|
||||
|
||||
- `#5478 <https://github.com/pytest-dev/pytest/issues/5478>`_: Fix encode error when using unicode strings in exceptions with ``pytest.raises``.
|
||||
|
||||
|
||||
- `#5524 <https://github.com/pytest-dev/pytest/issues/5524>`_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
|
||||
which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.
|
||||
|
||||
|
||||
- `#5547 <https://github.com/pytest-dev/pytest/issues/5547>`_: ``--step-wise`` now handles ``xfail(strict=True)`` markers properly.
|
||||
|
||||
|
||||
- `#5650 <https://github.com/pytest-dev/pytest/issues/5650>`_: Improved output when parsing an ini configuration file fails.
|
||||
|
||||
|
||||
pytest 4.6.4 (2019-06-28)
|
||||
=========================
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- `#5404 <https://github.com/pytest-dev/pytest/issues/5404>`_: Emit a warning when attempting to unwrap a broken object raises an exception,
|
||||
for easier debugging (`#5080 <https://github.com/pytest-dev/pytest/issues/5080>`__).
|
||||
|
||||
|
||||
- `#5444 <https://github.com/pytest-dev/pytest/issues/5444>`_: Fix ``--stepwise`` mode when the first file passed on the command-line fails to collect.
|
||||
|
||||
|
||||
- `#5482 <https://github.com/pytest-dev/pytest/issues/5482>`_: Fix bug introduced in 4.6.0 causing collection errors when passing
|
||||
more than 2 positional arguments to ``pytest.mark.parametrize``.
|
||||
|
||||
|
||||
- `#5505 <https://github.com/pytest-dev/pytest/issues/5505>`_: Fix crash when discovery fails while using ``-p no:terminal``.
|
||||
|
||||
|
||||
pytest 4.6.3 (2019-06-11)
|
||||
=========================
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- `#5383 <https://github.com/pytest-dev/pytest/issues/5383>`_: ``-q`` has again an impact on the style of the collected items
|
||||
(``--collect-only``) when ``--log-cli-level`` is used.
|
||||
|
||||
|
||||
- `#5389 <https://github.com/pytest-dev/pytest/issues/5389>`_: Fix regressions of `#5063 <https://github.com/pytest-dev/pytest/pull/5063>`__ for ``importlib_metadata.PathDistribution`` which have their ``files`` attribute being ``None``.
|
||||
|
||||
|
||||
- `#5390 <https://github.com/pytest-dev/pytest/issues/5390>`_: Fix regression where the ``obj`` attribute of ``TestCase`` items was no longer bound to methods.
|
||||
|
||||
|
||||
pytest 4.6.2 (2019-06-03)
|
||||
=========================
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- `#5370 <https://github.com/pytest-dev/pytest/issues/5370>`_: Revert unrolling of ``all()`` to fix ``NameError`` on nested comprehensions.
|
||||
|
||||
|
||||
- `#5371 <https://github.com/pytest-dev/pytest/issues/5371>`_: Revert unrolling of ``all()`` to fix incorrect handling of generators with ``if``.
|
||||
|
||||
|
||||
- `#5372 <https://github.com/pytest-dev/pytest/issues/5372>`_: Revert unrolling of ``all()`` to fix incorrect assertion when using ``all()`` in an expression.
|
||||
|
||||
|
||||
pytest 4.6.1 (2019-06-02)
|
||||
=========================
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- `#5354 <https://github.com/pytest-dev/pytest/issues/5354>`_: Fix ``pytest.mark.parametrize`` when the argvalues is an iterator.
|
||||
|
||||
|
||||
- `#5358 <https://github.com/pytest-dev/pytest/issues/5358>`_: Fix assertion rewriting of ``all()`` calls to deal with non-generators.
|
||||
|
||||
|
||||
pytest 4.6.0 (2019-05-31)
|
||||
=========================
|
||||
|
||||
Important
|
||||
---------
|
||||
|
||||
The ``4.6.X`` series will be the last series to support **Python 2 and Python 3.4**.
|
||||
|
||||
For more details, see our `Python 2.7 and 3.4 support plan <https://docs.pytest.org/en/latest/py27-py34-deprecation.html>`__.
|
||||
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- `#4559 <https://github.com/pytest-dev/pytest/issues/4559>`_: Added the ``junit_log_passing_tests`` ini value which can be used to enable or disable logging of passing test output in the Junit XML file.
|
||||
|
||||
|
||||
- `#4956 <https://github.com/pytest-dev/pytest/issues/4956>`_: pytester's ``testdir.spawn`` uses ``tmpdir`` as HOME/USERPROFILE directory.
|
||||
|
||||
|
||||
- `#5062 <https://github.com/pytest-dev/pytest/issues/5062>`_: Unroll calls to ``all`` to full for-loops with assertion rewriting for better failure messages, especially when using Generator Expressions.
|
||||
|
||||
|
||||
- `#5063 <https://github.com/pytest-dev/pytest/issues/5063>`_: Switch from ``pkg_resources`` to ``importlib-metadata`` for entrypoint detection for improved performance and import time.
|
||||
|
||||
|
||||
- `#5091 <https://github.com/pytest-dev/pytest/issues/5091>`_: The output for ini options in ``--help`` has been improved.
|
||||
|
||||
|
||||
- `#5269 <https://github.com/pytest-dev/pytest/issues/5269>`_: ``pytest.importorskip`` includes the ``ImportError`` now in the default ``reason``.
|
||||
|
||||
|
||||
- `#5311 <https://github.com/pytest-dev/pytest/issues/5311>`_: Captured logs that are output for each failing test are formatted using the
|
||||
ColoredLevelFormatter.
|
||||
|
||||
|
||||
- `#5312 <https://github.com/pytest-dev/pytest/issues/5312>`_: Improved formatting of multiline log messages in Python 3.
|
||||
|
||||
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- `#2064 <https://github.com/pytest-dev/pytest/issues/2064>`_: The debugging plugin imports the wrapped ``Pdb`` class (``--pdbcls``) on-demand now.
|
||||
|
||||
|
||||
- `#4908 <https://github.com/pytest-dev/pytest/issues/4908>`_: The ``pytest_enter_pdb`` hook gets called with post-mortem (``--pdb``).
|
||||
|
||||
|
||||
- `#5036 <https://github.com/pytest-dev/pytest/issues/5036>`_: Fix issue where fixtures dependent on other parametrized fixtures would be erroneously parametrized.
|
||||
|
||||
|
||||
- `#5256 <https://github.com/pytest-dev/pytest/issues/5256>`_: Handle internal error due to a lone surrogate unicode character not being representable in Jython.
|
||||
|
||||
|
||||
- `#5257 <https://github.com/pytest-dev/pytest/issues/5257>`_: Ensure that ``sys.stdout.mode`` does not include ``'b'`` as it is a text stream.
|
||||
|
||||
|
||||
- `#5278 <https://github.com/pytest-dev/pytest/issues/5278>`_: Pytest's internal python plugin can be disabled using ``-p no:python`` again.
|
||||
|
||||
|
||||
- `#5286 <https://github.com/pytest-dev/pytest/issues/5286>`_: Fix issue with ``disable_test_id_escaping_and_forfeit_all_rights_to_community_support`` option not working when using a list of test IDs in parametrized tests.
|
||||
|
||||
|
||||
- `#5330 <https://github.com/pytest-dev/pytest/issues/5330>`_: Show the test module being collected when emitting ``PytestCollectionWarning`` messages for
|
||||
test classes with ``__init__`` and ``__new__`` methods to make it easier to pin down the problem.
|
||||
|
||||
|
||||
- `#5333 <https://github.com/pytest-dev/pytest/issues/5333>`_: Fix regression in 4.5.0 with ``--lf`` not re-running all tests with known failures from non-selected tests.
|
||||
|
||||
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- `#5250 <https://github.com/pytest-dev/pytest/issues/5250>`_: Expand docs on use of ``setenv`` and ``delenv`` with ``monkeypatch``.
|
||||
|
||||
|
||||
pytest 4.5.0 (2019-05-11)
|
||||
=========================
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- `#4826 <https://github.com/pytest-dev/pytest/issues/4826>`_: A warning is now emitted when unknown marks are used as a decorator.
|
||||
This is often due to a typo, which can lead to silently broken tests.
|
||||
|
||||
|
||||
- `#4907 <https://github.com/pytest-dev/pytest/issues/4907>`_: Show XFail reason as part of JUnitXML message field.
|
||||
|
||||
|
||||
- `#5013 <https://github.com/pytest-dev/pytest/issues/5013>`_: Messages from crash reports are displayed within test summaries now, truncated to the terminal width.
|
||||
|
||||
|
||||
- `#5023 <https://github.com/pytest-dev/pytest/issues/5023>`_: New flag ``--strict-markers`` that triggers an error when unknown markers (e.g. those not registered using the `markers option`_ in the configuration file) are used in the test suite.
|
||||
|
||||
The existing ``--strict`` option has the same behavior currently, but can be augmented in the future for additional checks.
|
||||
|
||||
.. _`markers option`: https://docs.pytest.org/en/latest/reference.html#confval-markers
|
||||
|
||||
|
||||
- `#5026 <https://github.com/pytest-dev/pytest/issues/5026>`_: Assertion failure messages for sequences and dicts contain the number of different items now.
|
||||
|
||||
|
||||
- `#5034 <https://github.com/pytest-dev/pytest/issues/5034>`_: Improve reporting with ``--lf`` and ``--ff`` (run-last-failure).
|
||||
|
||||
|
||||
- `#5035 <https://github.com/pytest-dev/pytest/issues/5035>`_: The ``--cache-show`` option/action accepts an optional glob to show only matching cache entries.
|
||||
|
||||
|
||||
- `#5059 <https://github.com/pytest-dev/pytest/issues/5059>`_: Standard input (stdin) can be given to pytester's ``Testdir.run()`` and ``Testdir.popen()``.
|
||||
|
||||
|
||||
- `#5068 <https://github.com/pytest-dev/pytest/issues/5068>`_: The ``-r`` option learnt about ``A`` to display all reports (including passed ones) in the short test summary.
|
||||
|
||||
|
||||
- `#5108 <https://github.com/pytest-dev/pytest/issues/5108>`_: The short test summary is displayed after passes with output (``-rP``).
|
||||
|
||||
|
||||
- `#5172 <https://github.com/pytest-dev/pytest/issues/5172>`_: The ``--last-failed`` (``--lf``) option got smarter and will now skip entire files if all tests
|
||||
of that test file have passed in previous runs, greatly speeding up collection.
|
||||
|
||||
|
||||
- `#5177 <https://github.com/pytest-dev/pytest/issues/5177>`_: Introduce new specific warning ``PytestWarning`` subclasses to make it easier to filter warnings based on the class, rather than on the message. The new subclasses are:
|
||||
|
||||
|
||||
* ``PytestAssertRewriteWarning``
|
||||
|
||||
* ``PytestCacheWarning``
|
||||
|
||||
* ``PytestCollectionWarning``
|
||||
|
||||
* ``PytestConfigWarning``
|
||||
|
||||
* ``PytestUnhandledCoroutineWarning``
|
||||
|
||||
* ``PytestUnknownMarkWarning``
|
||||
|
||||
|
||||
- `#5202 <https://github.com/pytest-dev/pytest/issues/5202>`_: New ``record_testsuite_property`` session-scoped fixture allows users to log ``<property>`` tags at the ``testsuite``
|
||||
level with the ``junitxml`` plugin.
|
||||
|
||||
The generated XML is compatible with the latest xunit standard, contrary to
|
||||
the properties recorded by ``record_property`` and ``record_xml_attribute``.
|
||||
|
||||
|
||||
- `#5214 <https://github.com/pytest-dev/pytest/issues/5214>`_: The default logging format has been changed to improve readability. Here is an
|
||||
example of a previous logging message::
|
||||
|
||||
test_log_cli_enabled_disabled.py 3 CRITICAL critical message logged by test
|
||||
|
||||
This has now become::
|
||||
|
||||
CRITICAL root:test_log_cli_enabled_disabled.py:3 critical message logged by test
|
||||
|
||||
The formatting can be changed through the `log_format <https://docs.pytest.org/en/latest/reference.html#confval-log_format>`__ configuration option.
|
||||
|
||||
|
||||
- `#5220 <https://github.com/pytest-dev/pytest/issues/5220>`_: ``--fixtures`` now also shows fixture scope for scopes other than ``"function"``.
|
||||
|
||||
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- `#5113 <https://github.com/pytest-dev/pytest/issues/5113>`_: Deselected items from plugins using ``pytest_collect_modifyitems`` as a hookwrapper are correctly reported now.
|
||||
|
||||
|
||||
- `#5144 <https://github.com/pytest-dev/pytest/issues/5144>`_: With usage errors ``exitstatus`` is set to ``EXIT_USAGEERROR`` in the ``pytest_sessionfinish`` hook now as expected.
|
||||
|
||||
|
||||
- `#5235 <https://github.com/pytest-dev/pytest/issues/5235>`_: ``outcome.exit`` is not used with ``EOF`` in the pdb wrapper anymore, but only with ``quit``.
|
||||
|
||||
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- `#4935 <https://github.com/pytest-dev/pytest/issues/4935>`_: Expand docs on registering marks and the effect of ``--strict``.
|
||||
|
||||
|
||||
|
||||
Trivial/Internal Changes
|
||||
------------------------
|
||||
|
||||
- `#4942 <https://github.com/pytest-dev/pytest/issues/4942>`_: ``logging.raiseExceptions`` is not set to ``False`` anymore.
|
||||
|
||||
|
||||
- `#5013 <https://github.com/pytest-dev/pytest/issues/5013>`_: pytest now depends on `wcwidth <https://pypi.org/project/wcwidth>`__ to properly track unicode character sizes for more precise terminal output.
|
||||
|
||||
|
||||
- `#5059 <https://github.com/pytest-dev/pytest/issues/5059>`_: pytester's ``Testdir.popen()`` uses ``stdout`` and ``stderr`` via keyword arguments with defaults now (``subprocess.PIPE``).
|
||||
|
||||
|
||||
- `#5069 <https://github.com/pytest-dev/pytest/issues/5069>`_: The code for the short test summary in the terminal was moved to the terminal plugin.
|
||||
|
||||
|
||||
- `#5082 <https://github.com/pytest-dev/pytest/issues/5082>`_: Improved validation of kwargs for various methods in the pytester plugin.
|
||||
|
||||
|
||||
- `#5202 <https://github.com/pytest-dev/pytest/issues/5202>`_: ``record_property`` now emits a ``PytestWarning`` when used with ``junit_family=xunit2``: the fixture generates
|
||||
``property`` tags as children of ``testcase``, which is not permitted according to the most
|
||||
`recent schema <https://github.com/jenkinsci/xunit-plugin/blob/master/
|
||||
src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd>`__.
|
||||
|
||||
|
||||
- `#5239 <https://github.com/pytest-dev/pytest/issues/5239>`_: Pin ``pluggy`` to ``< 1.0`` so we don't update to ``1.0`` automatically when
|
||||
it gets released: there are planned breaking changes, and we want to ensure
|
||||
pytest properly supports ``pluggy 1.0``.
|
||||
|
||||
|
||||
pytest 4.4.2 (2019-05-08)
|
||||
=========================
|
||||
|
||||
@@ -494,7 +816,7 @@ Removals
|
||||
See our `docs <https://docs.pytest.org/en/latest/deprecations.html#passing-command-line-string-to-pytest-main>`__ on information on how to update your code.
|
||||
|
||||
|
||||
- `#3086 <https://github.com/pytest-dev/pytest/issues/3086>`_: ``[pytest]`` section in **setup.cfg** files is not longer supported, use ``[tool:pytest]`` instead. ``setup.cfg`` files
|
||||
- `#3086 <https://github.com/pytest-dev/pytest/issues/3086>`_: ``[pytest]`` section in **setup.cfg** files is no longer supported, use ``[tool:pytest]`` instead. ``setup.cfg`` files
|
||||
are meant for use with ``distutils``, and a section named ``pytest`` has notoriously been a source of conflicts and bugs.
|
||||
|
||||
Note that for **pytest.ini** and **tox.ini** files the section remains ``[pytest]``.
|
||||
@@ -1497,7 +1819,7 @@ Bug Fixes
|
||||
- `#2220 <https://github.com/pytest-dev/pytest/issues/2220>`_: Fix a bug where fixtures overridden by direct parameters (for example parametrization) were being instantiated even if they were not being used by a test.
|
||||
|
||||
|
||||
- `#3695 <https://github.com/pytest-dev/pytest/issues/3695>`_: Fix ``ApproxNumpy`` initialisation argument mixup, ``abs`` and ``rel`` tolerances were flipped causing strange comparsion results.
|
||||
- `#3695 <https://github.com/pytest-dev/pytest/issues/3695>`_: Fix ``ApproxNumpy`` initialisation argument mixup, ``abs`` and ``rel`` tolerances were flipped causing strange comparison results.
|
||||
Add tests to check ``abs`` and ``rel`` tolerances for ``np.array`` and test for expecting ``nan`` with ``np.array()``
|
||||
|
||||
|
||||
@@ -1716,7 +2038,7 @@ Features
|
||||
exits the debugger. On python 3.2 and higher, use CTRL+D. (`#3299
|
||||
<https://github.com/pytest-dev/pytest/issues/3299>`_)
|
||||
|
||||
- pytest not longer changes the log level of the root logger when the
|
||||
- pytest no longer changes the log level of the root logger when the
|
||||
``log-level`` parameter has greater numeric value than that of the level of
|
||||
the root logger, which makes it play better with custom logging configuration
|
||||
in user code. (`#3307 <https://github.com/pytest-dev/pytest/issues/3307>`_)
|
||||
|
||||
20
README.rst
20
README.rst
@@ -108,6 +108,26 @@ Changelog
|
||||
Consult the `Changelog <https://docs.pytest.org/en/latest/changelog.html>`__ page for fixes and enhancements of each version.
|
||||
|
||||
|
||||
Support pytest
|
||||
--------------
|
||||
|
||||
You can support pytest by obtaining a `Tidelift subscription`_.
|
||||
|
||||
Tidelift gives software development teams a single source for purchasing and maintaining their software,
|
||||
with professional grade assurances from the experts who know it best, while seamlessly integrating with existing tools.
|
||||
|
||||
|
||||
|
||||
.. _`Tidelift subscription`: https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=readme
|
||||
|
||||
|
||||
Security
|
||||
^^^^^^^^
|
||||
|
||||
pytest has never been associated with a security vulnerability, but in any case, to report a
|
||||
security vulnerability please use the `Tidelift security contact <https://tidelift.com/security>`_.
|
||||
Tidelift will coordinate the fix and disclosure.
|
||||
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
|
||||
57
TIDELIFT.rst
Normal file
57
TIDELIFT.rst
Normal file
@@ -0,0 +1,57 @@
|
||||
========
|
||||
Tidelift
|
||||
========
|
||||
|
||||
pytest is a member of `Tidelift`_. This document describes how the core team manages
|
||||
Tidelift-related activities.
|
||||
|
||||
What is it
|
||||
==========
|
||||
|
||||
Tidelift aims to make Open Source sustainable by offering subscriptions to companies which rely
|
||||
on Open Source packages. This subscription allows Tidelift to pay maintainers of those Open Source
|
||||
packages to aid sustainability of the work.
|
||||
|
||||
Funds
|
||||
=====
|
||||
|
||||
It was decided in the `mailing list`_ that the Tidelift contribution will be split evenly between
|
||||
members of the `contributors team`_ interested in receiving funding.
|
||||
|
||||
The current list of contributors receiving funding are:
|
||||
|
||||
* `@asottile`_
|
||||
* `@blueyed`_
|
||||
* `@nicoddemus`_
|
||||
|
||||
Contributors interested in receiving a part of the funds just need to submit a PR adding their
|
||||
name to the list. Contributors that want to stop receiving the funds should also submit a PR
|
||||
in the same way.
|
||||
|
||||
The PR should mention `@pytest-dev/tidelift-admins`_ so appropriate changes
|
||||
can be made in the Tidelift platform.
|
||||
|
||||
After the PR has been accepted and merged, the contributor should register in the `Tidelift`_
|
||||
platform and follow the instructions there, including signing an `agreement`_.
|
||||
|
||||
Admins
|
||||
======
|
||||
|
||||
A few people have admin access to the Tidelift dashboard to make changes. Those people
|
||||
are part of the `@pytest-dev/tidelift-admins`_ team.
|
||||
|
||||
`Core contributors`_ interested in helping out with Tidelift maintenance are welcome! We don't
|
||||
expect much work here other than the occasional adding/removal of a contributor from receiving
|
||||
funds. Just drop a line to one of the `@pytest-dev/tidelift-admins`_ or use the mailing list.
|
||||
|
||||
|
||||
.. _`Tidelift`: https://tidelift.com
|
||||
.. _`mailing list`: https://mail.python.org/pipermail/pytest-dev/2019-May/004716.html
|
||||
.. _`contributors team`: https://github.com/orgs/pytest-dev/teams/contributors
|
||||
.. _`core contributors`: https://github.com/orgs/pytest-dev/teams/core/members
|
||||
.. _`@pytest-dev/tidelift-admins`: https://github.com/orgs/pytest-dev/teams/tidelift-admins/members
|
||||
.. _`agreement`: https://tidelift.com/docs/lifting/agreement
|
||||
|
||||
.. _`@asottile`: https://github.com/asottile
|
||||
.. _`@blueyed`: https://github.com/blueyed
|
||||
.. _`@nicoddemus`: https://github.com/nicoddemus
|
||||
@@ -5,7 +5,6 @@ trigger:
|
||||
variables:
|
||||
PYTEST_ADDOPTS: "--junitxml=build/test-results/$(tox.env).xml -vv"
|
||||
python.needs_vc: False
|
||||
python.exe: "python"
|
||||
COVERAGE_FILE: "$(Build.Repository.LocalPath)/.coverage"
|
||||
COVERAGE_PROCESS_START: "$(Build.Repository.LocalPath)/.coveragerc"
|
||||
PYTEST_COVERAGE: '0'
|
||||
@@ -42,15 +41,13 @@ jobs:
|
||||
# Also seen with py27-nobyte (using xdist), and py27-xdist.
|
||||
# But no exception with py27-pexpect,py27-twisted,py27-numpy.
|
||||
PYTEST_COVERAGE: '1'
|
||||
pypy:
|
||||
python.version: 'pypy2'
|
||||
tox.env: 'pypy'
|
||||
python.exe: 'pypy'
|
||||
# NOTE: pypy3 fails to install pip currently due to an internal error.
|
||||
# -- pypy2 and pypy3 are disabled for now: #5279 --
|
||||
# pypy:
|
||||
# python.version: 'pypy2'
|
||||
# tox.env: 'pypy'
|
||||
# pypy3:
|
||||
# python.version: 'pypy3'
|
||||
# tox.env: 'pypy3'
|
||||
# python.exe: 'pypy3'
|
||||
py34-xdist:
|
||||
python.version: '3.4'
|
||||
tox.env: 'py34-xdist'
|
||||
@@ -94,12 +91,12 @@ jobs:
|
||||
condition: eq(variables['python.needs_vc'], True)
|
||||
displayName: 'Install VC for py27'
|
||||
|
||||
- script: $(python.exe) -m pip install --upgrade pip && $(python.exe) -m pip install tox
|
||||
- script: python -m pip install tox
|
||||
displayName: 'Install tox'
|
||||
|
||||
- script: |
|
||||
call scripts/setup-coverage-vars.bat || goto :eof
|
||||
$(python.exe) -m tox -e $(tox.env)
|
||||
python -m tox -e $(tox.env)
|
||||
displayName: 'Run tests'
|
||||
|
||||
- task: PublishTestResults@2
|
||||
@@ -112,6 +109,5 @@ jobs:
|
||||
displayName: 'Report and upload coverage'
|
||||
condition: eq(variables['PYTEST_COVERAGE'], '1')
|
||||
env:
|
||||
PYTHON: $(python.exe)
|
||||
CODECOV_TOKEN: $(CODECOV_TOKEN)
|
||||
PYTEST_CODECOV_NAME: $(tox.env)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import sys
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# 10000 iterations, just for relative comparison
|
||||
# 2.7.5 3.3.2
|
||||
# FilesCompleter 75.1109 69.2116
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
for i in range(1000):
|
||||
exec("def test_func_%d(): pass" % i)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import pytest
|
||||
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from six.moves import range
|
||||
|
||||
import pytest
|
||||
|
||||
@@ -43,7 +43,7 @@ clean:
|
||||
|
||||
regen: REGENDOC_FILES:=*.rst */*.rst
|
||||
regen:
|
||||
PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPTS=-pno:hypothesis COLUMNS=76 regendoc --update ${REGENDOC_FILES} ${REGENDOC_ARGS}
|
||||
PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPTS="-pno:hypothesis -Wignore::pytest.PytestUnknownMarkWarning" COLUMNS=76 regendoc --update ${REGENDOC_FILES} ${REGENDOC_ARGS}
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# flasky extensions. flasky pygments style based on tango style
|
||||
from pygments.style import Style
|
||||
from pygments.token import Comment
|
||||
|
||||
@@ -6,6 +6,14 @@ Release announcements
|
||||
:maxdepth: 2
|
||||
|
||||
|
||||
release-4.6.6
|
||||
release-4.6.5
|
||||
release-4.6.4
|
||||
release-4.6.3
|
||||
release-4.6.2
|
||||
release-4.6.1
|
||||
release-4.6.0
|
||||
release-4.5.0
|
||||
release-4.4.2
|
||||
release-4.4.1
|
||||
release-4.4.0
|
||||
|
||||
35
doc/en/announce/release-4.5.0.rst
Normal file
35
doc/en/announce/release-4.5.0.rst
Normal file
@@ -0,0 +1,35 @@
|
||||
pytest-4.5.0
|
||||
=======================================
|
||||
|
||||
The pytest team is proud to announce the 4.5.0 release!
|
||||
|
||||
pytest is a mature Python testing tool with more than 2000 tests
|
||||
against itself, passing on many different interpreters and platforms.
|
||||
|
||||
This release contains a number of bug fixes and improvements, so users are encouraged
|
||||
to take a look at the CHANGELOG:
|
||||
|
||||
https://docs.pytest.org/en/latest/changelog.html
|
||||
|
||||
For complete documentation, please visit:
|
||||
|
||||
https://docs.pytest.org/en/latest/
|
||||
|
||||
As usual, you can upgrade from pypi via:
|
||||
|
||||
pip install -U pytest
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* Floris Bruynooghe
|
||||
* Pulkit Goyal
|
||||
* Samuel Searles-Bryant
|
||||
* Zac Hatfield-Dodds
|
||||
* Zac-HD
|
||||
|
||||
|
||||
Happy testing,
|
||||
The Pytest Development Team
|
||||
43
doc/en/announce/release-4.6.0.rst
Normal file
43
doc/en/announce/release-4.6.0.rst
Normal file
@@ -0,0 +1,43 @@
|
||||
pytest-4.6.0
|
||||
=======================================
|
||||
|
||||
The pytest team is proud to announce the 4.6.0 release!
|
||||
|
||||
pytest is a mature Python testing tool with more than 2000 tests
|
||||
against itself, passing on many different interpreters and platforms.
|
||||
|
||||
This release contains a number of bug fixes and improvements, so users are encouraged
|
||||
to take a look at the CHANGELOG:
|
||||
|
||||
https://docs.pytest.org/en/latest/changelog.html
|
||||
|
||||
For complete documentation, please visit:
|
||||
|
||||
https://docs.pytest.org/en/latest/
|
||||
|
||||
As usual, you can upgrade from pypi via:
|
||||
|
||||
pip install -U pytest
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Akiomi Kamakura
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* David Röthlisberger
|
||||
* Evan Kepner
|
||||
* Jeffrey Rackauckas
|
||||
* MyComputer
|
||||
* Nikita Krokosh
|
||||
* Raul Tambre
|
||||
* Thomas Hisch
|
||||
* Tim Hoffmann
|
||||
* Tomer Keren
|
||||
* Victor Maryama
|
||||
* danielx123
|
||||
* oleg-yegorov
|
||||
|
||||
|
||||
Happy testing,
|
||||
The Pytest Development Team
|
||||
19
doc/en/announce/release-4.6.1.rst
Normal file
19
doc/en/announce/release-4.6.1.rst
Normal file
@@ -0,0 +1,19 @@
|
||||
pytest-4.6.1
|
||||
=======================================
|
||||
|
||||
pytest 4.6.1 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
18
doc/en/announce/release-4.6.2.rst
Normal file
18
doc/en/announce/release-4.6.2.rst
Normal file
@@ -0,0 +1,18 @@
|
||||
pytest-4.6.2
|
||||
=======================================
|
||||
|
||||
pytest 4.6.2 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
21
doc/en/announce/release-4.6.3.rst
Normal file
21
doc/en/announce/release-4.6.3.rst
Normal file
@@ -0,0 +1,21 @@
|
||||
pytest-4.6.3
|
||||
=======================================
|
||||
|
||||
pytest 4.6.3 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* Dirk Thomas
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
22
doc/en/announce/release-4.6.4.rst
Normal file
22
doc/en/announce/release-4.6.4.rst
Normal file
@@ -0,0 +1,22 @@
|
||||
pytest-4.6.4
|
||||
=======================================
|
||||
|
||||
pytest 4.6.4 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* Thomas Grainger
|
||||
* Zac Hatfield-Dodds
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
21
doc/en/announce/release-4.6.5.rst
Normal file
21
doc/en/announce/release-4.6.5.rst
Normal file
@@ -0,0 +1,21 @@
|
||||
pytest-4.6.5
|
||||
=======================================
|
||||
|
||||
pytest 4.6.5 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* Thomas Grainger
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
20
doc/en/announce/release-4.6.6.rst
Normal file
20
doc/en/announce/release-4.6.6.rst
Normal file
@@ -0,0 +1,20 @@
|
||||
pytest-4.6.6
|
||||
=======================================
|
||||
|
||||
pytest 4.6.6 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Michael Goerz
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
@@ -27,33 +27,39 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
name of your plugin or application to avoid clashes with other cache users.
|
||||
|
||||
Values can be any object handled by the json stdlib module.
|
||||
|
||||
capsys
|
||||
Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
|
||||
|
||||
The captured output is made available via ``capsys.readouterr()`` method
|
||||
calls, which return a ``(out, err)`` namedtuple.
|
||||
``out`` and ``err`` will be ``text`` objects.
|
||||
|
||||
capsysbinary
|
||||
Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
|
||||
|
||||
The captured output is made available via ``capsysbinary.readouterr()``
|
||||
method calls, which return a ``(out, err)`` namedtuple.
|
||||
``out`` and ``err`` will be ``bytes`` objects.
|
||||
|
||||
capfd
|
||||
Enable text capturing of writes to file descriptors ``1`` and ``2``.
|
||||
|
||||
The captured output is made available via ``capfd.readouterr()`` method
|
||||
calls, which return a ``(out, err)`` namedtuple.
|
||||
``out`` and ``err`` will be ``text`` objects.
|
||||
|
||||
capfdbinary
|
||||
Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
|
||||
|
||||
The captured output is made available via ``capfd.readouterr()`` method
|
||||
calls, which return a ``(out, err)`` namedtuple.
|
||||
``out`` and ``err`` will be ``bytes`` objects.
|
||||
doctest_namespace
|
||||
|
||||
doctest_namespace [session scope]
|
||||
Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
|
||||
pytestconfig
|
||||
|
||||
pytestconfig [session scope]
|
||||
Session-scoped fixture that returns the :class:`_pytest.config.Config` object.
|
||||
|
||||
Example::
|
||||
@@ -61,6 +67,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
def test_foo(pytestconfig):
|
||||
if pytestconfig.getoption("verbose") > 0:
|
||||
...
|
||||
|
||||
record_property
|
||||
Add extra properties to the calling test.
|
||||
User properties become part of the test report and are available to the
|
||||
@@ -72,10 +79,26 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
|
||||
def test_function(record_property):
|
||||
record_property("example_key", 1)
|
||||
|
||||
record_xml_attribute
|
||||
Add extra xml attributes to the tag for the calling test.
|
||||
The fixture is callable with ``(name, value)``, with value being
|
||||
automatically xml-encoded
|
||||
|
||||
record_testsuite_property [session scope]
|
||||
Records a new ``<property>`` tag as child of the root ``<testsuite>``. This is suitable for
|
||||
writing global information regarding the entire test suite, and is compatible with ``xunit2`` JUnit family.
|
||||
|
||||
This is a ``session``-scoped fixture which is called with ``(name, value)``. Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_foo(record_testsuite_property):
|
||||
record_testsuite_property("ARCH", "PPC")
|
||||
record_testsuite_property("STORAGE_TYPE", "CEPH")
|
||||
|
||||
``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped.
|
||||
|
||||
caplog
|
||||
Access and control log capturing.
|
||||
|
||||
@@ -85,6 +108,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
* caplog.records -> list of logging.LogRecord instances
|
||||
* caplog.record_tuples -> list of (logger_name, level, message) tuples
|
||||
* caplog.clear() -> clear captured records and formatted log output string
|
||||
|
||||
monkeypatch
|
||||
The returned ``monkeypatch`` fixture provides these
|
||||
helper methods to modify objects, dictionaries or os.environ::
|
||||
@@ -102,15 +126,19 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
test function or fixture has finished. The ``raising``
|
||||
parameter determines if a KeyError or AttributeError
|
||||
will be raised if the set/deletion operation has no target.
|
||||
|
||||
recwarn
|
||||
Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.
|
||||
|
||||
See http://docs.python.org/library/warnings.html for information
|
||||
on warning categories.
|
||||
tmpdir_factory
|
||||
|
||||
tmpdir_factory [session scope]
|
||||
Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.
|
||||
tmp_path_factory
|
||||
|
||||
tmp_path_factory [session scope]
|
||||
Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.
|
||||
|
||||
tmpdir
|
||||
Return a temporary directory path object
|
||||
which is unique to each test function invocation,
|
||||
@@ -119,6 +147,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
path object.
|
||||
|
||||
.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
|
||||
|
||||
tmp_path
|
||||
Return a temporary directory path object
|
||||
which is unique to each test function invocation,
|
||||
@@ -130,6 +159,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
|
||||
in python < 3.6 this is a pathlib2.Path
|
||||
|
||||
|
||||
no tests ran in 0.12 seconds
|
||||
|
||||
You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like::
|
||||
|
||||
@@ -247,7 +247,7 @@ See the :ref:`cache-api` for more details.
|
||||
|
||||
|
||||
Inspecting Cache content
|
||||
-------------------------------
|
||||
------------------------
|
||||
|
||||
You can always peek at the content of the cache using the
|
||||
``--cache-show`` command line option:
|
||||
@@ -260,7 +260,7 @@ You can always peek at the content of the cache using the
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
------------------------------- cache values -------------------------------
|
||||
--------------------------- cache values for '*' ---------------------------
|
||||
cache/lastfailed contains:
|
||||
{'test_50.py::test_num[17]': True,
|
||||
'test_50.py::test_num[25]': True,
|
||||
@@ -277,8 +277,25 @@ You can always peek at the content of the cache using the
|
||||
|
||||
======================= no tests ran in 0.12 seconds =======================
|
||||
|
||||
``--cache-show`` takes an optional argument to specify a glob pattern for
|
||||
filtering:
|
||||
|
||||
.. code-block:: pytest
|
||||
|
||||
$ pytest --cache-show example/*
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
----------------------- cache values for 'example/*' -----------------------
|
||||
example/value contains:
|
||||
42
|
||||
|
||||
======================= no tests ran in 0.12 seconds =======================
|
||||
|
||||
Clearing Cache content
|
||||
-------------------------------
|
||||
----------------------
|
||||
|
||||
You can instruct pytest to clear all cache files and values
|
||||
by adding the ``--cache-clear`` option like this:
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
collect_ignore = ["conf.py"]
|
||||
|
||||
@@ -50,6 +50,7 @@ Full pytest documentation
|
||||
projects
|
||||
faq
|
||||
contact
|
||||
tidelift
|
||||
|
||||
.. only:: html
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import _pytest._code
|
||||
import pytest
|
||||
from pytest import raises
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import py
|
||||
|
||||
import pytest
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
hello = "world"
|
||||
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import py
|
||||
|
||||
failure_demo = py.path.local(__file__).dirpath("failure_demo.py")
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
def setup_module(module):
|
||||
module.TestStateFullThing.classcount = 0
|
||||
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
collect_ignore = ["nonpython"]
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import pytest
|
||||
|
||||
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
def test_quick(setup):
|
||||
pass
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
def test_something(setup):
|
||||
assert setup.timecostly == 1
|
||||
|
||||
|
||||
@@ -4,7 +4,9 @@
|
||||
Working with custom markers
|
||||
=================================================
|
||||
|
||||
Here are some example using the :ref:`mark` mechanism.
|
||||
Here are some examples using the :ref:`mark` mechanism.
|
||||
|
||||
.. _`mark run`:
|
||||
|
||||
Marking test functions and selecting them for a run
|
||||
----------------------------------------------------
|
||||
@@ -259,7 +261,7 @@ For an example on how to add and work with markers from a plugin, see
|
||||
* Asking for existing markers via ``pytest --markers`` gives good output
|
||||
|
||||
* Typos in function markers are treated as an error if you use
|
||||
the ``--strict`` option.
|
||||
the ``--strict-markers`` option.
|
||||
|
||||
.. _`scoped-marking`:
|
||||
|
||||
@@ -619,9 +621,9 @@ then you will see two tests skipped and two executed tests as expected:
|
||||
collected 4 items
|
||||
|
||||
test_plat.py s.s. [100%]
|
||||
|
||||
========================= short test summary info ==========================
|
||||
SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux
|
||||
|
||||
=================== 2 passed, 2 skipped in 0.12 seconds ====================
|
||||
|
||||
Note that if you specify a platform via the marker-command line option like this:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
module containing a parametrized tests testing cross-python
|
||||
serialization via the pickle module.
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# content of conftest.py
|
||||
import pytest
|
||||
|
||||
|
||||
@@ -434,10 +434,11 @@ Running it results in some skips if we don't have all the python interpreters in
|
||||
.. code-block:: pytest
|
||||
|
||||
. $ pytest -rs -q multipython.py
|
||||
...sss...sssssssss...sss... [100%]
|
||||
...ssssssssssssssssssssssss [100%]
|
||||
========================= short test summary info ==========================
|
||||
SKIPPED [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.4' not found
|
||||
12 passed, 15 skipped in 0.12 seconds
|
||||
SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:31: 'python3.4' not found
|
||||
SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:31: 'python3.5' not found
|
||||
3 passed, 24 skipped in 0.12 seconds
|
||||
|
||||
Indirect parametrization of optional implementations/imports
|
||||
--------------------------------------------------------------------
|
||||
@@ -492,9 +493,9 @@ If you run this with reporting for skips enabled:
|
||||
collected 2 items
|
||||
|
||||
test_module.py .s [100%]
|
||||
========================= short test summary info ==========================
|
||||
SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2'
|
||||
|
||||
========================= short test summary info ==========================
|
||||
SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2': No module named 'opt2'
|
||||
=================== 1 passed, 1 skipped in 0.12 seconds ====================
|
||||
|
||||
You'll see that we don't have an ``opt2`` module and thus the second test run
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
def test_exception_syntax():
|
||||
try:
|
||||
0 / 0
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# run this with $ pytest --collect-only test_collectonly.py
|
||||
#
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> assert param1 * 2 < param2
|
||||
E assert (3 * 2) < 6
|
||||
|
||||
failure_demo.py:20: AssertionError
|
||||
failure_demo.py:21: AssertionError
|
||||
_________________________ TestFailing.test_simple __________________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0xdeadbeef>
|
||||
@@ -43,7 +43,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + where 42 = <function TestFailing.test_simple.<locals>.f at 0xdeadbeef>()
|
||||
E + and 43 = <function TestFailing.test_simple.<locals>.g at 0xdeadbeef>()
|
||||
|
||||
failure_demo.py:31: AssertionError
|
||||
failure_demo.py:32: AssertionError
|
||||
____________________ TestFailing.test_simple_multiline _____________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0xdeadbeef>
|
||||
@@ -51,7 +51,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
def test_simple_multiline(self):
|
||||
> otherfunc_multi(42, 6 * 9)
|
||||
|
||||
failure_demo.py:34:
|
||||
failure_demo.py:35:
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
|
||||
a = 42, b = 54
|
||||
@@ -60,7 +60,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> assert a == b
|
||||
E assert 42 == 54
|
||||
|
||||
failure_demo.py:15: AssertionError
|
||||
failure_demo.py:16: AssertionError
|
||||
___________________________ TestFailing.test_not ___________________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0xdeadbeef>
|
||||
@@ -73,7 +73,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E assert not 42
|
||||
E + where 42 = <function TestFailing.test_not.<locals>.f at 0xdeadbeef>()
|
||||
|
||||
failure_demo.py:40: AssertionError
|
||||
failure_demo.py:41: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_text _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -84,7 +84,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E - spam
|
||||
E + eggs
|
||||
|
||||
failure_demo.py:45: AssertionError
|
||||
failure_demo.py:46: AssertionError
|
||||
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -97,7 +97,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + foo 2 bar
|
||||
E ? ^
|
||||
|
||||
failure_demo.py:48: AssertionError
|
||||
failure_demo.py:49: AssertionError
|
||||
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -110,7 +110,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + eggs
|
||||
E bar
|
||||
|
||||
failure_demo.py:51: AssertionError
|
||||
failure_demo.py:52: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_long_text _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -127,7 +127,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + 1111111111b222222222
|
||||
E ? ^
|
||||
|
||||
failure_demo.py:56: AssertionError
|
||||
failure_demo.py:57: AssertionError
|
||||
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -147,7 +147,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E
|
||||
E ...Full output truncated (7 lines hidden), use '-vv' to show
|
||||
|
||||
failure_demo.py:61: AssertionError
|
||||
failure_demo.py:62: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_list _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -158,7 +158,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E At index 2 diff: 2 != 3
|
||||
E Use -v to get the full diff
|
||||
|
||||
failure_demo.py:64: AssertionError
|
||||
failure_demo.py:65: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_list_long _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -171,7 +171,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E At index 100 diff: 1 != 2
|
||||
E Use -v to get the full diff
|
||||
|
||||
failure_demo.py:69: AssertionError
|
||||
failure_demo.py:70: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_dict _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -182,14 +182,14 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E Omitting 1 identical items, use -vv to show
|
||||
E Differing items:
|
||||
E {'b': 1} != {'b': 2}
|
||||
E Left contains more items:
|
||||
E Left contains 1 more item:
|
||||
E {'c': 0}
|
||||
E Right contains more items:
|
||||
E Right contains 1 more item:
|
||||
E {'d': 0}...
|
||||
E
|
||||
E ...Full output truncated (2 lines hidden), use '-vv' to show
|
||||
|
||||
failure_demo.py:72: AssertionError
|
||||
failure_demo.py:73: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_set __________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -207,7 +207,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E
|
||||
E ...Full output truncated (2 lines hidden), use '-vv' to show
|
||||
|
||||
failure_demo.py:75: AssertionError
|
||||
failure_demo.py:76: AssertionError
|
||||
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -215,10 +215,10 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
def test_eq_longer_list(self):
|
||||
> assert [1, 2] == [1, 2, 3]
|
||||
E assert [1, 2] == [1, 2, 3]
|
||||
E Right contains more items, first extra item: 3
|
||||
E Right contains one more item: 3
|
||||
E Use -v to get the full diff
|
||||
|
||||
failure_demo.py:78: AssertionError
|
||||
failure_demo.py:79: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_in_list _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -227,7 +227,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> assert 1 in [0, 2, 3, 4, 5]
|
||||
E assert 1 in [0, 2, 3, 4, 5]
|
||||
|
||||
failure_demo.py:81: AssertionError
|
||||
failure_demo.py:82: AssertionError
|
||||
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -246,7 +246,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E
|
||||
E ...Full output truncated (2 lines hidden), use '-vv' to show
|
||||
|
||||
failure_demo.py:85: AssertionError
|
||||
failure_demo.py:86: AssertionError
|
||||
___________ TestSpecialisedExplanations.test_not_in_text_single ____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -259,7 +259,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E single foo line
|
||||
E ? +++
|
||||
|
||||
failure_demo.py:89: AssertionError
|
||||
failure_demo.py:90: AssertionError
|
||||
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -272,7 +272,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
|
||||
E ? +++
|
||||
|
||||
failure_demo.py:93: AssertionError
|
||||
failure_demo.py:94: AssertionError
|
||||
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -285,7 +285,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
|
||||
E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
failure_demo.py:97: AssertionError
|
||||
failure_demo.py:98: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_dataclass _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -306,7 +306,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E Differing attributes:
|
||||
E b: 'b' != 'c'
|
||||
|
||||
failure_demo.py:109: AssertionError
|
||||
failure_demo.py:110: AssertionError
|
||||
________________ TestSpecialisedExplanations.test_eq_attrs _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
|
||||
@@ -327,7 +327,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E Differing attributes:
|
||||
E b: 'b' != 'c'
|
||||
|
||||
failure_demo.py:121: AssertionError
|
||||
failure_demo.py:122: AssertionError
|
||||
______________________________ test_attribute ______________________________
|
||||
|
||||
def test_attribute():
|
||||
@@ -339,7 +339,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0xdeadbeef>.b
|
||||
|
||||
failure_demo.py:129: AssertionError
|
||||
failure_demo.py:130: AssertionError
|
||||
_________________________ test_attribute_instance __________________________
|
||||
|
||||
def test_attribute_instance():
|
||||
@@ -351,7 +351,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + where 1 = <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef>.b
|
||||
E + where <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()
|
||||
|
||||
failure_demo.py:136: AssertionError
|
||||
failure_demo.py:137: AssertionError
|
||||
__________________________ test_attribute_failure __________________________
|
||||
|
||||
def test_attribute_failure():
|
||||
@@ -364,7 +364,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
i = Foo()
|
||||
> assert i.b == 2
|
||||
|
||||
failure_demo.py:147:
|
||||
failure_demo.py:148:
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
|
||||
self = <failure_demo.test_attribute_failure.<locals>.Foo object at 0xdeadbeef>
|
||||
@@ -373,7 +373,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> raise Exception("Failed to get attrib")
|
||||
E Exception: Failed to get attrib
|
||||
|
||||
failure_demo.py:142: Exception
|
||||
failure_demo.py:143: Exception
|
||||
_________________________ test_attribute_multiple __________________________
|
||||
|
||||
def test_attribute_multiple():
|
||||
@@ -390,7 +390,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + and 2 = <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef>.b
|
||||
E + where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()
|
||||
|
||||
failure_demo.py:157: AssertionError
|
||||
failure_demo.py:158: AssertionError
|
||||
__________________________ TestRaises.test_raises __________________________
|
||||
|
||||
self = <failure_demo.TestRaises object at 0xdeadbeef>
|
||||
@@ -400,7 +400,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> raises(TypeError, int, s)
|
||||
E ValueError: invalid literal for int() with base 10: 'qwe'
|
||||
|
||||
failure_demo.py:167: ValueError
|
||||
failure_demo.py:168: ValueError
|
||||
______________________ TestRaises.test_raises_doesnt _______________________
|
||||
|
||||
self = <failure_demo.TestRaises object at 0xdeadbeef>
|
||||
@@ -409,7 +409,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> raises(IOError, int, "3")
|
||||
E Failed: DID NOT RAISE <class 'OSError'>
|
||||
|
||||
failure_demo.py:170: Failed
|
||||
failure_demo.py:171: Failed
|
||||
__________________________ TestRaises.test_raise ___________________________
|
||||
|
||||
self = <failure_demo.TestRaises object at 0xdeadbeef>
|
||||
@@ -418,7 +418,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> raise ValueError("demo error")
|
||||
E ValueError: demo error
|
||||
|
||||
failure_demo.py:173: ValueError
|
||||
failure_demo.py:174: ValueError
|
||||
________________________ TestRaises.test_tupleerror ________________________
|
||||
|
||||
self = <failure_demo.TestRaises object at 0xdeadbeef>
|
||||
@@ -427,7 +427,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> a, b = [1] # NOQA
|
||||
E ValueError: not enough values to unpack (expected 2, got 1)
|
||||
|
||||
failure_demo.py:176: ValueError
|
||||
failure_demo.py:177: ValueError
|
||||
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
|
||||
|
||||
self = <failure_demo.TestRaises object at 0xdeadbeef>
|
||||
@@ -436,9 +436,9 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
items = [1, 2, 3]
|
||||
print("items is %r" % items)
|
||||
> a, b = items.pop()
|
||||
E TypeError: 'int' object is not iterable
|
||||
E TypeError: cannot unpack non-iterable int object
|
||||
|
||||
failure_demo.py:181: TypeError
|
||||
failure_demo.py:182: TypeError
|
||||
--------------------------- Captured stdout call ---------------------------
|
||||
items is [1, 2, 3]
|
||||
________________________ TestRaises.test_some_error ________________________
|
||||
@@ -449,7 +449,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> if namenotexi: # NOQA
|
||||
E NameError: name 'namenotexi' is not defined
|
||||
|
||||
failure_demo.py:184: NameError
|
||||
failure_demo.py:185: NameError
|
||||
____________________ test_dynamic_compile_shows_nicely _____________________
|
||||
|
||||
def test_dynamic_compile_shows_nicely():
|
||||
@@ -464,14 +464,14 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
sys.modules[name] = module
|
||||
> module.foo()
|
||||
|
||||
failure_demo.py:202:
|
||||
failure_demo.py:203:
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
|
||||
def foo():
|
||||
> assert 1 == 0
|
||||
E AssertionError
|
||||
|
||||
<0-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:199>:2: AssertionError
|
||||
<0-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:200>:2: AssertionError
|
||||
____________________ TestMoreErrors.test_complex_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -485,9 +485,9 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
|
||||
> somefunc(f(), g())
|
||||
|
||||
failure_demo.py:213:
|
||||
failure_demo.py:214:
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
failure_demo.py:11: in somefunc
|
||||
failure_demo.py:12: in somefunc
|
||||
otherfunc(x, y)
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
|
||||
@@ -497,7 +497,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> assert a == b
|
||||
E assert 44 == 43
|
||||
|
||||
failure_demo.py:7: AssertionError
|
||||
failure_demo.py:8: AssertionError
|
||||
___________________ TestMoreErrors.test_z1_unpack_error ____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -507,7 +507,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> a, b = items
|
||||
E ValueError: not enough values to unpack (expected 2, got 0)
|
||||
|
||||
failure_demo.py:217: ValueError
|
||||
failure_demo.py:218: ValueError
|
||||
____________________ TestMoreErrors.test_z2_type_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -515,9 +515,9 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
def test_z2_type_error(self):
|
||||
items = 3
|
||||
> a, b = items
|
||||
E TypeError: 'int' object is not iterable
|
||||
E TypeError: cannot unpack non-iterable int object
|
||||
|
||||
failure_demo.py:221: TypeError
|
||||
failure_demo.py:222: TypeError
|
||||
______________________ TestMoreErrors.test_startswith ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -530,7 +530,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + where False = <built-in method startswith of str object at 0xdeadbeef>('456')
|
||||
E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith
|
||||
|
||||
failure_demo.py:226: AssertionError
|
||||
failure_demo.py:227: AssertionError
|
||||
__________________ TestMoreErrors.test_startswith_nested ___________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -549,7 +549,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + where '123' = <function TestMoreErrors.test_startswith_nested.<locals>.f at 0xdeadbeef>()
|
||||
E + and '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0xdeadbeef>()
|
||||
|
||||
failure_demo.py:235: AssertionError
|
||||
failure_demo.py:236: AssertionError
|
||||
_____________________ TestMoreErrors.test_global_func ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -560,7 +560,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E + where False = isinstance(43, float)
|
||||
E + where 43 = globf(42)
|
||||
|
||||
failure_demo.py:238: AssertionError
|
||||
failure_demo.py:239: AssertionError
|
||||
_______________________ TestMoreErrors.test_instance _______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -571,7 +571,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E assert 42 != 42
|
||||
E + where 42 = <failure_demo.TestMoreErrors object at 0xdeadbeef>.x
|
||||
|
||||
failure_demo.py:242: AssertionError
|
||||
failure_demo.py:243: AssertionError
|
||||
_______________________ TestMoreErrors.test_compare ________________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -581,7 +581,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E assert 11 < 5
|
||||
E + where 11 = globf(10)
|
||||
|
||||
failure_demo.py:245: AssertionError
|
||||
failure_demo.py:246: AssertionError
|
||||
_____________________ TestMoreErrors.test_try_finally ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -592,7 +592,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
> assert x == 0
|
||||
E assert 1 == 0
|
||||
|
||||
failure_demo.py:250: AssertionError
|
||||
failure_demo.py:251: AssertionError
|
||||
___________________ TestCustomAssertMsg.test_single_line ___________________
|
||||
|
||||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
@@ -607,7 +607,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E assert 1 == 2
|
||||
E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_single_line.<locals>.A'>.a
|
||||
|
||||
failure_demo.py:261: AssertionError
|
||||
failure_demo.py:262: AssertionError
|
||||
____________________ TestCustomAssertMsg.test_multiline ____________________
|
||||
|
||||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
@@ -626,7 +626,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E assert 1 == 2
|
||||
E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_multiline.<locals>.A'>.a
|
||||
|
||||
failure_demo.py:268: AssertionError
|
||||
failure_demo.py:269: AssertionError
|
||||
___________________ TestCustomAssertMsg.test_custom_repr ___________________
|
||||
|
||||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
@@ -648,5 +648,5 @@ Here is a nice run of several failures and how ``pytest`` presents things:
|
||||
E assert 1 == 2
|
||||
E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a
|
||||
|
||||
failure_demo.py:281: AssertionError
|
||||
failure_demo.py:282: AssertionError
|
||||
======================== 44 failed in 0.12 seconds =========================
|
||||
|
||||
@@ -194,9 +194,9 @@ and when running it will see a skipped "slow" test:
|
||||
collected 2 items
|
||||
|
||||
test_module.py .s [100%]
|
||||
|
||||
========================= short test summary info ==========================
|
||||
SKIPPED [1] test_module.py:8: need --runslow option to run
|
||||
|
||||
=================== 1 passed, 1 skipped in 0.12 seconds ====================
|
||||
|
||||
Or run it including the ``slow`` marked test:
|
||||
@@ -606,7 +606,7 @@ We can run this:
|
||||
file $REGENDOC_TMPDIR/b/test_error.py, line 1
|
||||
def test_root(db): # no db here, will error out
|
||||
E fixture 'db' not found
|
||||
> available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory
|
||||
> available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_testsuite_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory
|
||||
> use 'pytest --fixtures [testpath]' for help on them.
|
||||
|
||||
$REGENDOC_TMPDIR/b/test_error.py:1
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import pytest
|
||||
|
||||
xfail = pytest.mark.xfail
|
||||
|
||||
@@ -1179,6 +1179,8 @@ Given the tests file structure is:
|
||||
|
||||
conftest.py
|
||||
# content of tests/conftest.py
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def username():
|
||||
return 'username'
|
||||
|
||||
@@ -28,7 +28,7 @@ Install ``pytest``
|
||||
.. code-block:: bash
|
||||
|
||||
$ pytest --version
|
||||
This is pytest version 4.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py
|
||||
This is pytest version 4.x.y, imported from $PYTHON_PREFIX/lib/python3.7/site-packages/pytest.py
|
||||
|
||||
.. _`simpletest`:
|
||||
|
||||
|
||||
@@ -4,6 +4,117 @@ Historical Notes
|
||||
This page lists features or behavior from previous versions of pytest which have changed over the years. They are
|
||||
kept here as a historical note so users looking at old code can find documentation related to them.
|
||||
|
||||
|
||||
.. _marker-revamp:
|
||||
|
||||
Marker revamp and iteration
|
||||
---------------------------
|
||||
|
||||
.. versionchanged:: 3.6
|
||||
|
||||
pytest's marker implementation traditionally worked by simply updating the ``__dict__`` attribute of functions to cumulatively add markers. As a result, markers would unintentionally be passed along class hierarchies in surprising ways. Further, the API for retrieving them was inconsistent, as markers from parameterization would be stored differently than markers applied using the ``@pytest.mark`` decorator and markers added via ``node.add_marker``.
|
||||
|
||||
This state of things made it technically next to impossible to use data from markers correctly without having a deep understanding of the internals, leading to subtle and hard to understand bugs in more advanced usages.
|
||||
|
||||
Depending on how a marker got declared/changed one would get either a ``MarkerInfo`` which might contain markers from sibling classes,
|
||||
``MarkDecorators`` when marks came from parameterization or from a ``node.add_marker`` call, discarding prior marks. Also ``MarkerInfo`` acts like a single mark, when it in fact represents a merged view on multiple marks with the same name.
|
||||
|
||||
On top of that markers were not accessible in the same way for modules, classes, and functions/methods.
|
||||
In fact, markers were only accessible in functions, even if they were declared on classes/modules.
|
||||
|
||||
A new API to access markers has been introduced in pytest 3.6 in order to solve the problems with
|
||||
the initial design, providing the :func:`_pytest.nodes.Node.iter_markers` method to iterate over
|
||||
markers in a consistent manner and reworking the internals, which solved a great deal of problems
|
||||
with the initial design.
|
||||
|
||||
|
||||
.. _update marker code:
|
||||
|
||||
Updating code
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
The old ``Node.get_marker(name)`` function is considered deprecated because it returns an internal ``MarkerInfo`` object
|
||||
which contains the merged name, ``*args`` and ``**kwargs`` of all the markers which apply to that node.
|
||||
|
||||
In general there are two scenarios on how markers should be handled:
|
||||
|
||||
1. Marks overwrite each other. Order matters but you only want to think of your mark as a single item. E.g.
|
||||
``log_level('info')`` at a module level can be overwritten by ``log_level('debug')`` for a specific test.
|
||||
|
||||
In this case, use ``Node.get_closest_marker(name)``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# replace this:
|
||||
marker = item.get_marker("log_level")
|
||||
if marker:
|
||||
level = marker.args[0]
|
||||
|
||||
# by this:
|
||||
marker = item.get_closest_marker("log_level")
|
||||
if marker:
|
||||
level = marker.args[0]
|
||||
|
||||
2. Marks compose in an additive manner. E.g. ``skipif(condition)`` marks mean you just want to evaluate all of them,
|
||||
order doesn't even matter. You probably want to think of your marks as a set here.
|
||||
|
||||
In this case iterate over each mark and handle their ``*args`` and ``**kwargs`` individually.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# replace this
|
||||
skipif = item.get_marker("skipif")
|
||||
if skipif:
|
||||
for condition in skipif.args:
|
||||
# eval condition
|
||||
...
|
||||
|
||||
# by this:
|
||||
for skipif in item.iter_markers("skipif"):
|
||||
condition = skipif.args[0]
|
||||
# eval condition
|
||||
|
||||
|
||||
If you are unsure or have any questions, please consider opening
|
||||
`an issue <https://github.com/pytest-dev/pytest/issues>`_.
|
||||
|
||||
Related issues
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Here is a non-exhaustive list of issues fixed by the new implementation:
|
||||
|
||||
* Marks don't pick up nested classes (`#199 <https://github.com/pytest-dev/pytest/issues/199>`_).
|
||||
|
||||
* Markers stain on all related classes (`#568 <https://github.com/pytest-dev/pytest/issues/568>`_).
|
||||
|
||||
* Combining marks - args and kwargs calculation (`#2897 <https://github.com/pytest-dev/pytest/issues/2897>`_).
|
||||
|
||||
* ``request.node.get_marker('name')`` returns ``None`` for markers applied in classes (`#902 <https://github.com/pytest-dev/pytest/issues/902>`_).
|
||||
|
||||
* Marks applied in parametrize are stored as markdecorator (`#2400 <https://github.com/pytest-dev/pytest/issues/2400>`_).
|
||||
|
||||
* Fix marker interaction in a backward incompatible way (`#1670 <https://github.com/pytest-dev/pytest/issues/1670>`_).
|
||||
|
||||
* Refactor marks to get rid of the current "marks transfer" mechanism (`#2363 <https://github.com/pytest-dev/pytest/issues/2363>`_).
|
||||
|
||||
* Introduce FunctionDefinition node, use it in generate_tests (`#2522 <https://github.com/pytest-dev/pytest/issues/2522>`_).
|
||||
|
||||
* Remove named marker attributes and collect markers in items (`#891 <https://github.com/pytest-dev/pytest/issues/891>`_).
|
||||
|
||||
* skipif mark from parametrize hides module level skipif mark (`#1540 <https://github.com/pytest-dev/pytest/issues/1540>`_).
|
||||
|
||||
* skipif + parametrize not skipping tests (`#1296 <https://github.com/pytest-dev/pytest/issues/1296>`_).
|
||||
|
||||
* Marker transfer incompatible with inheritance (`#535 <https://github.com/pytest-dev/pytest/issues/535>`_).
|
||||
|
||||
More details can be found in the `original PR <https://github.com/pytest-dev/pytest/pull/3317>`_.
|
||||
|
||||
.. note::
|
||||
|
||||
in a future major release of pytest we will introduce class based markers,
|
||||
at which point markers will no longer be limited to instances of :py:class:`Mark`.
|
||||
|
||||
|
||||
cache plugin integrated into the core
|
||||
-------------------------------------
|
||||
|
||||
|
||||
165
doc/en/mark.rst
165
doc/en/mark.rst
@@ -1,9 +1,7 @@
|
||||
|
||||
.. _mark:
|
||||
|
||||
Marking test functions with attributes
|
||||
=================================================================
|
||||
|
||||
======================================
|
||||
|
||||
By using the ``pytest.mark`` helper you can easily set
|
||||
metadata on your test functions. There are
|
||||
@@ -17,8 +15,10 @@ some builtin markers, for example:
|
||||
to the same test function.
|
||||
|
||||
It's easy to create custom markers or to apply markers
|
||||
to whole test classes or modules. See :ref:`mark examples` for examples
|
||||
which also serve as documentation.
|
||||
to whole test classes or modules. Those markers can be used by plugins, and also
|
||||
are commonly used to :ref:`select tests <mark run>` on the command-line with the ``-m`` option.
|
||||
|
||||
See :ref:`mark examples` for examples which also serve as documentation.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -26,136 +26,53 @@ which also serve as documentation.
|
||||
:ref:`fixtures <fixtures>`.
|
||||
|
||||
|
||||
Raising errors on unknown marks: --strict
|
||||
-----------------------------------------
|
||||
Registering marks
|
||||
-----------------
|
||||
|
||||
When the ``--strict`` command-line flag is passed, any unknown marks applied
|
||||
with the ``@pytest.mark.name_of_the_mark`` decorator will trigger an error.
|
||||
Marks defined or added by pytest or by a plugin will not trigger an error.
|
||||
|
||||
Marks can be registered in ``pytest.ini`` like this:
|
||||
You can register custom marks in your ``pytest.ini`` file like this:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[pytest]
|
||||
markers =
|
||||
slow
|
||||
slow: marks tests as slow (deselect with '-m "not slow"')
|
||||
serial
|
||||
|
||||
This can be used to prevent users mistyping mark names by accident. Test suites that want to enforce this
|
||||
should add ``--strict`` to ``addopts``:
|
||||
Note that everything after the ``:`` is an optional description.
|
||||
|
||||
Alternatively, you can register new markers programmatically in a
|
||||
:ref:`pytest_configure <initialization-hooks>` hook:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line(
|
||||
"markers", "env(name): mark test to run only on named environment"
|
||||
)
|
||||
|
||||
|
||||
Registered marks appear in pytest's help text and do not emit warnings (see the next section). It
|
||||
is recommended that third-party plugins always :ref:`register their markers <registering-markers>`.
|
||||
|
||||
.. _unknown-marks:
|
||||
|
||||
Raising errors on unknown marks
|
||||
-------------------------------
|
||||
|
||||
Unregistered marks applied with the ``@pytest.mark.name_of_the_mark`` decorator
|
||||
will always emit a warning in order to avoid silently doing something
|
||||
surprising due to mis-typed names. As described in the previous section, you can disable
|
||||
the warning for custom marks by registering them in your ``pytest.ini`` file or
|
||||
using a custom ``pytest_configure`` hook.
|
||||
|
||||
When the ``--strict-markers`` command-line flag is passed, any unknown marks applied
|
||||
with the ``@pytest.mark.name_of_the_mark`` decorator will trigger an error. You can
|
||||
enforce this validation in your project by adding ``--strict-markers`` to ``addopts``:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[pytest]
|
||||
addopts = --strict
|
||||
addopts = --strict-markers
|
||||
markers =
|
||||
slow
|
||||
slow: marks tests as slow (deselect with '-m "not slow"')
|
||||
serial
|
||||
|
||||
|
||||
.. _marker-revamp:
|
||||
|
||||
Marker revamp and iteration
|
||||
---------------------------
|
||||
|
||||
|
||||
|
||||
pytest's marker implementation traditionally worked by simply updating the ``__dict__`` attribute of functions to cumulatively add markers. As a result, markers would unintentionally be passed along class hierarchies in surprising ways. Further, the API for retrieving them was inconsistent, as markers from parameterization would be stored differently than markers applied using the ``@pytest.mark`` decorator and markers added via ``node.add_marker``.
|
||||
|
||||
This state of things made it technically next to impossible to use data from markers correctly without having a deep understanding of the internals, leading to subtle and hard to understand bugs in more advanced usages.
|
||||
|
||||
Depending on how a marker got declared/changed one would get either a ``MarkerInfo`` which might contain markers from sibling classes,
|
||||
``MarkDecorators`` when marks came from parameterization or from a ``node.add_marker`` call, discarding prior marks. Also ``MarkerInfo`` acts like a single mark, when it in fact represents a merged view on multiple marks with the same name.
|
||||
|
||||
On top of that markers were not accessible in the same way for modules, classes, and functions/methods.
|
||||
In fact, markers were only accessible in functions, even if they were declared on classes/modules.
|
||||
|
||||
A new API to access markers has been introduced in pytest 3.6 in order to solve the problems with the initial design, providing the :func:`_pytest.nodes.Node.iter_markers` method to iterate over markers in a consistent manner and reworking the internals, which solved a great deal of problems with the initial design.
|
||||
|
||||
|
||||
.. _update marker code:
|
||||
|
||||
Updating code
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
The old ``Node.get_marker(name)`` function is considered deprecated because it returns an internal ``MarkerInfo`` object
|
||||
which contains the merged name, ``*args`` and ``**kwargs`` of all the markers which apply to that node.
|
||||
|
||||
In general there are two scenarios on how markers should be handled:
|
||||
|
||||
1. Marks overwrite each other. Order matters but you only want to think of your mark as a single item. E.g.
|
||||
``log_level('info')`` at a module level can be overwritten by ``log_level('debug')`` for a specific test.
|
||||
|
||||
In this case, use ``Node.get_closest_marker(name)``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# replace this:
|
||||
marker = item.get_marker("log_level")
|
||||
if marker:
|
||||
level = marker.args[0]
|
||||
|
||||
# by this:
|
||||
marker = item.get_closest_marker("log_level")
|
||||
if marker:
|
||||
level = marker.args[0]
|
||||
|
||||
2. Marks compose in an additive manner. E.g. ``skipif(condition)`` marks mean you just want to evaluate all of them,
|
||||
order doesn't even matter. You probably want to think of your marks as a set here.
|
||||
|
||||
In this case iterate over each mark and handle their ``*args`` and ``**kwargs`` individually.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# replace this
|
||||
skipif = item.get_marker("skipif")
|
||||
if skipif:
|
||||
for condition in skipif.args:
|
||||
# eval condition
|
||||
...
|
||||
|
||||
# by this:
|
||||
for skipif in item.iter_markers("skipif"):
|
||||
condition = skipif.args[0]
|
||||
# eval condition
|
||||
|
||||
|
||||
If you are unsure or have any questions, please consider opening
|
||||
`an issue <https://github.com/pytest-dev/pytest/issues>`_.
|
||||
|
||||
Related issues
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Here is a non-exhaustive list of issues fixed by the new implementation:
|
||||
|
||||
* Marks don't pick up nested classes (`#199 <https://github.com/pytest-dev/pytest/issues/199>`_).
|
||||
|
||||
* Markers stain on all related classes (`#568 <https://github.com/pytest-dev/pytest/issues/568>`_).
|
||||
|
||||
* Combining marks - args and kwargs calculation (`#2897 <https://github.com/pytest-dev/pytest/issues/2897>`_).
|
||||
|
||||
* ``request.node.get_marker('name')`` returns ``None`` for markers applied in classes (`#902 <https://github.com/pytest-dev/pytest/issues/902>`_).
|
||||
|
||||
* Marks applied in parametrize are stored as markdecorator (`#2400 <https://github.com/pytest-dev/pytest/issues/2400>`_).
|
||||
|
||||
* Fix marker interaction in a backward incompatible way (`#1670 <https://github.com/pytest-dev/pytest/issues/1670>`_).
|
||||
|
||||
* Refactor marks to get rid of the current "marks transfer" mechanism (`#2363 <https://github.com/pytest-dev/pytest/issues/2363>`_).
|
||||
|
||||
* Introduce FunctionDefinition node, use it in generate_tests (`#2522 <https://github.com/pytest-dev/pytest/issues/2522>`_).
|
||||
|
||||
* Remove named marker attributes and collect markers in items (`#891 <https://github.com/pytest-dev/pytest/issues/891>`_).
|
||||
|
||||
* skipif mark from parametrize hides module level skipif mark (`#1540 <https://github.com/pytest-dev/pytest/issues/1540>`_).
|
||||
|
||||
* skipif + parametrize not skipping tests (`#1296 <https://github.com/pytest-dev/pytest/issues/1296>`_).
|
||||
|
||||
* Marker transfer incompatible with inheritance (`#535 <https://github.com/pytest-dev/pytest/issues/535>`_).
|
||||
|
||||
More details can be found in the `original PR <https://github.com/pytest-dev/pytest/pull/3317>`_.
|
||||
|
||||
.. note::
|
||||
|
||||
in a future major release of pytest we will introduce class based markers,
|
||||
at which point markers will no longer be limited to instances of :py:class:`Mark`
|
||||
|
||||
@@ -16,7 +16,7 @@ and a discussion of its motivation.
|
||||
|
||||
|
||||
Simple example: monkeypatching functions
|
||||
---------------------------------------------------
|
||||
----------------------------------------
|
||||
|
||||
If you want to pretend that ``os.expanduser`` returns a certain
|
||||
directory, you can use the :py:meth:`monkeypatch.setattr` method to
|
||||
@@ -38,8 +38,8 @@ Here our test function monkeypatches ``os.path.expanduser`` and
|
||||
then calls into a function that calls it. After the test function
|
||||
finishes the ``os.path.expanduser`` modification will be undone.
|
||||
|
||||
example: preventing "requests" from remote operations
|
||||
------------------------------------------------------
|
||||
Global patch example: preventing "requests" from remote operations
|
||||
------------------------------------------------------------------
|
||||
|
||||
If you want to prevent the "requests" library from performing http
|
||||
requests in all your tests, you can do::
|
||||
@@ -81,6 +81,80 @@ so that any attempts within tests to create http requests will fail.
|
||||
See issue `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_ for details.
|
||||
|
||||
|
||||
Monkeypatching environment variables
|
||||
------------------------------------
|
||||
|
||||
If you are working with environment variables you often need to safely change the values
|
||||
or delete them from the system for testing purposes. ``Monkeypatch`` provides a mechanism
|
||||
to do this using the ``setenv`` and ``delenv`` methods. Our example code to test:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# contents of our original code file e.g. code.py
|
||||
import os
|
||||
|
||||
|
||||
def get_os_user_lower():
|
||||
"""Simple retrieval function.
|
||||
Returns lowercase USER or raises EnvironmentError."""
|
||||
username = os.getenv("USER")
|
||||
|
||||
if username is None:
|
||||
raise EnvironmentError("USER environment is not set.")
|
||||
|
||||
return username.lower()
|
||||
|
||||
There are two potential paths. First, the ``USER`` environment variable is set to a
|
||||
value. Second, the ``USER`` environment variable does not exist. Using ``monkeypatch``
|
||||
both paths can be safely tested without impacting the running environment:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# contents of our test file e.g. test_code.py
|
||||
import pytest
|
||||
|
||||
|
||||
def test_upper_to_lower(monkeypatch):
|
||||
"""Set the USER env var to assert the behavior."""
|
||||
monkeypatch.setenv("USER", "TestingUser")
|
||||
assert get_os_user_lower() == "testinguser"
|
||||
|
||||
|
||||
def test_raise_exception(monkeypatch):
|
||||
"""Remove the USER env var and assert EnvironmentError is raised."""
|
||||
monkeypatch.delenv("USER", raising=False)
|
||||
|
||||
with pytest.raises(EnvironmentError):
|
||||
_ = get_os_user_lower()
|
||||
|
||||
This behavior can be moved into ``fixture`` structures and shared across tests:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_env_user(monkeypatch):
|
||||
monkeypatch.setenv("USER", "TestingUser")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_env_missing(monkeypatch):
|
||||
monkeypatch.delenv("USER", raising=False)
|
||||
|
||||
|
||||
# Notice the tests reference the fixtures for mocks
|
||||
def test_upper_to_lower(mock_env_user):
|
||||
assert get_os_user_lower() == "testinguser"
|
||||
|
||||
|
||||
def test_raise_exception(mock_env_missing):
|
||||
with pytest.raises(EnvironmentError):
|
||||
_ = get_os_user_lower()
|
||||
|
||||
|
||||
|
||||
.. currentmodule:: _pytest.monkeypatch
|
||||
|
||||
API Reference
|
||||
|
||||
@@ -7,16 +7,20 @@ Python 3.4's last release is scheduled for
|
||||
`March 2019 <https://www.python.org/dev/peps/pep-0429/#release-schedule>`__. pytest is one of
|
||||
the participating projects of the https://python3statement.org.
|
||||
|
||||
We plan to drop support for Python 2.7 and 3.4 at the same time with the release of **pytest 5.0**,
|
||||
scheduled to be released by **mid-2019**. Thanks to the `python_requires <https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires>`__ ``setuptools`` option,
|
||||
The **pytest 4.6** series will be the last to support Python 2.7 and 3.4, and is scheduled
|
||||
to be released by **mid-2019**. **pytest 5.0** and onwards will support only Python 3.5+.
|
||||
|
||||
Thanks to the `python_requires`_ ``setuptools`` option,
|
||||
Python 2.7 and Python 3.4 users using a modern ``pip`` version
|
||||
will install the last compatible pytest ``4.X`` version automatically even if ``5.0`` or later
|
||||
will install the last pytest ``4.6`` version automatically even if ``5.0`` or later
|
||||
are available on PyPI.
|
||||
|
||||
During the period **from mid-2019 and 2020**, the pytest core team plans to make
|
||||
bug-fix releases of the pytest ``4.X`` series by back-porting patches to the ``4.x-maintenance``
|
||||
branch.
|
||||
While pytest ``5.0`` will be the new mainstream and development version, until **January 2020**
|
||||
the pytest core team plans to make bug-fix releases of the pytest ``4.6`` series by
|
||||
back-porting patches to the ``4.6-maintenance`` branch that affect Python 2 users.
|
||||
|
||||
**After 2020**, the core team will no longer actively back port-patches, but the ``4.x-maintenance``
|
||||
branch will continue to exist so the community itself can contribute patches. The
|
||||
core team will be happy to accept those patches and make new ``4.X`` releases **until mid-2020**.
|
||||
**After 2020**, the core team will no longer actively backport patches, but the ``4.6-maintenance``
|
||||
branch will continue to exist so the community itself can contribute patches. The core team will
|
||||
be happy to accept those patches and make new ``4.6`` releases **until mid-2020**.
|
||||
|
||||
.. _`python_requires`: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
|
||||
|
||||
@@ -424,6 +424,14 @@ record_property
|
||||
|
||||
.. autofunction:: _pytest.junitxml.record_property()
|
||||
|
||||
|
||||
record_testsuite_property
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Tutorial**: :ref:`record_testsuite_property example`.
|
||||
|
||||
.. autofunction:: _pytest.junitxml.record_testsuite_property()
|
||||
|
||||
caplog
|
||||
~~~~~~
|
||||
|
||||
@@ -573,6 +581,8 @@ Bootstrapping hooks called for plugins registered early enough (internal and set
|
||||
.. autofunction:: pytest_cmdline_parse
|
||||
.. autofunction:: pytest_cmdline_main
|
||||
|
||||
.. _`initialization-hooks`:
|
||||
|
||||
Initialization hooks
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
@@ -1077,6 +1087,22 @@ passed multiple times. The expected format is ``name=value``. For example::
|
||||
This tells pytest to ignore deprecation warnings and turn all other warnings
|
||||
into errors. For more information please refer to :ref:`warnings`.
|
||||
|
||||
|
||||
.. confval:: junit_duration_report
|
||||
|
||||
.. versionadded:: 4.1
|
||||
|
||||
Configures how durations are recorded into the JUnit XML report:
|
||||
|
||||
* ``total`` (the default): duration times reported include setup, call, and teardown times.
|
||||
* ``call``: duration times reported include only call times, excluding setup and teardown.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[pytest]
|
||||
junit_duration_report = call
|
||||
|
||||
|
||||
.. confval:: junit_family
|
||||
|
||||
.. versionadded:: 4.2
|
||||
@@ -1092,10 +1118,35 @@ passed multiple times. The expected format is ``name=value``. For example::
|
||||
[pytest]
|
||||
junit_family = xunit2
|
||||
|
||||
|
||||
.. confval:: junit_logging
|
||||
|
||||
.. versionadded:: 3.5
|
||||
|
||||
Configures if stdout/stderr should be written to the JUnit XML file. Valid values are
|
||||
``system-out``, ``system-err``, and ``no`` (the default).
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[pytest]
|
||||
junit_logging = system-out
|
||||
|
||||
|
||||
.. confval:: junit_log_passing_tests
|
||||
|
||||
.. versionadded:: 4.6
|
||||
|
||||
If ``junit_logging != "no"``, configures if the captured output should be written
|
||||
to the JUnit XML file for **passing** tests. Default is ``True``.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[pytest]
|
||||
junit_log_passing_tests = False
|
||||
|
||||
|
||||
.. confval:: junit_suite_name
|
||||
|
||||
|
||||
|
||||
To set the name of the root test suite xml item, you can configure the ``junit_suite_name`` option in your config file:
|
||||
|
||||
.. code-block:: ini
|
||||
@@ -1261,15 +1312,17 @@ passed multiple times. The expected format is ``name=value``. For example::
|
||||
|
||||
.. confval:: markers
|
||||
|
||||
When the ``--strict`` command-line argument is used, only known markers -
|
||||
defined in code by core pytest or some plugin - are allowed.
|
||||
You can list additional markers in this setting to add them to the whitelist.
|
||||
When the ``--strict-markers`` or ``--strict`` command-line arguments are used,
|
||||
only known markers - defined in code by core pytest or some plugin - are allowed.
|
||||
|
||||
You can list one marker name per line, indented from the option name.
|
||||
You can list additional markers in this setting to add them to the whitelist,
|
||||
in which case you probably want to add ``--strict-markers`` to ``addopts``
|
||||
to avoid future regressions:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[pytest]
|
||||
addopts = --strict-markers
|
||||
markers =
|
||||
slow
|
||||
serial
|
||||
|
||||
@@ -352,6 +352,7 @@ Running it with the report-on-xfail option gives this output:
|
||||
collected 7 items
|
||||
|
||||
xfail_demo.py xxxxxxx [100%]
|
||||
|
||||
========================= short test summary info ==========================
|
||||
XFAIL xfail_demo.py::test_hello
|
||||
XFAIL xfail_demo.py::test_hello2
|
||||
@@ -365,7 +366,6 @@ Running it with the report-on-xfail option gives this output:
|
||||
XFAIL xfail_demo.py::test_hello6
|
||||
reason: reason
|
||||
XFAIL xfail_demo.py::test_hello7
|
||||
|
||||
======================== 7 xfailed in 0.12 seconds =========================
|
||||
|
||||
.. _`skip/xfail with parametrize`:
|
||||
|
||||
4
doc/en/tidelift.rst
Normal file
4
doc/en/tidelift.rst
Normal file
@@ -0,0 +1,4 @@
|
||||
|
||||
|
||||
|
||||
.. include:: ../../TIDELIFT.rst
|
||||
@@ -231,11 +231,12 @@ Example:
|
||||
XFAIL test_example.py::test_xfail
|
||||
reason: xfailing this test
|
||||
XPASS test_example.py::test_xpass always xfail
|
||||
ERROR test_example.py::test_error
|
||||
FAILED test_example.py::test_fail
|
||||
ERROR test_example.py::test_error - assert 0
|
||||
FAILED test_example.py::test_fail - assert 0
|
||||
= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
|
||||
|
||||
The ``-r`` options accepts a number of characters after it, with ``a`` used above meaning "all except passes".
|
||||
The ``-r`` options accepts a number of characters after it, with ``a`` used
|
||||
above meaning "all except passes".
|
||||
|
||||
Here is the full list of available characters that can be used:
|
||||
|
||||
@@ -247,6 +248,7 @@ Here is the full list of available characters that can be used:
|
||||
- ``p`` - passed
|
||||
- ``P`` - passed with output
|
||||
- ``a`` - all except ``pP``
|
||||
- ``A`` - all
|
||||
|
||||
More than one character can be used, so for example to only see failed and skipped tests, you can execute:
|
||||
|
||||
@@ -279,7 +281,7 @@ More than one character can be used, so for example to only see failed and skipp
|
||||
|
||||
test_example.py:14: AssertionError
|
||||
========================= short test summary info ==========================
|
||||
FAILED test_example.py::test_fail
|
||||
FAILED test_example.py::test_fail - assert 0
|
||||
SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test
|
||||
= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
|
||||
|
||||
@@ -314,12 +316,12 @@ captured output:
|
||||
E assert 0
|
||||
|
||||
test_example.py:14: AssertionError
|
||||
========================= short test summary info ==========================
|
||||
PASSED test_example.py::test_ok
|
||||
================================== PASSES ==================================
|
||||
_________________________________ test_ok __________________________________
|
||||
--------------------------- Captured stdout call ---------------------------
|
||||
ok
|
||||
========================= short test summary info ==========================
|
||||
PASSED test_example.py::test_ok
|
||||
= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
|
||||
|
||||
.. _pdb-option:
|
||||
@@ -456,13 +458,6 @@ instead, configure the ``junit_duration_report`` option like this:
|
||||
record_property
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
|
||||
|
||||
|
||||
Fixture renamed from ``record_xml_property`` to ``record_property`` as user
|
||||
properties are now available to all reporters.
|
||||
``record_xml_property`` is now deprecated.
|
||||
|
||||
If you want to log additional information for a test, you can use the
|
||||
``record_property`` fixture:
|
||||
|
||||
@@ -520,9 +515,7 @@ Will result in:
|
||||
|
||||
.. warning::
|
||||
|
||||
``record_property`` is an experimental feature and may change in the future.
|
||||
|
||||
Also please note that using this feature will break any schema verification.
|
||||
Please note that using this feature will break schema verifications for the latest JUnitXML schema.
|
||||
This might be a problem when used with some CI servers.
|
||||
|
||||
record_xml_attribute
|
||||
@@ -585,43 +578,45 @@ Instead, this will add an attribute ``assertions="REQ-1234"`` inside the generat
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
|
||||
LogXML: add_global_property
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
.. warning::
|
||||
|
||||
Please note that using this feature will break schema verifications for the latest JUnitXML schema.
|
||||
This might be a problem when used with some CI servers.
|
||||
|
||||
.. _record_testsuite_property example:
|
||||
|
||||
If you want to add a properties node in the testsuite level, which may contain properties that are relevant
|
||||
to all testcases you can use ``LogXML.add_global_properties``
|
||||
record_testsuite_property
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. versionadded:: 4.5
|
||||
|
||||
If you want to add a properties node at the test-suite level, which may contain properties
|
||||
that are relevant to all tests, you can use the ``record_testsuite_property`` session-scoped fixture:
|
||||
|
||||
The ``record_testsuite_property`` session-scoped fixture can be used to add properties relevant
|
||||
to all tests.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def log_global_env_facts(f):
|
||||
|
||||
if pytest.config.pluginmanager.hasplugin("junitxml"):
|
||||
my_junit = getattr(pytest.config, "_xml", None)
|
||||
|
||||
my_junit.add_global_property("ARCH", "PPC")
|
||||
my_junit.add_global_property("STORAGE_TYPE", "CEPH")
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(log_global_env_facts.__name__)
|
||||
def start_and_prepare_env():
|
||||
pass
|
||||
@pytest.fixture(scope="session", autouse=True)
|
||||
def log_global_env_facts(record_testsuite_property):
|
||||
record_testsuite_property("ARCH", "PPC")
|
||||
record_testsuite_property("STORAGE_TYPE", "CEPH")
|
||||
|
||||
|
||||
class TestMe(object):
|
||||
def test_foo(self):
|
||||
assert True
|
||||
|
||||
This will add a property node below the testsuite node to the generated xml:
|
||||
The fixture is a callable which receives ``name`` and ``value`` of a ``<property>`` tag
|
||||
added at the test-suite level of the generated xml:
|
||||
|
||||
.. code-block:: xml
|
||||
|
||||
<testsuite errors="0" failures="0" name="pytest" skips="0" tests="1" time="0.006">
|
||||
<testsuite errors="0" failures="0" name="pytest" skipped="0" tests="1" time="0.006">
|
||||
<properties>
|
||||
<property name="ARCH" value="PPC"/>
|
||||
<property name="STORAGE_TYPE" value="CEPH"/>
|
||||
@@ -629,11 +624,11 @@ This will add a property node below the testsuite node to the generated xml:
|
||||
<testcase classname="test_me.TestMe" file="test_me.py" line="16" name="test_foo" time="0.000243663787842"/>
|
||||
</testsuite>
|
||||
|
||||
.. warning::
|
||||
``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped.
|
||||
|
||||
The generated XML is compatible with the latest ``xunit`` standard, contrary to `record_property`_
|
||||
and `record_xml_attribute`_.
|
||||
|
||||
This is an experimental feature, and its interface might be replaced
|
||||
by something more powerful and general in future versions. The
|
||||
functionality per-se will be kept.
|
||||
|
||||
Creating resultlog format files
|
||||
----------------------------------------------------
|
||||
|
||||
@@ -400,7 +400,7 @@ defines an ``__init__`` constructor, as this prevents the class from being insta
|
||||
|
||||
============================= warnings summary =============================
|
||||
test_pytest_warnings.py:1
|
||||
$REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestWarning: cannot collect test class 'Test' because it has a __init__ constructor
|
||||
$REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestCollectionWarning: cannot collect test class 'Test' because it has a __init__ constructor (from: test_pytest_warnings.py)
|
||||
class Test:
|
||||
|
||||
-- Docs: https://docs.pytest.org/en/latest/warnings.html
|
||||
@@ -415,8 +415,20 @@ The following warning types are used by pytest and are part of the public API:
|
||||
|
||||
.. autoclass:: pytest.PytestWarning
|
||||
|
||||
.. autoclass:: pytest.PytestAssertRewriteWarning
|
||||
|
||||
.. autoclass:: pytest.PytestCacheWarning
|
||||
|
||||
.. autoclass:: pytest.PytestCollectionWarning
|
||||
|
||||
.. autoclass:: pytest.PytestConfigWarning
|
||||
|
||||
.. autoclass:: pytest.PytestDeprecationWarning
|
||||
|
||||
.. autoclass:: pytest.RemovedInPytest4Warning
|
||||
|
||||
.. autoclass:: pytest.PytestExperimentalApiWarning
|
||||
|
||||
.. autoclass:: pytest.PytestUnhandledCoroutineWarning
|
||||
|
||||
.. autoclass:: pytest.PytestUnknownMarkWarning
|
||||
|
||||
.. autoclass:: pytest.RemovedInPytest4Warning
|
||||
|
||||
@@ -223,7 +223,6 @@ import ``helper.py`` normally. The contents of
|
||||
pytest.register_assert_rewrite("pytest_foo.helper")
|
||||
|
||||
|
||||
|
||||
Requiring/Loading plugins in a test module or conftest file
|
||||
-----------------------------------------------------------
|
||||
|
||||
@@ -286,6 +285,26 @@ the plugin manager like this:
|
||||
If you want to look at the names of existing plugins, use
|
||||
the ``--trace-config`` option.
|
||||
|
||||
|
||||
.. _registering-markers:
|
||||
|
||||
Registering custom markers
|
||||
--------------------------
|
||||
|
||||
If your plugin uses any markers, you should register them so that they appear in
|
||||
pytest's help text and do not :ref:`cause spurious warnings <unknown-marks>`.
|
||||
For example, the following plugin would register ``cool_marker`` and
|
||||
``mark_with`` for all users:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line("markers", "cool_marker: this one is for cool tests.")
|
||||
config.addinivalue_line(
|
||||
"markers", "mark_with(arg, arg2): this marker takes arguments."
|
||||
)
|
||||
|
||||
|
||||
Testing plugins
|
||||
---------------
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
|
||||
import py
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import sys
|
||||
from distutils.core import setup
|
||||
|
||||
|
||||
96
scripts/publish_gh_release_notes.py
Normal file
96
scripts/publish_gh_release_notes.py
Normal file
@@ -0,0 +1,96 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Script used to publish GitHub release notes extracted from CHANGELOG.rst.
|
||||
|
||||
This script is meant to be executed after a successful deployment in Travis.
|
||||
|
||||
Uses the following environment variables:
|
||||
|
||||
* GIT_TAG: the name of the tag of the current commit.
|
||||
* GH_RELEASE_NOTES_TOKEN: a personal access token with 'repo' permissions. It should be encrypted using:
|
||||
|
||||
$travis encrypt GH_RELEASE_NOTES_TOKEN=<token> -r pytest-dev/pytest
|
||||
|
||||
And the contents pasted in the ``deploy.env.secure`` section in the ``travis.yml`` file.
|
||||
|
||||
The script also requires ``pandoc`` to be previously installed in the system.
|
||||
|
||||
Requires Python3.6+.
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import github3
|
||||
import pypandoc
|
||||
|
||||
|
||||
def publish_github_release(slug, token, tag_name, body):
    """Create a GitHub release for *tag_name* on the repository given by *slug*.

    :param slug: ``owner/repo`` identifier of the repository.
    :param token: GitHub personal access token with ``repo`` permissions.
    :param tag_name: name of the tag to attach the release to.
    :param body: release notes text (Markdown).
    :return: the release object from github3, falsy on failure.
    """
    owner, repo_name = slug.split("/")
    gh = github3.login(token=token)
    repository = gh.repository(owner, repo_name)
    return repository.create_release(tag_name=tag_name, body=body)
|
||||
|
||||
|
||||
def parse_changelog(tag_name, changelog_path=None):
    """Extract the changelog section for a given version from ``CHANGELOG.rst``.

    Scans the changelog for a title line of the form
    ``pytest X.Y.Z (YYYY-MM-DD)`` matching *tag_name* and collects every line
    from that title up to (but excluding) the next version title.

    :param tag_name: version string as it appears in the title, e.g. ``"4.6.0"``.
    :param changelog_path: optional path to the changelog file; defaults to
        ``CHANGELOG.rst`` at the repository root (backward compatible).
    :return: the section text as a single string; empty if *tag_name* is not found.
    """
    if changelog_path is None:
        changelog_path = Path(__file__).parent.parent / "CHANGELOG.rst"
    changelog_lines = Path(changelog_path).read_text(encoding="UTF-8").splitlines()

    # \d+ (not \d) for the major version so releases >= 10.0.0 are matched too.
    title_regex = re.compile(r"pytest (\d+\.\d+\.\d+) \(\d{4}-\d{2}-\d{2}\)")
    consuming_version = False
    version_lines = []
    for line in changelog_lines:
        m = title_regex.match(line)
        if m:
            # found the version we want: start to consume lines until we find the next version title
            if m.group(1) == tag_name:
                consuming_version = True
            # found a new version title while parsing the version we want: break out
            elif consuming_version:
                break
        if consuming_version:
            version_lines.append(line)

    return "\n".join(version_lines)
|
||||
|
||||
|
||||
def convert_rst_to_md(text):
    """Convert reStructuredText *text* to Markdown via pandoc (requires pypandoc)."""
    markdown = pypandoc.convert_text(text, "md", format="rst")
    return markdown
|
||||
|
||||
|
||||
def main(argv):
    """Publish the release notes for the tag given on the command line or via Travis.

    :param argv: process arguments; ``argv[1]`` (if present) is the tag name,
        otherwise ``$TRAVIS_TAG`` is used.
    :return: process exit code — 0 on success, non-zero on missing
        configuration or publishing failure.
    """
    # Resolve the tag: explicit argument wins over the Travis environment.
    if len(argv) > 1:
        tag_name = argv[1]
    else:
        tag_name = os.environ.get("TRAVIS_TAG")
        if not tag_name:
            print("tag_name not given and $TRAVIS_TAG not set", file=sys.stderr)
            return 1

    # Both the token and the repository slug must come from the environment.
    token = os.environ.get("GH_RELEASE_NOTES_TOKEN")
    if not token:
        print("GH_RELEASE_NOTES_TOKEN not set", file=sys.stderr)
        return 1

    slug = os.environ.get("TRAVIS_REPO_SLUG")
    if not slug:
        print("TRAVIS_REPO_SLUG not set", file=sys.stderr)
        return 1

    # Extract the notes from the changelog and convert them for GitHub.
    notes_rst = parse_changelog(tag_name)
    notes_md = convert_rst_to_md(notes_rst)
    published = publish_github_release(slug, token, tag_name, notes_md)
    if not published:
        print("Could not publish release notes:", file=sys.stderr)
        print(notes_md, file=sys.stderr)
        return 5

    print()
    print(f"Release notes for {tag_name} published successfully:")
    print(f"https://github.com/{slug}/releases/tag/{tag_name}")
    print()
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Invoke development tasks.
|
||||
"""
|
||||
|
||||
@@ -6,11 +6,11 @@ if "%PYTEST_COVERAGE%" == "1" (
|
||||
) else (
|
||||
echo CODECOV_TOKEN NOT defined
|
||||
)
|
||||
%PYTHON% -m pip install codecov
|
||||
%PYTHON% -m coverage combine
|
||||
%PYTHON% -m coverage xml
|
||||
%PYTHON% -m coverage report -m
|
||||
scripts\retry %PYTHON% -m codecov --required -X gcov pycov search -f coverage.xml --name %PYTEST_CODECOV_NAME%
|
||||
python -m pip install codecov
|
||||
python -m coverage combine
|
||||
python -m coverage xml
|
||||
python -m coverage report -m
|
||||
scripts\retry python -m codecov --required -X gcov pycov search -f coverage.xml --name %PYTEST_CODECOV_NAME%
|
||||
) else (
|
||||
echo Skipping coverage upload, PYTEST_COVERAGE=%PYTEST_COVERAGE%
|
||||
)
|
||||
|
||||
7
setup.py
7
setup.py
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from setuptools import setup
|
||||
|
||||
# TODO: if py gets upgrade to >=1.6,
|
||||
@@ -5,7 +6,7 @@ from setuptools import setup
|
||||
INSTALL_REQUIRES = [
|
||||
"py>=1.5.0",
|
||||
"six>=1.10.0",
|
||||
"setuptools",
|
||||
"packaging",
|
||||
"attrs>=17.4.0",
|
||||
'more-itertools>=4.0.0,<6.0.0;python_version<="2.7"',
|
||||
'more-itertools>=4.0.0;python_version>"2.7"',
|
||||
@@ -13,7 +14,9 @@ INSTALL_REQUIRES = [
|
||||
'funcsigs>=1.0;python_version<"3.0"',
|
||||
'pathlib2>=2.2.0;python_version<"3.6"',
|
||||
'colorama;sys_platform=="win32"',
|
||||
"pluggy>=0.11",
|
||||
"pluggy>=0.12,<1.0",
|
||||
'importlib-metadata>=0.12;python_version<"3.8"',
|
||||
"wcwidth",
|
||||
]
|
||||
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
__all__ = ["__version__"]
|
||||
|
||||
try:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""allow bash-completion for argparse with argcomplete if installed
|
||||
needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
|
||||
to find the magic string, so _ARGCOMPLETE env. var is never set, and
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" python inspection/code generation API """
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# copied from python-2.7.3's traceback.py
|
||||
# CHANGES:
|
||||
# - some_str is replaced, trying to create unicode strings
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
@@ -564,14 +565,20 @@ class ExceptionInfo(object):
|
||||
|
||||
def match(self, regexp):
|
||||
"""
|
||||
Match the regular expression 'regexp' on the string representation of
|
||||
the exception. If it matches then True is returned (so that it is
|
||||
possible to write 'assert excinfo.match()'). If it doesn't match an
|
||||
AssertionError is raised.
|
||||
Check whether the regular expression 'regexp' is found in the string
|
||||
representation of the exception using ``re.search``. If it matches
|
||||
then True is returned (so that it is possible to write
|
||||
``assert excinfo.match()``). If it doesn't match an AssertionError is
|
||||
raised.
|
||||
"""
|
||||
__tracebackhide__ = True
|
||||
if not re.search(regexp, str(self.value)):
|
||||
assert 0, "Pattern '{!s}' not found in '{!s}'".format(regexp, self.value)
|
||||
value = (
|
||||
text_type(self.value) if isinstance(regexp, text_type) else str(self.value)
|
||||
)
|
||||
if not re.search(regexp, value):
|
||||
raise AssertionError(
|
||||
u"Pattern {!r} not found in {!r}".format(regexp, value)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import pprint
|
||||
|
||||
from six.moves import reprlib
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
support for presenting detailed information in failing assertions.
|
||||
"""
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Rewrite assertion AST to produce nice error messages"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -60,11 +61,13 @@ class AssertionRewritingHook(object):
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.fnpats = config.getini("python_files")
|
||||
try:
|
||||
self.fnpats = config.getini("python_files")
|
||||
except ValueError:
|
||||
self.fnpats = ["test_*.py", "*_test.py"]
|
||||
self.session = None
|
||||
self.modules = {}
|
||||
self._rewritten_names = set()
|
||||
self._register_with_pkg_resources()
|
||||
self._must_rewrite = set()
|
||||
# flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
|
||||
# which might result in infinite recursion (#3506)
|
||||
@@ -268,11 +271,13 @@ class AssertionRewritingHook(object):
|
||||
self._marked_for_rewrite_cache.clear()
|
||||
|
||||
def _warn_already_imported(self, name):
|
||||
from _pytest.warning_types import PytestWarning
|
||||
from _pytest.warning_types import PytestAssertRewriteWarning
|
||||
from _pytest.warnings import _issue_warning_captured
|
||||
|
||||
_issue_warning_captured(
|
||||
PytestWarning("Module already imported so cannot be rewritten: %s" % name),
|
||||
PytestAssertRewriteWarning(
|
||||
"Module already imported so cannot be rewritten: %s" % name
|
||||
),
|
||||
self.config.hook,
|
||||
stacklevel=5,
|
||||
)
|
||||
@@ -313,24 +318,6 @@ class AssertionRewritingHook(object):
|
||||
tp = desc[2]
|
||||
return tp == imp.PKG_DIRECTORY
|
||||
|
||||
@classmethod
|
||||
def _register_with_pkg_resources(cls):
|
||||
"""
|
||||
Ensure package resources can be loaded from this loader. May be called
|
||||
multiple times, as the operation is idempotent.
|
||||
"""
|
||||
try:
|
||||
import pkg_resources
|
||||
|
||||
# access an attribute in case a deferred importer is present
|
||||
pkg_resources.__name__
|
||||
except ImportError:
|
||||
return
|
||||
|
||||
# Since pytest tests are always located in the file system, the
|
||||
# DefaultProvider is appropriate.
|
||||
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
|
||||
|
||||
def get_data(self, pathname):
|
||||
"""Optional PEP302 get_data API.
|
||||
"""
|
||||
@@ -744,12 +731,12 @@ class AssertionRewriter(ast.NodeVisitor):
|
||||
|
||||
def display(self, expr):
|
||||
"""Call saferepr on the expression."""
|
||||
return self.helper("saferepr", expr)
|
||||
return self.helper("_saferepr", expr)
|
||||
|
||||
def helper(self, name, *args):
|
||||
"""Call a helper in this module."""
|
||||
py_name = ast.Name("@pytest_ar", ast.Load())
|
||||
attr = ast.Attribute(py_name, "_" + name, ast.Load())
|
||||
attr = ast.Attribute(py_name, name, ast.Load())
|
||||
return ast_Call(attr, list(args), [])
|
||||
|
||||
def builtin(self, name):
|
||||
@@ -819,11 +806,13 @@ class AssertionRewriter(ast.NodeVisitor):
|
||||
|
||||
"""
|
||||
if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
|
||||
from _pytest.warning_types import PytestWarning
|
||||
from _pytest.warning_types import PytestAssertRewriteWarning
|
||||
import warnings
|
||||
|
||||
warnings.warn_explicit(
|
||||
PytestWarning("assertion is always true, perhaps remove parentheses?"),
|
||||
PytestAssertRewriteWarning(
|
||||
"assertion is always true, perhaps remove parentheses?"
|
||||
),
|
||||
category=None,
|
||||
filename=str(self.module_path),
|
||||
lineno=assert_.lineno,
|
||||
@@ -849,14 +838,14 @@ class AssertionRewriter(ast.NodeVisitor):
|
||||
negation = ast.UnaryOp(ast.Not(), top_condition)
|
||||
self.statements.append(ast.If(negation, body, []))
|
||||
if assert_.msg:
|
||||
assertmsg = self.helper("format_assertmsg", assert_.msg)
|
||||
assertmsg = self.helper("_format_assertmsg", assert_.msg)
|
||||
explanation = "\n>assert " + explanation
|
||||
else:
|
||||
assertmsg = ast.Str("")
|
||||
explanation = "assert " + explanation
|
||||
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
|
||||
msg = self.pop_format_context(template)
|
||||
fmt = self.helper("format_explanation", msg)
|
||||
fmt = self.helper("_format_explanation", msg)
|
||||
err_name = ast.Name("AssertionError", ast.Load())
|
||||
exc = ast_Call(err_name, [fmt], [])
|
||||
if sys.version_info[0] >= 3:
|
||||
@@ -887,10 +876,10 @@ class AssertionRewriter(ast.NodeVisitor):
|
||||
val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE])
|
||||
send_warning = ast.parse(
|
||||
"""
|
||||
from _pytest.warning_types import PytestWarning
|
||||
from _pytest.warning_types import PytestAssertRewriteWarning
|
||||
from warnings import warn_explicit
|
||||
warn_explicit(
|
||||
PytestWarning('asserting the value None, please use "assert is None"'),
|
||||
PytestAssertRewriteWarning('asserting the value None, please use "assert is None"'),
|
||||
category=None,
|
||||
filename={filename!r},
|
||||
lineno={lineno},
|
||||
@@ -906,7 +895,7 @@ warn_explicit(
|
||||
# _should_repr_global_name() thinks it's acceptable.
|
||||
locs = ast_Call(self.builtin("locals"), [], [])
|
||||
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
|
||||
dorepr = self.helper("should_repr_global_name", name)
|
||||
dorepr = self.helper("_should_repr_global_name", name)
|
||||
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
|
||||
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
|
||||
return name, self.explanation_param(expr)
|
||||
@@ -942,7 +931,7 @@ warn_explicit(
|
||||
self.statements = body = inner
|
||||
self.statements = save
|
||||
self.on_failure = fail_save
|
||||
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
|
||||
expl_template = self.helper("_format_boolop", expl_list, ast.Num(is_or))
|
||||
expl = self.pop_format_context(expl_template)
|
||||
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
|
||||
|
||||
@@ -1067,7 +1056,7 @@ warn_explicit(
|
||||
left_res, left_expl = next_res, next_expl
|
||||
# Use pytest.assertion.util._reprcompare if that's available.
|
||||
expl_call = self.helper(
|
||||
"call_reprcompare",
|
||||
"_call_reprcompare",
|
||||
ast.Tuple(syms, ast.Load()),
|
||||
ast.Tuple(load_names, ast.Load()),
|
||||
ast.Tuple(expls, ast.Load()),
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Utilities for truncating assertion output.
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Utilities for assertion debugging"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -11,6 +12,7 @@ import _pytest._code
|
||||
from ..compat import Sequence
|
||||
from _pytest import outcomes
|
||||
from _pytest._io.saferepr import saferepr
|
||||
from _pytest.compat import ATTRS_EQ_FIELD
|
||||
|
||||
# The _reprcompare attribute on the util module is used by the new assertion
|
||||
# interpretation code and assertion rewriter to detect this plugin was
|
||||
@@ -285,20 +287,30 @@ def _compare_eq_iterable(left, right, verbose=0):
|
||||
|
||||
def _compare_eq_sequence(left, right, verbose=0):
|
||||
explanation = []
|
||||
for i in range(min(len(left), len(right))):
|
||||
len_left = len(left)
|
||||
len_right = len(right)
|
||||
for i in range(min(len_left, len_right)):
|
||||
if left[i] != right[i]:
|
||||
explanation += [u"At index %s diff: %r != %r" % (i, left[i], right[i])]
|
||||
break
|
||||
if len(left) > len(right):
|
||||
explanation += [
|
||||
u"Left contains more items, first extra item: %s"
|
||||
% saferepr(left[len(right)])
|
||||
]
|
||||
elif len(left) < len(right):
|
||||
explanation += [
|
||||
u"Right contains more items, first extra item: %s"
|
||||
% saferepr(right[len(left)])
|
||||
]
|
||||
len_diff = len_left - len_right
|
||||
|
||||
if len_diff:
|
||||
if len_diff > 0:
|
||||
dir_with_more = "Left"
|
||||
extra = saferepr(left[len_right])
|
||||
else:
|
||||
len_diff = 0 - len_diff
|
||||
dir_with_more = "Right"
|
||||
extra = saferepr(right[len_left])
|
||||
|
||||
if len_diff == 1:
|
||||
explanation += [u"%s contains one more item: %s" % (dir_with_more, extra)]
|
||||
else:
|
||||
explanation += [
|
||||
u"%s contains %d more items, first extra item: %s"
|
||||
% (dir_with_more, len_diff, extra)
|
||||
]
|
||||
return explanation
|
||||
|
||||
|
||||
@@ -319,7 +331,9 @@ def _compare_eq_set(left, right, verbose=0):
|
||||
|
||||
def _compare_eq_dict(left, right, verbose=0):
|
||||
explanation = []
|
||||
common = set(left).intersection(set(right))
|
||||
set_left = set(left)
|
||||
set_right = set(right)
|
||||
common = set_left.intersection(set_right)
|
||||
same = {k: left[k] for k in common if left[k] == right[k]}
|
||||
if same and verbose < 2:
|
||||
explanation += [u"Omitting %s identical items, use -vv to show" % len(same)]
|
||||
@@ -331,15 +345,23 @@ def _compare_eq_dict(left, right, verbose=0):
|
||||
explanation += [u"Differing items:"]
|
||||
for k in diff:
|
||||
explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})]
|
||||
extra_left = set(left) - set(right)
|
||||
if extra_left:
|
||||
explanation.append(u"Left contains more items:")
|
||||
extra_left = set_left - set_right
|
||||
len_extra_left = len(extra_left)
|
||||
if len_extra_left:
|
||||
explanation.append(
|
||||
u"Left contains %d more item%s:"
|
||||
% (len_extra_left, "" if len_extra_left == 1 else "s")
|
||||
)
|
||||
explanation.extend(
|
||||
pprint.pformat({k: left[k] for k in extra_left}).splitlines()
|
||||
)
|
||||
extra_right = set(right) - set(left)
|
||||
if extra_right:
|
||||
explanation.append(u"Right contains more items:")
|
||||
extra_right = set_right - set_left
|
||||
len_extra_right = len(extra_right)
|
||||
if len_extra_right:
|
||||
explanation.append(
|
||||
u"Right contains %d more item%s:"
|
||||
% (len_extra_right, "" if len_extra_right == 1 else "s")
|
||||
)
|
||||
explanation.extend(
|
||||
pprint.pformat({k: right[k] for k in extra_right}).splitlines()
|
||||
)
|
||||
@@ -353,7 +375,9 @@ def _compare_eq_cls(left, right, verbose, type_fns):
|
||||
fields_to_check = [field for field, info in all_fields.items() if info.compare]
|
||||
elif isattrs(left):
|
||||
all_fields = left.__attrs_attrs__
|
||||
fields_to_check = [field.name for field in all_fields if field.cmp]
|
||||
fields_to_check = [
|
||||
field.name for field in all_fields if getattr(field, ATTRS_EQ_FIELD)
|
||||
]
|
||||
|
||||
same = []
|
||||
diff = []
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
merged implementation of the cache provider
|
||||
|
||||
@@ -20,7 +21,7 @@ import pytest
|
||||
from .compat import _PY2 as PY2
|
||||
from .pathlib import Path
|
||||
from .pathlib import resolve_from_str
|
||||
from .pathlib import rmtree
|
||||
from .pathlib import rm_rf
|
||||
|
||||
README_CONTENT = u"""\
|
||||
# pytest cache directory #
|
||||
@@ -50,7 +51,7 @@ class Cache(object):
|
||||
def for_config(cls, config):
|
||||
cachedir = cls.cache_dir_from_config(config)
|
||||
if config.getoption("cacheclear") and cachedir.exists():
|
||||
rmtree(cachedir, force=True)
|
||||
rm_rf(cachedir)
|
||||
cachedir.mkdir()
|
||||
return cls(cachedir, config)
|
||||
|
||||
@@ -60,10 +61,10 @@ class Cache(object):
|
||||
|
||||
def warn(self, fmt, **args):
|
||||
from _pytest.warnings import _issue_warning_captured
|
||||
from _pytest.warning_types import PytestWarning
|
||||
from _pytest.warning_types import PytestCacheWarning
|
||||
|
||||
_issue_warning_captured(
|
||||
PytestWarning(fmt.format(**args) if args else fmt),
|
||||
PytestCacheWarning(fmt.format(**args) if args else fmt),
|
||||
self._config.hook,
|
||||
stacklevel=3,
|
||||
)
|
||||
@@ -157,18 +158,37 @@ class LFPlugin(object):
|
||||
self.active = any(config.getoption(key) for key in active_keys)
|
||||
self.lastfailed = config.cache.get("cache/lastfailed", {})
|
||||
self._previously_failed_count = None
|
||||
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
|
||||
self._report_status = None
|
||||
self._skipped_files = 0 # count skipped files during collection due to --lf
|
||||
|
||||
def last_failed_paths(self):
|
||||
"""Returns a set with all Paths()s of the previously failed nodeids (cached).
|
||||
"""
|
||||
try:
|
||||
return self._last_failed_paths
|
||||
except AttributeError:
|
||||
rootpath = Path(self.config.rootdir)
|
||||
result = {rootpath / nodeid.split("::")[0] for nodeid in self.lastfailed}
|
||||
result = {x for x in result if x.exists()}
|
||||
self._last_failed_paths = result
|
||||
return result
|
||||
|
||||
def pytest_ignore_collect(self, path):
|
||||
"""
|
||||
Ignore this file path if we are in --lf mode and it is not in the list of
|
||||
previously failed files.
|
||||
"""
|
||||
if self.active and self.config.getoption("lf") and path.isfile():
|
||||
last_failed_paths = self.last_failed_paths()
|
||||
if last_failed_paths:
|
||||
skip_it = Path(path) not in self.last_failed_paths()
|
||||
if skip_it:
|
||||
self._skipped_files += 1
|
||||
return skip_it
|
||||
|
||||
def pytest_report_collectionfinish(self):
|
||||
if self.active and self.config.getoption("verbose") >= 0:
|
||||
if not self._previously_failed_count:
|
||||
return None
|
||||
noun = "failure" if self._previously_failed_count == 1 else "failures"
|
||||
suffix = " first" if self.config.getoption("failedfirst") else ""
|
||||
mode = "rerun previous {count} {noun}{suffix}".format(
|
||||
count=self._previously_failed_count, suffix=suffix, noun=noun
|
||||
)
|
||||
return "run-last-failure: %s" % mode
|
||||
return "run-last-failure: %s" % self._report_status
|
||||
|
||||
def pytest_runtest_logreport(self, report):
|
||||
if (report.when == "call" and report.passed) or report.skipped:
|
||||
@@ -186,28 +206,51 @@ class LFPlugin(object):
|
||||
self.lastfailed[report.nodeid] = True
|
||||
|
||||
def pytest_collection_modifyitems(self, session, config, items):
|
||||
if self.active:
|
||||
if self.lastfailed:
|
||||
previously_failed = []
|
||||
previously_passed = []
|
||||
for item in items:
|
||||
if item.nodeid in self.lastfailed:
|
||||
previously_failed.append(item)
|
||||
else:
|
||||
previously_passed.append(item)
|
||||
self._previously_failed_count = len(previously_failed)
|
||||
if not previously_failed:
|
||||
# running a subset of all tests with recorded failures outside
|
||||
# of the set of tests currently executing
|
||||
return
|
||||
if not self.active:
|
||||
return
|
||||
|
||||
if self.lastfailed:
|
||||
previously_failed = []
|
||||
previously_passed = []
|
||||
for item in items:
|
||||
if item.nodeid in self.lastfailed:
|
||||
previously_failed.append(item)
|
||||
else:
|
||||
previously_passed.append(item)
|
||||
self._previously_failed_count = len(previously_failed)
|
||||
|
||||
if not previously_failed:
|
||||
# Running a subset of all tests with recorded failures
|
||||
# only outside of it.
|
||||
self._report_status = "%d known failures not in selected tests" % (
|
||||
len(self.lastfailed),
|
||||
)
|
||||
else:
|
||||
if self.config.getoption("lf"):
|
||||
items[:] = previously_failed
|
||||
config.hook.pytest_deselected(items=previously_passed)
|
||||
else:
|
||||
else: # --failedfirst
|
||||
items[:] = previously_failed + previously_passed
|
||||
elif self._no_failures_behavior == "none":
|
||||
|
||||
noun = "failure" if self._previously_failed_count == 1 else "failures"
|
||||
suffix = " first" if self.config.getoption("failedfirst") else ""
|
||||
self._report_status = "rerun previous {count} {noun}{suffix}".format(
|
||||
count=self._previously_failed_count, suffix=suffix, noun=noun
|
||||
)
|
||||
|
||||
if self._skipped_files > 0:
|
||||
files_noun = "file" if self._skipped_files == 1 else "files"
|
||||
self._report_status += " (skipped {files} {files_noun})".format(
|
||||
files=self._skipped_files, files_noun=files_noun
|
||||
)
|
||||
else:
|
||||
self._report_status = "no previously failed tests, "
|
||||
if self.config.getoption("last_failed_no_failures") == "none":
|
||||
self._report_status += "deselecting all items."
|
||||
config.hook.pytest_deselected(items=items)
|
||||
items[:] = []
|
||||
else:
|
||||
self._report_status += "not deselecting items."
|
||||
|
||||
def pytest_sessionfinish(self, session):
|
||||
config = self.config
|
||||
@@ -282,9 +325,13 @@ def pytest_addoption(parser):
|
||||
)
|
||||
group.addoption(
|
||||
"--cache-show",
|
||||
action="store_true",
|
||||
action="append",
|
||||
nargs="?",
|
||||
dest="cacheshow",
|
||||
help="show cache contents, don't perform collection or tests",
|
||||
help=(
|
||||
"show cache contents, don't perform collection or tests. "
|
||||
"Optional argument: glob (default: '*')."
|
||||
),
|
||||
)
|
||||
group.addoption(
|
||||
"--cache-clear",
|
||||
@@ -303,8 +350,7 @@ def pytest_addoption(parser):
|
||||
dest="last_failed_no_failures",
|
||||
choices=("all", "none"),
|
||||
default="all",
|
||||
help="change the behavior when no test failed in the last run or no "
|
||||
"information about the last failures was found in the cache",
|
||||
help="which tests to run with no previously (known) failures.",
|
||||
)
|
||||
|
||||
|
||||
@@ -360,11 +406,16 @@ def cacheshow(config, session):
|
||||
if not config.cache._cachedir.is_dir():
|
||||
tw.line("cache is empty")
|
||||
return 0
|
||||
|
||||
glob = config.option.cacheshow[0]
|
||||
if glob is None:
|
||||
glob = "*"
|
||||
|
||||
dummy = object()
|
||||
basedir = config.cache._cachedir
|
||||
vdir = basedir / "v"
|
||||
tw.sep("-", "cache values")
|
||||
for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
|
||||
tw.sep("-", "cache values for %r" % glob)
|
||||
for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()):
|
||||
key = valpath.relative_to(vdir)
|
||||
val = config.cache.get(key, dummy)
|
||||
if val is dummy:
|
||||
@@ -376,8 +427,8 @@ def cacheshow(config, session):
|
||||
|
||||
ddir = basedir / "d"
|
||||
if ddir.is_dir():
|
||||
contents = sorted(ddir.rglob("*"))
|
||||
tw.sep("-", "cache directories")
|
||||
contents = sorted(ddir.rglob(glob))
|
||||
tw.sep("-", "cache directories for %r" % glob)
|
||||
for p in contents:
|
||||
# if p.check(dir=1):
|
||||
# print("%s/" % p.relto(basedir))
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
per-test stdout/stderr capturing mechanism.
|
||||
|
||||
@@ -56,13 +57,6 @@ def pytest_load_initial_conftests(early_config, parser, args):
|
||||
# make sure that capturemanager is properly reset at final shutdown
|
||||
early_config.add_cleanup(capman.stop_global_capturing)
|
||||
|
||||
# make sure logging does not raise exceptions at the end
|
||||
def silence_logging_at_shutdown():
|
||||
if "logging" in sys.modules:
|
||||
sys.modules["logging"].raiseExceptions = False
|
||||
|
||||
early_config.add_cleanup(silence_logging_at_shutdown)
|
||||
|
||||
# finally trigger conftest loading but while capturing (issue93)
|
||||
capman.start_global_capturing()
|
||||
outcome = yield
|
||||
@@ -365,8 +359,7 @@ class CaptureFixture(object):
|
||||
self._captured_err = self.captureclass.EMPTY_BUFFER
|
||||
|
||||
def _start(self):
|
||||
# Start if not started yet
|
||||
if getattr(self, "_capture", None) is None:
|
||||
if self._capture is None:
|
||||
self._capture = MultiCapture(
|
||||
out=True, err=True, in_=False, Capture=self.captureclass
|
||||
)
|
||||
@@ -396,11 +389,13 @@ class CaptureFixture(object):
|
||||
|
||||
def _suspend(self):
|
||||
"""Suspends this fixture's own capturing temporarily."""
|
||||
self._capture.suspend_capturing()
|
||||
if self._capture is not None:
|
||||
self._capture.suspend_capturing()
|
||||
|
||||
def _resume(self):
|
||||
"""Resumes this fixture's own capturing temporarily."""
|
||||
self._capture.resume_capturing()
|
||||
if self._capture is not None:
|
||||
self._capture.resume_capturing()
|
||||
|
||||
@contextlib.contextmanager
|
||||
def disabled(self):
|
||||
@@ -454,6 +449,10 @@ class EncodedFile(object):
|
||||
"""Ensure that file.name is a string."""
|
||||
return repr(self.buffer)
|
||||
|
||||
@property
|
||||
def mode(self):
|
||||
return self.buffer.mode.replace("b", "")
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(object.__getattribute__(self, "buffer"), name)
|
||||
|
||||
@@ -463,6 +462,7 @@ CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"])
|
||||
|
||||
class MultiCapture(object):
|
||||
out = err = in_ = None
|
||||
_state = None
|
||||
|
||||
def __init__(self, out=True, err=True, in_=True, Capture=None):
|
||||
if in_:
|
||||
@@ -473,9 +473,16 @@ class MultiCapture(object):
|
||||
self.err = Capture(2)
|
||||
|
||||
def __repr__(self):
|
||||
return "<MultiCapture out=%r err=%r in_=%r>" % (self.out, self.err, self.in_)
|
||||
return "<MultiCapture out=%r err=%r in_=%r _state=%r _in_suspended=%r>" % (
|
||||
self.out,
|
||||
self.err,
|
||||
self.in_,
|
||||
self._state,
|
||||
getattr(self, "_in_suspended", "<UNSET>"),
|
||||
)
|
||||
|
||||
def start_capturing(self):
|
||||
self._state = "started"
|
||||
if self.in_:
|
||||
self.in_.start()
|
||||
if self.out:
|
||||
@@ -493,6 +500,7 @@ class MultiCapture(object):
|
||||
return out, err
|
||||
|
||||
def suspend_capturing(self, in_=False):
|
||||
self._state = "suspended"
|
||||
if self.out:
|
||||
self.out.suspend()
|
||||
if self.err:
|
||||
@@ -502,6 +510,7 @@ class MultiCapture(object):
|
||||
self._in_suspended = True
|
||||
|
||||
def resume_capturing(self):
|
||||
self._state = "resumed"
|
||||
if self.out:
|
||||
self.out.resume()
|
||||
if self.err:
|
||||
@@ -512,9 +521,9 @@ class MultiCapture(object):
|
||||
|
||||
def stop_capturing(self):
|
||||
""" stop capturing and reset capturing streams """
|
||||
if hasattr(self, "_reset"):
|
||||
if self._state == "stopped":
|
||||
raise ValueError("was already stopped")
|
||||
self._reset = True
|
||||
self._state = "stopped"
|
||||
if self.out:
|
||||
self.out.done()
|
||||
if self.err:
|
||||
@@ -542,6 +551,7 @@ class FDCaptureBinary(object):
|
||||
"""
|
||||
|
||||
EMPTY_BUFFER = b""
|
||||
_state = None
|
||||
|
||||
def __init__(self, targetfd, tmpfile=None):
|
||||
self.targetfd = targetfd
|
||||
@@ -568,9 +578,10 @@ class FDCaptureBinary(object):
|
||||
self.tmpfile_fd = tmpfile.fileno()
|
||||
|
||||
def __repr__(self):
|
||||
return "<FDCapture %s oldfd=%s>" % (
|
||||
return "<FDCapture %s oldfd=%s _state=%r>" % (
|
||||
self.targetfd,
|
||||
getattr(self, "targetfd_save", None),
|
||||
self._state,
|
||||
)
|
||||
|
||||
def start(self):
|
||||
@@ -581,6 +592,7 @@ class FDCaptureBinary(object):
|
||||
raise ValueError("saved filedescriptor not valid anymore")
|
||||
os.dup2(self.tmpfile_fd, self.targetfd)
|
||||
self.syscapture.start()
|
||||
self._state = "started"
|
||||
|
||||
def snap(self):
|
||||
self.tmpfile.seek(0)
|
||||
@@ -597,14 +609,17 @@ class FDCaptureBinary(object):
|
||||
os.close(targetfd_save)
|
||||
self.syscapture.done()
|
||||
_attempt_to_close_capture_file(self.tmpfile)
|
||||
self._state = "done"
|
||||
|
||||
def suspend(self):
|
||||
self.syscapture.suspend()
|
||||
os.dup2(self.targetfd_save, self.targetfd)
|
||||
self._state = "suspended"
|
||||
|
||||
def resume(self):
|
||||
self.syscapture.resume()
|
||||
os.dup2(self.tmpfile_fd, self.targetfd)
|
||||
self._state = "resumed"
|
||||
|
||||
def writeorg(self, data):
|
||||
""" write to original file descriptor. """
|
||||
@@ -632,6 +647,7 @@ class FDCapture(FDCaptureBinary):
|
||||
class SysCapture(object):
|
||||
|
||||
EMPTY_BUFFER = str()
|
||||
_state = None
|
||||
|
||||
def __init__(self, fd, tmpfile=None):
|
||||
name = patchsysdict[fd]
|
||||
@@ -644,8 +660,17 @@ class SysCapture(object):
|
||||
tmpfile = CaptureIO()
|
||||
self.tmpfile = tmpfile
|
||||
|
||||
def __repr__(self):
|
||||
return "<SysCapture %s _old=%r, tmpfile=%r _state=%r>" % (
|
||||
self.name,
|
||||
self._old,
|
||||
self.tmpfile,
|
||||
self._state,
|
||||
)
|
||||
|
||||
def start(self):
|
||||
setattr(sys, self.name, self.tmpfile)
|
||||
self._state = "started"
|
||||
|
||||
def snap(self):
|
||||
res = self.tmpfile.getvalue()
|
||||
@@ -657,12 +682,15 @@ class SysCapture(object):
|
||||
setattr(sys, self.name, self._old)
|
||||
del self._old
|
||||
_attempt_to_close_capture_file(self.tmpfile)
|
||||
self._state = "done"
|
||||
|
||||
def suspend(self):
|
||||
setattr(sys, self.name, self._old)
|
||||
self._state = "suspended"
|
||||
|
||||
def resume(self):
|
||||
setattr(sys, self.name, self.tmpfile)
|
||||
self._state = "resumed"
|
||||
|
||||
def writeorg(self, data):
|
||||
self._old.write(data)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
python version compatibility code
|
||||
"""
|
||||
@@ -12,6 +13,7 @@ import re
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
|
||||
import attr
|
||||
import py
|
||||
import six
|
||||
from six import text_type
|
||||
@@ -36,7 +38,6 @@ if _PY3:
|
||||
else:
|
||||
from funcsigs import signature, Parameter as Parameter
|
||||
|
||||
NoneType = type(None)
|
||||
NOTSET = object()
|
||||
|
||||
PY35 = sys.version_info[:2] >= (3, 5)
|
||||
@@ -61,6 +62,12 @@ else:
|
||||
return None
|
||||
|
||||
|
||||
if sys.version_info >= (3, 8):
|
||||
from importlib import metadata as importlib_metadata # noqa
|
||||
else:
|
||||
import importlib_metadata # noqa
|
||||
|
||||
|
||||
def _format_args(func):
|
||||
return str(signature(func))
|
||||
|
||||
@@ -377,7 +384,7 @@ if _PY3:
|
||||
else:
|
||||
|
||||
def safe_str(v):
|
||||
"""returns v as string, converting to ascii if necessary"""
|
||||
"""returns v as string, converting to utf-8 if necessary"""
|
||||
try:
|
||||
return str(v)
|
||||
except UnicodeError:
|
||||
@@ -406,8 +413,8 @@ def _setup_collect_fakemodule():
|
||||
|
||||
pytest.collect = ModuleType("pytest.collect")
|
||||
pytest.collect.__all__ = [] # used for setns
|
||||
for attr in COLLECT_FAKEMODULE_ATTRIBUTES:
|
||||
setattr(pytest.collect, attr, getattr(pytest, attr))
|
||||
for attribute in COLLECT_FAKEMODULE_ATTRIBUTES:
|
||||
setattr(pytest.collect, attribute, getattr(pytest, attribute))
|
||||
|
||||
|
||||
if _PY2:
|
||||
@@ -455,3 +462,9 @@ if six.PY2:
|
||||
|
||||
else:
|
||||
from functools import lru_cache # noqa: F401
|
||||
|
||||
|
||||
if getattr(attr, "__version_info__", ()) >= (19, 2):
|
||||
ATTRS_EQ_FIELD = "eq"
|
||||
else:
|
||||
ATTRS_EQ_FIELD = "cmp"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" command line options, ini-file and conftest.py processing. """
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -14,6 +15,7 @@ import warnings
|
||||
|
||||
import py
|
||||
import six
|
||||
from packaging.version import Version
|
||||
from pluggy import HookimplMarker
|
||||
from pluggy import HookspecMarker
|
||||
from pluggy import PluginManager
|
||||
@@ -28,11 +30,12 @@ from .findpaths import exists
|
||||
from _pytest import deprecated
|
||||
from _pytest._code import ExceptionInfo
|
||||
from _pytest._code import filter_traceback
|
||||
from _pytest.compat import importlib_metadata
|
||||
from _pytest.compat import lru_cache
|
||||
from _pytest.compat import safe_str
|
||||
from _pytest.outcomes import fail
|
||||
from _pytest.outcomes import Skipped
|
||||
from _pytest.warning_types import PytestWarning
|
||||
from _pytest.warning_types import PytestConfigWarning
|
||||
|
||||
hookimpl = HookimplMarker("pytest")
|
||||
hookspec = HookspecMarker("pytest")
|
||||
@@ -112,13 +115,18 @@ def directory_arg(path, optname):
|
||||
return path
|
||||
|
||||
|
||||
default_plugins = (
|
||||
# Plugins that cannot be disabled via "-p no:X" currently.
|
||||
essential_plugins = ( # fmt: off
|
||||
"mark",
|
||||
"main",
|
||||
"terminal",
|
||||
"runner",
|
||||
"python",
|
||||
"fixtures",
|
||||
"helpconfig", # Provides -p.
|
||||
) # fmt: on
|
||||
|
||||
default_plugins = essential_plugins + (
|
||||
"python",
|
||||
"terminal",
|
||||
"debugging",
|
||||
"unittest",
|
||||
"capture",
|
||||
@@ -127,7 +135,6 @@ default_plugins = (
|
||||
"monkeypatch",
|
||||
"recwarn",
|
||||
"pastebin",
|
||||
"helpconfig",
|
||||
"nose",
|
||||
"assertion",
|
||||
"junitxml",
|
||||
@@ -143,7 +150,6 @@ default_plugins = (
|
||||
"reports",
|
||||
)
|
||||
|
||||
|
||||
builtin_plugins = set(default_plugins)
|
||||
builtin_plugins.add("pytester")
|
||||
|
||||
@@ -279,7 +285,6 @@ class PytestPluginManager(PluginManager):
|
||||
known_marks = {m.name for m in getattr(method, "pytestmark", [])}
|
||||
|
||||
for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
|
||||
|
||||
opts.setdefault(name, hasattr(method, name) or name in known_marks)
|
||||
return opts
|
||||
|
||||
@@ -305,7 +310,7 @@ class PytestPluginManager(PluginManager):
|
||||
def register(self, plugin, name=None):
|
||||
if name in ["pytest_catchlog", "pytest_capturelog"]:
|
||||
warnings.warn(
|
||||
PytestWarning(
|
||||
PytestConfigWarning(
|
||||
"{} plugin has been merged into the core, "
|
||||
"please remove it from your requirements.".format(
|
||||
name.replace("_", "-")
|
||||
@@ -496,6 +501,9 @@ class PytestPluginManager(PluginManager):
|
||||
def consider_pluginarg(self, arg):
|
||||
if arg.startswith("no:"):
|
||||
name = arg[3:]
|
||||
if name in essential_plugins:
|
||||
raise UsageError("plugin %s cannot be disabled" % name)
|
||||
|
||||
# PR #4304 : remove stepwise if cacheprovider is blocked
|
||||
if name == "cacheprovider":
|
||||
self.set_blocked("stepwise")
|
||||
@@ -569,7 +577,7 @@ class PytestPluginManager(PluginManager):
|
||||
from _pytest.warnings import _issue_warning_captured
|
||||
|
||||
_issue_warning_captured(
|
||||
PytestWarning("skipped plugin %r: %s" % (modname, e.msg)),
|
||||
PytestConfigWarning("skipped plugin %r: %s" % (modname, e.msg)),
|
||||
self.hook,
|
||||
stacklevel=1,
|
||||
)
|
||||
@@ -782,25 +790,17 @@ class Config(object):
|
||||
modules or packages in the distribution package for
|
||||
all pytest plugins.
|
||||
"""
|
||||
import pkg_resources
|
||||
|
||||
self.pluginmanager.rewrite_hook = hook
|
||||
|
||||
if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
|
||||
# We don't autoload from setuptools entry points, no need to continue.
|
||||
return
|
||||
|
||||
# 'RECORD' available for plugins installed normally (pip install)
|
||||
# 'SOURCES.txt' available for plugins installed in dev mode (pip install -e)
|
||||
# for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa
|
||||
# so it shouldn't be an issue
|
||||
metadata_files = "RECORD", "SOURCES.txt"
|
||||
|
||||
package_files = (
|
||||
entry.split(",")[0]
|
||||
for entrypoint in pkg_resources.iter_entry_points("pytest11")
|
||||
for metadata in metadata_files
|
||||
for entry in entrypoint.dist._get_metadata(metadata)
|
||||
str(file)
|
||||
for dist in importlib_metadata.distributions()
|
||||
if any(ep.group == "pytest11" for ep in dist.entry_points)
|
||||
for file in dist.files or []
|
||||
)
|
||||
|
||||
for name in _iter_rewritable_modules(package_files):
|
||||
@@ -858,7 +858,7 @@ class Config(object):
|
||||
from _pytest.warnings import _issue_warning_captured
|
||||
|
||||
_issue_warning_captured(
|
||||
PytestWarning(
|
||||
PytestConfigWarning(
|
||||
"could not load initial conftests: {}".format(e.path)
|
||||
),
|
||||
self.hook,
|
||||
@@ -869,11 +869,10 @@ class Config(object):
|
||||
|
||||
def _checkversion(self):
|
||||
import pytest
|
||||
from pkg_resources import parse_version
|
||||
|
||||
minver = self.inicfg.get("minversion", None)
|
||||
if minver:
|
||||
if parse_version(minver) > parse_version(pytest.__version__):
|
||||
if Version(minver) > Version(pytest.__version__):
|
||||
raise pytest.UsageError(
|
||||
"%s:%d: requires pytest-%s, actual pytest-%s'"
|
||||
% (
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import argparse
|
||||
import warnings
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
class UsageError(Exception):
|
||||
""" error in pytest usage or invocation"""
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
|
||||
import py
|
||||
@@ -32,7 +33,11 @@ def getcfg(args, config=None):
|
||||
for inibasename in inibasenames:
|
||||
p = base.join(inibasename)
|
||||
if exists(p):
|
||||
iniconfig = py.iniconfig.IniConfig(p)
|
||||
try:
|
||||
iniconfig = py.iniconfig.IniConfig(p)
|
||||
except py.iniconfig.ParseError as exc:
|
||||
raise UsageError(str(exc))
|
||||
|
||||
if (
|
||||
inibasename == "setup.cfg"
|
||||
and "tool:pytest" in iniconfig.sections
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" interactive debugging with PDB, the Python Debugger. """
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -48,42 +49,18 @@ def pytest_addoption(parser):
|
||||
)
|
||||
|
||||
|
||||
def _import_pdbcls(modname, classname):
|
||||
try:
|
||||
__import__(modname)
|
||||
mod = sys.modules[modname]
|
||||
|
||||
# Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).
|
||||
parts = classname.split(".")
|
||||
pdb_cls = getattr(mod, parts[0])
|
||||
for part in parts[1:]:
|
||||
pdb_cls = getattr(pdb_cls, part)
|
||||
|
||||
return pdb_cls
|
||||
except Exception as exc:
|
||||
value = ":".join((modname, classname))
|
||||
raise UsageError("--pdbcls: could not import {!r}: {}".format(value, exc))
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
pdb_cls = config.getvalue("usepdb_cls")
|
||||
if pdb_cls:
|
||||
pdb_cls = _import_pdbcls(*pdb_cls)
|
||||
else:
|
||||
pdb_cls = pdb.Pdb
|
||||
|
||||
if config.getvalue("trace"):
|
||||
config.pluginmanager.register(PdbTrace(), "pdbtrace")
|
||||
if config.getvalue("usepdb"):
|
||||
config.pluginmanager.register(PdbInvoke(), "pdbinvoke")
|
||||
|
||||
pytestPDB._saved.append(
|
||||
(pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config, pytestPDB._pdb_cls)
|
||||
(pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config)
|
||||
)
|
||||
pdb.set_trace = pytestPDB.set_trace
|
||||
pytestPDB._pluginmanager = config.pluginmanager
|
||||
pytestPDB._config = config
|
||||
pytestPDB._pdb_cls = pdb_cls
|
||||
|
||||
# NOTE: not using pytest_unconfigure, since it might get called although
|
||||
# pytest_configure was not (if another plugin raises UsageError).
|
||||
@@ -92,7 +69,6 @@ def pytest_configure(config):
|
||||
pdb.set_trace,
|
||||
pytestPDB._pluginmanager,
|
||||
pytestPDB._config,
|
||||
pytestPDB._pdb_cls,
|
||||
) = pytestPDB._saved.pop()
|
||||
|
||||
config._cleanup.append(fin)
|
||||
@@ -103,9 +79,9 @@ class pytestPDB(object):
|
||||
|
||||
_pluginmanager = None
|
||||
_config = None
|
||||
_pdb_cls = pdb.Pdb
|
||||
_saved = []
|
||||
_recursive_debug = 0
|
||||
_wrapped_pdb_cls = None
|
||||
|
||||
@classmethod
|
||||
def _is_capturing(cls, capman):
|
||||
@@ -114,16 +90,138 @@ class pytestPDB(object):
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def _init_pdb(cls, *args, **kwargs):
|
||||
def _import_pdb_cls(cls, capman):
|
||||
if not cls._config:
|
||||
# Happens when using pytest.set_trace outside of a test.
|
||||
return pdb.Pdb
|
||||
|
||||
usepdb_cls = cls._config.getvalue("usepdb_cls")
|
||||
|
||||
if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls:
|
||||
return cls._wrapped_pdb_cls[1]
|
||||
|
||||
if usepdb_cls:
|
||||
modname, classname = usepdb_cls
|
||||
|
||||
try:
|
||||
__import__(modname)
|
||||
mod = sys.modules[modname]
|
||||
|
||||
# Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).
|
||||
parts = classname.split(".")
|
||||
pdb_cls = getattr(mod, parts[0])
|
||||
for part in parts[1:]:
|
||||
pdb_cls = getattr(pdb_cls, part)
|
||||
except Exception as exc:
|
||||
value = ":".join((modname, classname))
|
||||
raise UsageError(
|
||||
"--pdbcls: could not import {!r}: {}".format(value, exc)
|
||||
)
|
||||
else:
|
||||
pdb_cls = pdb.Pdb
|
||||
|
||||
wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman)
|
||||
cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls)
|
||||
return wrapped_cls
|
||||
|
||||
@classmethod
|
||||
def _get_pdb_wrapper_class(cls, pdb_cls, capman):
|
||||
import _pytest.config
|
||||
|
||||
class PytestPdbWrapper(pdb_cls, object):
|
||||
_pytest_capman = capman
|
||||
_continued = False
|
||||
|
||||
def do_debug(self, arg):
|
||||
cls._recursive_debug += 1
|
||||
ret = super(PytestPdbWrapper, self).do_debug(arg)
|
||||
cls._recursive_debug -= 1
|
||||
return ret
|
||||
|
||||
def do_continue(self, arg):
|
||||
ret = super(PytestPdbWrapper, self).do_continue(arg)
|
||||
if cls._recursive_debug == 0:
|
||||
tw = _pytest.config.create_terminal_writer(cls._config)
|
||||
tw.line()
|
||||
|
||||
capman = self._pytest_capman
|
||||
capturing = pytestPDB._is_capturing(capman)
|
||||
if capturing:
|
||||
if capturing == "global":
|
||||
tw.sep(">", "PDB continue (IO-capturing resumed)")
|
||||
else:
|
||||
tw.sep(
|
||||
">",
|
||||
"PDB continue (IO-capturing resumed for %s)"
|
||||
% capturing,
|
||||
)
|
||||
capman.resume()
|
||||
else:
|
||||
tw.sep(">", "PDB continue")
|
||||
cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self)
|
||||
self._continued = True
|
||||
return ret
|
||||
|
||||
do_c = do_cont = do_continue
|
||||
|
||||
def do_quit(self, arg):
|
||||
"""Raise Exit outcome when quit command is used in pdb.
|
||||
|
||||
This is a bit of a hack - it would be better if BdbQuit
|
||||
could be handled, but this would require to wrap the
|
||||
whole pytest run, and adjust the report etc.
|
||||
"""
|
||||
ret = super(PytestPdbWrapper, self).do_quit(arg)
|
||||
|
||||
if cls._recursive_debug == 0:
|
||||
outcomes.exit("Quitting debugger")
|
||||
|
||||
return ret
|
||||
|
||||
do_q = do_quit
|
||||
do_exit = do_quit
|
||||
|
||||
def setup(self, f, tb):
|
||||
"""Suspend on setup().
|
||||
|
||||
Needed after do_continue resumed, and entering another
|
||||
breakpoint again.
|
||||
"""
|
||||
ret = super(PytestPdbWrapper, self).setup(f, tb)
|
||||
if not ret and self._continued:
|
||||
# pdb.setup() returns True if the command wants to exit
|
||||
# from the interaction: do not suspend capturing then.
|
||||
if self._pytest_capman:
|
||||
self._pytest_capman.suspend_global_capture(in_=True)
|
||||
return ret
|
||||
|
||||
def get_stack(self, f, t):
|
||||
stack, i = super(PytestPdbWrapper, self).get_stack(f, t)
|
||||
if f is None:
|
||||
# Find last non-hidden frame.
|
||||
i = max(0, len(stack) - 1)
|
||||
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
|
||||
i -= 1
|
||||
return stack, i
|
||||
|
||||
return PytestPdbWrapper
|
||||
|
||||
@classmethod
|
||||
def _init_pdb(cls, method, *args, **kwargs):
|
||||
""" Initialize PDB debugging, dropping any IO capturing. """
|
||||
import _pytest.config
|
||||
|
||||
if cls._pluginmanager is not None:
|
||||
capman = cls._pluginmanager.getplugin("capturemanager")
|
||||
if capman:
|
||||
capman.suspend(in_=True)
|
||||
else:
|
||||
capman = None
|
||||
if capman:
|
||||
capman.suspend(in_=True)
|
||||
|
||||
if cls._config:
|
||||
tw = _pytest.config.create_terminal_writer(cls._config)
|
||||
tw.line()
|
||||
|
||||
if cls._recursive_debug == 0:
|
||||
# Handle header similar to pdb.set_trace in py37+.
|
||||
header = kwargs.pop("header", None)
|
||||
@@ -131,92 +229,28 @@ class pytestPDB(object):
|
||||
tw.sep(">", header)
|
||||
else:
|
||||
capturing = cls._is_capturing(capman)
|
||||
if capturing:
|
||||
if capturing == "global":
|
||||
tw.sep(">", "PDB set_trace (IO-capturing turned off)")
|
||||
else:
|
||||
tw.sep(
|
||||
">",
|
||||
"PDB set_trace (IO-capturing turned off for %s)"
|
||||
% capturing,
|
||||
)
|
||||
if capturing == "global":
|
||||
tw.sep(">", "PDB %s (IO-capturing turned off)" % (method,))
|
||||
elif capturing:
|
||||
tw.sep(
|
||||
">",
|
||||
"PDB %s (IO-capturing turned off for %s)"
|
||||
% (method, capturing),
|
||||
)
|
||||
else:
|
||||
tw.sep(">", "PDB set_trace")
|
||||
tw.sep(">", "PDB %s" % (method,))
|
||||
|
||||
class _PdbWrapper(cls._pdb_cls, object):
|
||||
_pytest_capman = capman
|
||||
_continued = False
|
||||
_pdb = cls._import_pdb_cls(capman)(**kwargs)
|
||||
|
||||
def do_debug(self, arg):
|
||||
cls._recursive_debug += 1
|
||||
ret = super(_PdbWrapper, self).do_debug(arg)
|
||||
cls._recursive_debug -= 1
|
||||
return ret
|
||||
|
||||
def do_continue(self, arg):
|
||||
ret = super(_PdbWrapper, self).do_continue(arg)
|
||||
if cls._recursive_debug == 0:
|
||||
tw = _pytest.config.create_terminal_writer(cls._config)
|
||||
tw.line()
|
||||
|
||||
capman = self._pytest_capman
|
||||
capturing = pytestPDB._is_capturing(capman)
|
||||
if capturing:
|
||||
if capturing == "global":
|
||||
tw.sep(">", "PDB continue (IO-capturing resumed)")
|
||||
else:
|
||||
tw.sep(
|
||||
">",
|
||||
"PDB continue (IO-capturing resumed for %s)"
|
||||
% capturing,
|
||||
)
|
||||
capman.resume()
|
||||
else:
|
||||
tw.sep(">", "PDB continue")
|
||||
cls._pluginmanager.hook.pytest_leave_pdb(
|
||||
config=cls._config, pdb=self
|
||||
)
|
||||
self._continued = True
|
||||
return ret
|
||||
|
||||
do_c = do_cont = do_continue
|
||||
|
||||
def set_quit(self):
|
||||
"""Raise Exit outcome when quit command is used in pdb.
|
||||
|
||||
This is a bit of a hack - it would be better if BdbQuit
|
||||
could be handled, but this would require to wrap the
|
||||
whole pytest run, and adjust the report etc.
|
||||
"""
|
||||
super(_PdbWrapper, self).set_quit()
|
||||
if cls._recursive_debug == 0:
|
||||
outcomes.exit("Quitting debugger")
|
||||
|
||||
def setup(self, f, tb):
|
||||
"""Suspend on setup().
|
||||
|
||||
Needed after do_continue resumed, and entering another
|
||||
breakpoint again.
|
||||
"""
|
||||
ret = super(_PdbWrapper, self).setup(f, tb)
|
||||
if not ret and self._continued:
|
||||
# pdb.setup() returns True if the command wants to exit
|
||||
# from the interaction: do not suspend capturing then.
|
||||
if self._pytest_capman:
|
||||
self._pytest_capman.suspend_global_capture(in_=True)
|
||||
return ret
|
||||
|
||||
_pdb = _PdbWrapper(**kwargs)
|
||||
if cls._pluginmanager:
|
||||
cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb)
|
||||
else:
|
||||
_pdb = cls._pdb_cls(**kwargs)
|
||||
return _pdb
|
||||
|
||||
@classmethod
|
||||
def set_trace(cls, *args, **kwargs):
|
||||
"""Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing."""
|
||||
frame = sys._getframe().f_back
|
||||
_pdb = cls._init_pdb(*args, **kwargs)
|
||||
_pdb = cls._init_pdb("set_trace", *args, **kwargs)
|
||||
_pdb.set_trace(frame)
|
||||
|
||||
|
||||
@@ -243,10 +277,10 @@ class PdbTrace(object):
|
||||
|
||||
|
||||
def _test_pytest_function(pyfuncitem):
|
||||
_pdb = pytestPDB._init_pdb()
|
||||
_pdb = pytestPDB._init_pdb("runcall")
|
||||
testfunction = pyfuncitem.obj
|
||||
pyfuncitem.obj = _pdb.runcall
|
||||
if "func" in pyfuncitem._fixtureinfo.argnames: # noqa
|
||||
if "func" in pyfuncitem._fixtureinfo.argnames: # pragma: no branch
|
||||
raise ValueError("--trace can't be used with a fixture named func!")
|
||||
pyfuncitem.funcargs["func"] = testfunction
|
||||
new_list = list(pyfuncitem._fixtureinfo.argnames)
|
||||
@@ -292,22 +326,8 @@ def _postmortem_traceback(excinfo):
|
||||
return excinfo._excinfo[2]
|
||||
|
||||
|
||||
def _find_last_non_hidden_frame(stack):
|
||||
i = max(0, len(stack) - 1)
|
||||
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
|
||||
i -= 1
|
||||
return i
|
||||
|
||||
|
||||
def post_mortem(t):
|
||||
class Pdb(pytestPDB._pdb_cls, object):
|
||||
def get_stack(self, f, t):
|
||||
stack, i = super(Pdb, self).get_stack(f, t)
|
||||
if f is None:
|
||||
i = _find_last_non_hidden_frame(stack)
|
||||
return stack, i
|
||||
|
||||
p = Pdb()
|
||||
p = pytestPDB._init_pdb("post_mortem")
|
||||
p.reset()
|
||||
p.interaction(None, t)
|
||||
if p.quitting:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
This module contains deprecation messages and bits of code used elsewhere in the codebase
|
||||
that is planned to be removed in the next pytest release.
|
||||
@@ -39,8 +40,8 @@ GETFUNCARGVALUE = RemovedInPytest4Warning(
|
||||
RAISES_MESSAGE_PARAMETER = PytestDeprecationWarning(
|
||||
"The 'message' parameter is deprecated.\n"
|
||||
"(did you mean to use `match='some regex'` to check the exception message?)\n"
|
||||
"Please comment on https://github.com/pytest-dev/pytest/issues/3974 "
|
||||
"if you have concerns about removal of this parameter."
|
||||
"Please see:\n"
|
||||
" https://docs.pytest.org/en/4.6-maintenance/deprecations.html#message-parameter-of-pytest-raises"
|
||||
)
|
||||
|
||||
RESULT_LOG = PytestDeprecationWarning(
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" discover and run doctests in modules and test files."""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -7,6 +8,7 @@ import inspect
|
||||
import platform
|
||||
import sys
|
||||
import traceback
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
|
||||
import pytest
|
||||
@@ -16,6 +18,7 @@ from _pytest._code.code import TerminalRepr
|
||||
from _pytest.compat import safe_getattr
|
||||
from _pytest.fixtures import FixtureRequest
|
||||
from _pytest.outcomes import Skipped
|
||||
from _pytest.warning_types import PytestWarning
|
||||
|
||||
DOCTEST_REPORT_CHOICE_NONE = "none"
|
||||
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
|
||||
@@ -373,10 +376,18 @@ def _patch_unwrap_mock_aware():
|
||||
else:
|
||||
|
||||
def _mock_aware_unwrap(obj, stop=None):
|
||||
if stop is None:
|
||||
return real_unwrap(obj, stop=_is_mocked)
|
||||
else:
|
||||
try:
|
||||
if stop is None or stop is _is_mocked:
|
||||
return real_unwrap(obj, stop=_is_mocked)
|
||||
return real_unwrap(obj, stop=lambda obj: _is_mocked(obj) or stop(obj))
|
||||
except Exception as e:
|
||||
warnings.warn(
|
||||
"Got %r when unwrapping %r. This is usually caused "
|
||||
"by a violation of Python's object protocol; see e.g. "
|
||||
"https://github.com/pytest-dev/pytest/issues/5080" % (e, obj),
|
||||
PytestWarning,
|
||||
)
|
||||
raise
|
||||
|
||||
inspect.unwrap = _mock_aware_unwrap
|
||||
try:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
@@ -853,11 +854,9 @@ class FixtureDef(object):
|
||||
exceptions.append(sys.exc_info())
|
||||
if exceptions:
|
||||
e = exceptions[0]
|
||||
del (
|
||||
exceptions
|
||||
) # ensure we don't keep all frames alive because of the traceback
|
||||
# Ensure to not keep frame references through traceback.
|
||||
del exceptions
|
||||
six.reraise(*e)
|
||||
|
||||
finally:
|
||||
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
|
||||
hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
|
||||
@@ -1023,6 +1022,7 @@ def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
|
||||
:arg params: an optional list of parameters which will cause multiple
|
||||
invocations of the fixture function and all of the tests
|
||||
using it.
|
||||
The current parameter is available in ``request.param``.
|
||||
|
||||
:arg autouse: if True, the fixture func is activated for all tests that
|
||||
can see it. If False (the default) then an explicit
|
||||
@@ -1074,6 +1074,15 @@ def pytestconfig(request):
|
||||
return request.config
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addini(
|
||||
"usefixtures",
|
||||
type="args",
|
||||
default=[],
|
||||
help="list of default fixtures to be used with this project",
|
||||
)
|
||||
|
||||
|
||||
class FixtureManager(object):
|
||||
"""
|
||||
pytest fixtures definitions and information is stored and managed
|
||||
@@ -1118,18 +1127,40 @@ class FixtureManager(object):
|
||||
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
|
||||
session.config.pluginmanager.register(self, "funcmanage")
|
||||
|
||||
def _get_direct_parametrize_args(self, node):
|
||||
"""This function returns all the direct parametrization
|
||||
arguments of a node, so we don't mistake them for fixtures
|
||||
|
||||
Check https://github.com/pytest-dev/pytest/issues/5036
|
||||
|
||||
This things are done later as well when dealing with parametrization
|
||||
so this could be improved
|
||||
"""
|
||||
from _pytest.mark import ParameterSet
|
||||
|
||||
parametrize_argnames = []
|
||||
for marker in node.iter_markers(name="parametrize"):
|
||||
if not marker.kwargs.get("indirect", False):
|
||||
p_argnames, _ = ParameterSet._parse_parametrize_args(
|
||||
*marker.args, **marker.kwargs
|
||||
)
|
||||
parametrize_argnames.extend(p_argnames)
|
||||
|
||||
return parametrize_argnames
|
||||
|
||||
def getfixtureinfo(self, node, func, cls, funcargs=True):
|
||||
if funcargs and not getattr(node, "nofuncargs", False):
|
||||
argnames = getfuncargnames(func, cls=cls)
|
||||
else:
|
||||
argnames = ()
|
||||
|
||||
usefixtures = itertools.chain.from_iterable(
|
||||
mark.args for mark in node.iter_markers(name="usefixtures")
|
||||
)
|
||||
initialnames = tuple(usefixtures) + argnames
|
||||
fm = node.session._fixturemanager
|
||||
initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(
|
||||
initialnames, node
|
||||
initialnames, node, ignore_args=self._get_direct_parametrize_args(node)
|
||||
)
|
||||
return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
|
||||
|
||||
@@ -1163,7 +1194,7 @@ class FixtureManager(object):
|
||||
autousenames.extend(basenames)
|
||||
return autousenames
|
||||
|
||||
def getfixtureclosure(self, fixturenames, parentnode):
|
||||
def getfixtureclosure(self, fixturenames, parentnode, ignore_args=()):
|
||||
# collect the closure of all fixtures , starting with the given
|
||||
# fixturenames as the initial set. As we have to visit all
|
||||
# factory definitions anyway, we also return an arg2fixturedefs
|
||||
@@ -1191,6 +1222,8 @@ class FixtureManager(object):
|
||||
while lastlen != len(fixturenames_closure):
|
||||
lastlen = len(fixturenames_closure)
|
||||
for argname in fixturenames_closure:
|
||||
if argname in ignore_args:
|
||||
continue
|
||||
if argname in arg2fixturedefs:
|
||||
continue
|
||||
fixturedefs = self.getfixturedefs(argname, parentid)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Provides a function to report all internal modules for using freezing tools
|
||||
pytest
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" version info, help messages, tracing configuration. """
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -141,23 +142,48 @@ def pytest_cmdline_main(config):
|
||||
|
||||
|
||||
def showhelp(config):
|
||||
import textwrap
|
||||
|
||||
reporter = config.pluginmanager.get_plugin("terminalreporter")
|
||||
tw = reporter._tw
|
||||
tw.write(config._parser.optparser.format_help())
|
||||
tw.line()
|
||||
tw.line()
|
||||
tw.line(
|
||||
"[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:"
|
||||
)
|
||||
tw.line()
|
||||
|
||||
columns = tw.fullwidth # costly call
|
||||
indent_len = 24 # based on argparse's max_help_position=24
|
||||
indent = " " * indent_len
|
||||
for name in config._parser._ininames:
|
||||
help, type, default = config._parser._inidict[name]
|
||||
if type is None:
|
||||
type = "string"
|
||||
spec = "%s (%s)" % (name, type)
|
||||
line = " %-24s %s" % (spec, help)
|
||||
tw.line(line[: tw.fullwidth])
|
||||
spec = "%s (%s):" % (name, type)
|
||||
tw.write(" %s" % spec)
|
||||
spec_len = len(spec)
|
||||
if spec_len > (indent_len - 3):
|
||||
# Display help starting at a new line.
|
||||
tw.line()
|
||||
helplines = textwrap.wrap(
|
||||
help,
|
||||
columns,
|
||||
initial_indent=indent,
|
||||
subsequent_indent=indent,
|
||||
break_on_hyphens=False,
|
||||
)
|
||||
|
||||
for line in helplines:
|
||||
tw.line(line)
|
||||
else:
|
||||
# Display help starting after the spec, following lines indented.
|
||||
tw.write(" " * (indent_len - spec_len - 2))
|
||||
wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False)
|
||||
|
||||
tw.line(wrapped[0])
|
||||
for line in wrapped[1:]:
|
||||
tw.line(indent + line)
|
||||
|
||||
tw.line()
|
||||
tw.line("environment variables:")
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
|
||||
from pluggy import HookspecMarker
|
||||
|
||||
@@ -227,7 +228,7 @@ def pytest_collectreport(report):
|
||||
|
||||
|
||||
def pytest_deselected(items):
|
||||
""" called for test items deselected by keyword. """
|
||||
""" called for test items deselected, e.g. by keyword. """
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
report test results in JUnit-XML format,
|
||||
for use with Jenkins and build integration servers.
|
||||
@@ -166,6 +167,9 @@ class _NodeReporter(object):
|
||||
self.append(node)
|
||||
|
||||
def write_captured_output(self, report):
|
||||
if not self.xml.log_passing_tests and report.passed:
|
||||
return
|
||||
|
||||
content_out = report.capstdout
|
||||
content_log = report.caplog
|
||||
content_err = report.capstderr
|
||||
@@ -252,7 +256,14 @@ class _NodeReporter(object):
|
||||
|
||||
def append_skipped(self, report):
|
||||
if hasattr(report, "wasxfail"):
|
||||
self._add_simple(Junit.skipped, "expected test failure", report.wasxfail)
|
||||
xfailreason = report.wasxfail
|
||||
if xfailreason.startswith("reason: "):
|
||||
xfailreason = xfailreason[8:]
|
||||
self.append(
|
||||
Junit.skipped(
|
||||
"", type="pytest.xfail", message=bin_xml_escape(xfailreason)
|
||||
)
|
||||
)
|
||||
else:
|
||||
filename, lineno, skipreason = report.longrepr
|
||||
if skipreason.startswith("Skipped: "):
|
||||
@@ -274,6 +285,21 @@ class _NodeReporter(object):
|
||||
self.to_xml = lambda: py.xml.raw(data)
|
||||
|
||||
|
||||
def _warn_incompatibility_with_xunit2(request, fixture_name):
|
||||
"""Emits a PytestWarning about the given fixture being incompatible with newer xunit revisions"""
|
||||
from _pytest.warning_types import PytestWarning
|
||||
|
||||
xml = getattr(request.config, "_xml", None)
|
||||
if xml is not None and xml.family not in ("xunit1", "legacy"):
|
||||
request.node.warn(
|
||||
PytestWarning(
|
||||
"{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format(
|
||||
fixture_name=fixture_name, family=xml.family
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def record_property(request):
|
||||
"""Add an extra properties the calling test.
|
||||
@@ -287,6 +313,7 @@ def record_property(request):
|
||||
def test_function(record_property):
|
||||
record_property("example_key", 1)
|
||||
"""
|
||||
_warn_incompatibility_with_xunit2(request, "record_property")
|
||||
|
||||
def append_property(name, value):
|
||||
request.node.user_properties.append((name, value))
|
||||
@@ -300,31 +327,67 @@ def record_xml_attribute(request):
|
||||
The fixture is callable with ``(name, value)``, with value being
|
||||
automatically xml-encoded
|
||||
"""
|
||||
from _pytest.warning_types import PytestWarning
|
||||
from _pytest.warning_types import PytestExperimentalApiWarning
|
||||
|
||||
request.node.warn(PytestWarning("record_xml_attribute is an experimental feature"))
|
||||
request.node.warn(
|
||||
PytestExperimentalApiWarning("record_xml_attribute is an experimental feature")
|
||||
)
|
||||
|
||||
_warn_incompatibility_with_xunit2(request, "record_xml_attribute")
|
||||
|
||||
# Declare noop
|
||||
def add_attr_noop(name, value):
|
||||
pass
|
||||
|
||||
attr_func = add_attr_noop
|
||||
xml = getattr(request.config, "_xml", None)
|
||||
|
||||
if xml is not None and xml.family != "xunit1":
|
||||
request.node.warn(
|
||||
PytestWarning(
|
||||
"record_xml_attribute is incompatible with junit_family: "
|
||||
"%s (use: legacy|xunit1)" % xml.family
|
||||
)
|
||||
)
|
||||
elif xml is not None:
|
||||
xml = getattr(request.config, "_xml", None)
|
||||
if xml is not None:
|
||||
node_reporter = xml.node_reporter(request.node.nodeid)
|
||||
attr_func = node_reporter.add_attribute
|
||||
|
||||
return attr_func
|
||||
|
||||
|
||||
def _check_record_param_type(param, v):
|
||||
"""Used by record_testsuite_property to check that the given parameter name is of the proper
|
||||
type"""
|
||||
__tracebackhide__ = True
|
||||
if not isinstance(v, six.string_types):
|
||||
msg = "{param} parameter needs to be a string, but {g} given"
|
||||
raise TypeError(msg.format(param=param, g=type(v).__name__))
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def record_testsuite_property(request):
|
||||
"""
|
||||
Records a new ``<property>`` tag as child of the root ``<testsuite>``. This is suitable to
|
||||
writing global information regarding the entire test suite, and is compatible with ``xunit2`` JUnit family.
|
||||
|
||||
This is a ``session``-scoped fixture which is called with ``(name, value)``. Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_foo(record_testsuite_property):
|
||||
record_testsuite_property("ARCH", "PPC")
|
||||
record_testsuite_property("STORAGE_TYPE", "CEPH")
|
||||
|
||||
``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped.
|
||||
"""
|
||||
|
||||
__tracebackhide__ = True
|
||||
|
||||
def record_func(name, value):
|
||||
"""noop function in case --junitxml was not passed in the command-line"""
|
||||
__tracebackhide__ = True
|
||||
_check_record_param_type("name", name)
|
||||
|
||||
xml = getattr(request.config, "_xml", None)
|
||||
if xml is not None:
|
||||
record_func = xml.add_global_property # noqa
|
||||
return record_func
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting")
|
||||
group.addoption(
|
||||
@@ -354,6 +417,12 @@ def pytest_addoption(parser):
|
||||
"one of no|system-out|system-err",
|
||||
default="no",
|
||||
) # choices=['no', 'stdout', 'stderr'])
|
||||
parser.addini(
|
||||
"junit_log_passing_tests",
|
||||
"Capture log information for passing tests to JUnit report: ",
|
||||
type="bool",
|
||||
default=True,
|
||||
)
|
||||
parser.addini(
|
||||
"junit_duration_report",
|
||||
"Duration time to report: one of total|call",
|
||||
@@ -377,6 +446,7 @@ def pytest_configure(config):
|
||||
config.getini("junit_logging"),
|
||||
config.getini("junit_duration_report"),
|
||||
config.getini("junit_family"),
|
||||
config.getini("junit_log_passing_tests"),
|
||||
)
|
||||
config.pluginmanager.register(config._xml)
|
||||
|
||||
@@ -412,18 +482,21 @@ class LogXML(object):
|
||||
logging="no",
|
||||
report_duration="total",
|
||||
family="xunit1",
|
||||
log_passing_tests=True,
|
||||
):
|
||||
logfile = os.path.expanduser(os.path.expandvars(logfile))
|
||||
self.logfile = os.path.normpath(os.path.abspath(logfile))
|
||||
self.prefix = prefix
|
||||
self.suite_name = suite_name
|
||||
self.logging = logging
|
||||
self.log_passing_tests = log_passing_tests
|
||||
self.report_duration = report_duration
|
||||
self.family = family
|
||||
self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0)
|
||||
self.node_reporters = {} # nodeid -> _NodeReporter
|
||||
self.node_reporters_ordered = []
|
||||
self.global_properties = []
|
||||
|
||||
# List of reports that failed on call but teardown is pending.
|
||||
self.open_reports = []
|
||||
self.cnt_double_fail_tests = 0
|
||||
@@ -612,7 +685,9 @@ class LogXML(object):
|
||||
terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
|
||||
|
||||
def add_global_property(self, name, value):
|
||||
self.global_properties.append((str(name), bin_xml_escape(value)))
|
||||
__tracebackhide__ = True
|
||||
_check_record_param_type("name", name)
|
||||
self.global_properties.append((name, bin_xml_escape(value)))
|
||||
|
||||
def _get_global_properties_node(self):
|
||||
"""Return a Junit node containing custom properties, if any.
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" Access and control log capturing. """
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -15,8 +16,13 @@ from _pytest.compat import dummy_context_manager
|
||||
from _pytest.config import create_terminal_writer
|
||||
from _pytest.pathlib import Path
|
||||
|
||||
DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
|
||||
DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
|
||||
DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
|
||||
_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")
|
||||
|
||||
|
||||
def _remove_ansi_escape_sequences(text):
|
||||
return _ANSI_ESCAPE_SEQ.sub("", text)
|
||||
|
||||
|
||||
class ColoredLevelFormatter(logging.Formatter):
|
||||
@@ -71,6 +77,36 @@ class ColoredLevelFormatter(logging.Formatter):
|
||||
return super(ColoredLevelFormatter, self).format(record)
|
||||
|
||||
|
||||
if not six.PY2:
|
||||
# Formatter classes don't support format styles in PY2
|
||||
|
||||
class PercentStyleMultiline(logging.PercentStyle):
|
||||
"""A logging style with special support for multiline messages.
|
||||
|
||||
If the message of a record consists of multiple lines, this style
|
||||
formats the message as if each line were logged separately.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def _update_message(record_dict, message):
|
||||
tmp = record_dict.copy()
|
||||
tmp["message"] = message
|
||||
return tmp
|
||||
|
||||
def format(self, record):
|
||||
if "\n" in record.message:
|
||||
lines = record.message.splitlines()
|
||||
formatted = self._fmt % self._update_message(record.__dict__, lines[0])
|
||||
# TODO optimize this by introducing an option that tells the
|
||||
# logging framework that the indentation doesn't
|
||||
# change. This allows to compute the indentation only once.
|
||||
indentation = _remove_ansi_escape_sequences(formatted).find(lines[0])
|
||||
lines[0] = formatted
|
||||
return ("\n" + " " * indentation).join(lines)
|
||||
else:
|
||||
return self._fmt % record.__dict__
|
||||
|
||||
|
||||
def get_option_ini(config, *names):
|
||||
for name in names:
|
||||
ret = config.getoption(name) # 'default' arg won't work as expected
|
||||
@@ -256,8 +292,8 @@ class LogCaptureFixture(object):
|
||||
|
||||
@property
|
||||
def text(self):
|
||||
"""Returns the log text."""
|
||||
return self.handler.stream.getvalue()
|
||||
"""Returns the formatted log text."""
|
||||
return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
|
||||
|
||||
@property
|
||||
def records(self):
|
||||
@@ -388,12 +424,8 @@ class LoggingPlugin(object):
|
||||
"""
|
||||
self._config = config
|
||||
|
||||
# enable verbose output automatically if live logging is enabled
|
||||
if self._log_cli_enabled() and config.getoption("verbose") < 1:
|
||||
config.option.verbose = 1
|
||||
|
||||
self.print_logs = get_option_ini(config, "log_print")
|
||||
self.formatter = logging.Formatter(
|
||||
self.formatter = self._create_formatter(
|
||||
get_option_ini(config, "log_format"),
|
||||
get_option_ini(config, "log_date_format"),
|
||||
)
|
||||
@@ -427,6 +459,22 @@ class LoggingPlugin(object):
|
||||
if self._log_cli_enabled():
|
||||
self._setup_cli_logging()
|
||||
|
||||
def _create_formatter(self, log_format, log_date_format):
|
||||
# color option doesn't exist if terminal plugin is disabled
|
||||
color = getattr(self._config.option, "color", "no")
|
||||
if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
|
||||
log_format
|
||||
):
|
||||
formatter = ColoredLevelFormatter(
|
||||
create_terminal_writer(self._config), log_format, log_date_format
|
||||
)
|
||||
else:
|
||||
formatter = logging.Formatter(log_format, log_date_format)
|
||||
|
||||
if not six.PY2:
|
||||
formatter._style = PercentStyleMultiline(formatter._style._fmt)
|
||||
return formatter
|
||||
|
||||
def _setup_cli_logging(self):
|
||||
config = self._config
|
||||
terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
|
||||
@@ -437,23 +485,12 @@ class LoggingPlugin(object):
|
||||
capture_manager = config.pluginmanager.get_plugin("capturemanager")
|
||||
# if capturemanager plugin is disabled, live logging still works.
|
||||
log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
|
||||
log_cli_format = get_option_ini(config, "log_cli_format", "log_format")
|
||||
log_cli_date_format = get_option_ini(
|
||||
config, "log_cli_date_format", "log_date_format"
|
||||
|
||||
log_cli_formatter = self._create_formatter(
|
||||
get_option_ini(config, "log_cli_format", "log_format"),
|
||||
get_option_ini(config, "log_cli_date_format", "log_date_format"),
|
||||
)
|
||||
if (
|
||||
config.option.color != "no"
|
||||
and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format)
|
||||
):
|
||||
log_cli_formatter = ColoredLevelFormatter(
|
||||
create_terminal_writer(config),
|
||||
log_cli_format,
|
||||
datefmt=log_cli_date_format,
|
||||
)
|
||||
else:
|
||||
log_cli_formatter = logging.Formatter(
|
||||
log_cli_format, datefmt=log_cli_date_format
|
||||
)
|
||||
|
||||
log_cli_level = get_actual_log_level(config, "log_cli_level", "log_level")
|
||||
self.log_cli_handler = log_cli_handler
|
||||
self.live_logs_context = lambda: catching_logs(
|
||||
@@ -603,6 +640,15 @@ class LoggingPlugin(object):
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_runtestloop(self, session):
|
||||
"""Runs all collected test items."""
|
||||
|
||||
if session.config.option.collectonly:
|
||||
yield
|
||||
return
|
||||
|
||||
if self._log_cli_enabled() and self._config.getoption("verbose") < 1:
|
||||
# setting verbose flag is needed to avoid messy test progress output
|
||||
self._config.option.verbose = 1
|
||||
|
||||
with self.live_logs_context():
|
||||
if self.log_file_handler is not None:
|
||||
with catching_logs(self.log_file_handler, level=self.log_file_level):
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" core implementation of testing process: init, session, runtest loop. """
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -47,11 +48,6 @@ def pytest_addoption(parser):
|
||||
type="args",
|
||||
default=[],
|
||||
)
|
||||
# parser.addini("dirpatterns",
|
||||
# "patterns specifying possible locations of test files",
|
||||
# type="linelist", default=["**/test_*.txt",
|
||||
# "**/test_*.py", "**/*_test.py"]
|
||||
# )
|
||||
group = parser.getgroup("general", "running and selection options")
|
||||
group._addoption(
|
||||
"-x",
|
||||
@@ -71,9 +67,10 @@ def pytest_addoption(parser):
|
||||
help="exit after first num failures or errors.",
|
||||
)
|
||||
group._addoption(
|
||||
"--strict-markers",
|
||||
"--strict",
|
||||
action="store_true",
|
||||
help="marks not registered in configuration file raise errors.",
|
||||
help="markers not registered in the `markers` section of the configuration file raise errors.",
|
||||
)
|
||||
group._addoption(
|
||||
"-c",
|
||||
@@ -208,16 +205,20 @@ def wrap_session(config, doit):
|
||||
initstate = 2
|
||||
session.exitstatus = doit(config, session) or 0
|
||||
except UsageError:
|
||||
session.exitstatus = EXIT_USAGEERROR
|
||||
raise
|
||||
except Failed:
|
||||
session.exitstatus = EXIT_TESTSFAILED
|
||||
except (KeyboardInterrupt, exit.Exception):
|
||||
excinfo = _pytest._code.ExceptionInfo.from_current()
|
||||
exitstatus = EXIT_INTERRUPTED
|
||||
if initstate <= 2 and isinstance(excinfo.value, exit.Exception):
|
||||
sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg))
|
||||
if isinstance(excinfo.value, exit.Exception):
|
||||
if excinfo.value.returncode is not None:
|
||||
exitstatus = excinfo.value.returncode
|
||||
if initstate < 2:
|
||||
sys.stderr.write(
|
||||
"{}: {}\n".format(excinfo.typename, excinfo.value.msg)
|
||||
)
|
||||
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
|
||||
session.exitstatus = exitstatus
|
||||
except: # noqa
|
||||
@@ -431,7 +432,7 @@ class Session(nodes.FSCollector):
|
||||
self.shouldfail = False
|
||||
self.trace = config.trace.root.get("collection")
|
||||
self._norecursepatterns = config.getini("norecursedirs")
|
||||
self.startdir = py.path.local()
|
||||
self.startdir = config.invocation_dir
|
||||
self._initialpaths = frozenset()
|
||||
# Keep track of any collected nodes in here, so we don't duplicate fixtures
|
||||
self._node_cache = {}
|
||||
@@ -620,7 +621,13 @@ class Session(nodes.FSCollector):
|
||||
# Module itself, so just use that. If this special case isn't taken, then all
|
||||
# the files in the package will be yielded.
|
||||
if argpath.basename == "__init__.py":
|
||||
yield next(m[0].collect())
|
||||
try:
|
||||
yield next(m[0].collect())
|
||||
except StopIteration:
|
||||
# The package collects nothing with only an __init__.py
|
||||
# file in it, which gets ignored by the default
|
||||
# "python_files" option.
|
||||
pass
|
||||
return
|
||||
for y in m:
|
||||
yield y
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
""" generic mechanism for marking and selecting python functions. """
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
@@ -100,6 +101,9 @@ pytest_cmdline_main.tryfirst = True
|
||||
|
||||
def deselect_by_keyword(items, config):
|
||||
keywordexpr = config.option.keyword.lstrip()
|
||||
if not keywordexpr:
|
||||
return
|
||||
|
||||
if keywordexpr.startswith("-"):
|
||||
keywordexpr = "not " + keywordexpr[1:]
|
||||
selectuntil = False
|
||||
@@ -147,8 +151,7 @@ def pytest_collection_modifyitems(items, config):
|
||||
|
||||
def pytest_configure(config):
|
||||
config._old_mark_config = MARK_GEN._config
|
||||
if config.option.strict:
|
||||
MARK_GEN._config = config
|
||||
MARK_GEN._config = config
|
||||
|
||||
empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
this is a place where we put datastructures used by legacy apis
|
||||
we hope ot remove
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import inspect
|
||||
import warnings
|
||||
from collections import namedtuple
|
||||
@@ -7,11 +8,13 @@ import attr
|
||||
import six
|
||||
|
||||
from ..compat import ascii_escaped
|
||||
from ..compat import ATTRS_EQ_FIELD
|
||||
from ..compat import getfslineno
|
||||
from ..compat import MappingMixin
|
||||
from ..compat import NOTSET
|
||||
from _pytest.deprecated import PYTEST_PARAM_UNKNOWN_KWARGS
|
||||
from _pytest.outcomes import fail
|
||||
from _pytest.warning_types import PytestUnknownMarkWarning
|
||||
|
||||
EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark"
|
||||
|
||||
@@ -101,16 +104,25 @@ class ParameterSet(namedtuple("ParameterSet", "values, marks, id")):
|
||||
else:
|
||||
return cls(parameterset, marks=[], id=None)
|
||||
|
||||
@classmethod
|
||||
def _for_parametrize(cls, argnames, argvalues, func, config, function_definition):
|
||||
@staticmethod
|
||||
def _parse_parametrize_args(argnames, argvalues, *args, **kwargs):
|
||||
if not isinstance(argnames, (tuple, list)):
|
||||
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
|
||||
force_tuple = len(argnames) == 1
|
||||
else:
|
||||
force_tuple = False
|
||||
parameters = [
|
||||
return argnames, force_tuple
|
||||
|
||||
@staticmethod
|
||||
def _parse_parametrize_parameters(argvalues, force_tuple):
|
||||
return [
|
||||
ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def _for_parametrize(cls, argnames, argvalues, func, config, function_definition):
|
||||
argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues)
|
||||
parameters = cls._parse_parametrize_parameters(argvalues, force_tuple)
|
||||
del argvalues
|
||||
|
||||
if parameters:
|
||||
@@ -135,7 +147,7 @@ class ParameterSet(namedtuple("ParameterSet", "values, marks, id")):
|
||||
)
|
||||
else:
|
||||
# empty parameter set (likely computed at runtime): create a single
|
||||
# parameter set with NOSET values, with the "empty parameter set" mark applied to it
|
||||
# parameter set with NOTSET values, with the "empty parameter set" mark applied to it
|
||||
mark = get_empty_parameterset_mark(config, argnames, func)
|
||||
parameters.append(
|
||||
ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None)
|
||||
@@ -158,7 +170,7 @@ class Mark(object):
|
||||
:type other: Mark
|
||||
:rtype: Mark
|
||||
|
||||
combines by appending aargs and merging the mappings
|
||||
combines by appending args and merging the mappings
|
||||
"""
|
||||
assert self.name == other.name
|
||||
return Mark(
|
||||
@@ -289,28 +301,41 @@ class MarkGenerator(object):
|
||||
on the ``test_function`` object. """
|
||||
|
||||
_config = None
|
||||
_markers = set()
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name[0] == "_":
|
||||
raise AttributeError("Marker name must NOT start with underscore")
|
||||
if self._config is not None:
|
||||
self._check(name)
|
||||
return MarkDecorator(Mark(name, (), {}))
|
||||
|
||||
def _check(self, name):
|
||||
try:
|
||||
if name in self._markers:
|
||||
return
|
||||
except AttributeError:
|
||||
pass
|
||||
self._markers = values = set()
|
||||
for line in self._config.getini("markers"):
|
||||
marker = line.split(":", 1)[0]
|
||||
marker = marker.rstrip()
|
||||
x = marker.split("(", 1)[0]
|
||||
values.add(x)
|
||||
if name not in self._markers:
|
||||
fail("{!r} not a registered marker".format(name), pytrace=False)
|
||||
if self._config is not None:
|
||||
# We store a set of markers as a performance optimisation - if a mark
|
||||
# name is in the set we definitely know it, but a mark may be known and
|
||||
# not in the set. We therefore start by updating the set!
|
||||
if name not in self._markers:
|
||||
for line in self._config.getini("markers"):
|
||||
# example lines: "skipif(condition): skip the given test if..."
|
||||
# or "hypothesis: tests which use Hypothesis", so to get the
|
||||
# marker name we split on both `:` and `(`.
|
||||
marker = line.split(":")[0].split("(")[0].strip()
|
||||
self._markers.add(marker)
|
||||
|
||||
# If the name is not in the set of known marks after updating,
|
||||
# then it really is time to issue a warning or an error.
|
||||
if name not in self._markers:
|
||||
if self._config.option.strict_markers:
|
||||
fail(
|
||||
"{!r} not found in `markers` configuration option".format(name),
|
||||
pytrace=False,
|
||||
)
|
||||
else:
|
||||
warnings.warn(
|
||||
"Unknown pytest.mark.%s - is this a typo? You can register "
|
||||
"custom marks to avoid this warning - for details, see "
|
||||
"https://docs.pytest.org/en/latest/mark.html" % name,
|
||||
PytestUnknownMarkWarning,
|
||||
)
|
||||
|
||||
return MarkDecorator(Mark(name, (), {}))
|
||||
|
||||
|
||||
MARK_GEN = MarkGenerator()
|
||||
@@ -353,7 +378,8 @@ class NodeKeywords(MappingMixin):
|
||||
return "<NodeKeywords for node %s>" % (self.node,)
|
||||
|
||||
|
||||
@attr.s(cmp=False, hash=False)
|
||||
# mypy cannot find this overload, remove when on attrs>=19.2
|
||||
@attr.s(hash=False, **{ATTRS_EQ_FIELD: False}) # type: ignore
|
||||
class NodeMarkers(object):
|
||||
"""
|
||||
internal structure for storing marks belonging to a node
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user