Compare commits
480 Commits
SHA1:

b27ba97721, 2b56c7e1ce, 2bee7d7c3e, 713b9e54c3, ba4b8c869c, 82e9013e73, 14142b9113, 0123b29ed7, 16efa1bfef, 5624e366c1,
b88f5df4ce, 3a402811de, e05b33ed16, 2e11ea6108, 18786992bb, 3173a26388, 5e7b8d813b, afac1f0021, 0783030357, d910175b9f,
6b2bae9392, 7ef44913a1, 813ef9e88f, f2dd9cc63e, ce8b1dfa04, a73d0151a6, 3cb1457e6d, 90dfee5da5, 77a995ffad, 0383d43645,
76c2a8ebbe, 71a7fd02a5, 7bc8cb8e2b, dee8d94876, a20880cca2, ae9465215e, 1555973487, 3322c1e033, 7678f891f9, 4f2abd7ae0,
122cf60b27, 63e3d89647, 122748a6cf, 1f639e2c22, 83ba5eb58a, d07c5ba4ae, b162ab6a45, 57141dc708, bad4ffc3a7, 71ad5b0fbb,
3fada8c8ee, 271dc7f17a, 19eb0590f1, eaa05531ed, 74aed6ea4c, cfa9ebc91f, b0fd8742da, 12cc729f6b, 1c5efffd90, 8c9ea5e055,
c58b0fb4ac, 4011af68cd, 9637b3e376, 33c3ec66b7, a79acf279a, 9a4c0b991b, b490f5f979, acfd0fd9d6, 88434f1f42, 4d01740be3,
07792c7113, 068ef90b92, 065773aa97, b62276826c, 7bdfba3578, 6bfd30d169, 7731e45615, 8806b1f531, 19c9e53604, c28b63135f,
7c64d5d882, d3d9f9f668, 018edf2a0e, 04c01fb606, ea0c7e43b6, de8fdab7a9, c1361b48f8, 1b4ad7774b, 409cc2946a, 3114be9181,
e4103cb02c, 217605c217, 2fcf21a6c7, c8cf748c49, 249b53e623, 9669413b1f, e2382e96ed, 1a9f4a51cb, 892bdd59dc, df46afc96d,
6918d07560, c997c32004, 450409d123, 702acdba46, f832ac3316, 9422e10322, 5c3b4a6f52, 05850d73bd, b48f51eb03, cf5b544db3,
73c5b7f4b1, 8f2f51be6d, f2f3ced508, 23102a7d84, f0d538329c, 6c8bcf601c, 9d7b919c7d, 333e9d5c10, f1b605c95e, 2bb8d93001,
d049b35397, 8ee557f7ae, ca3884d9bb, bc163605ab, 1675048b35, 10bf6aac76, f8dd6349c1, 8c8809e1aa, e56544cb58, f9cc704b1a,
bd57307a39, 667c6463ab, 4e594552eb, 955dc6d18a, 3ddbc7fb2a, bb60736a6f, 35b3b1097f, 01082fea12, 404cf0c872, a511b98da9,
487659d8b1, 955e542210, 29bb0eda27, a98270eac0, 1aac64573f, d47b9d04d4, 5bf9f9a711, c28e428249, 505c3340bf, 7a69365486,
3c82b1cb97, 0215bcd84e, 01b9774e3b, 9859d37cf6, 1c7aeb670a, 691c706fcc, a4adf511fc, 4265ab3a41, b135f5af8d, daff9066c0,
c2f762460f, 43eab917a1, 0e569faca2, a7c235732a, cec2183aeb, c049fd85ab, 62381125e7, a7ede64f42, 307652202c, a287aea00e,
6bf6265c59, 7c26a65865, 1e3205e7cf, 32dac18f38, f05ca74d27, e5f4c47cd5, 05bfe73cf9, 2a6a1ca07d, 9f3bfe82cf, c3a8e609f9,
7259c453d6, 28761c8da1, d9c4e646c4, 8ccc0177c8, eaf7ce9a99, ac3056c5a2, 409d61b972, 6ead01aacd, 4c37dca011, 44c10dbd5f,
0f11a7a73d, d50198a3ff, ac052a98ad, f4a84a8dfd, 1049a38cee, d7f082519a, 2d613a03b3, 28c6c5bb71, 6b9d729ed3, 0ba774a7c3,
0a62c4ac04, 137255816e, 2f1b192fe6, 7183335e62, 0822a1e53a, cb94fd31c8, fa75d818cf, 1434b66c35, a24132ddc5, ed2425119f,
a295a3ddaf, 3b3ce0e799, 1a61265b1e, 67ac878ccf, 3eb4973065, 39ba996133, cb481a354a, 300f78556f, 0db9dade65, a77c83a4c3,
0767f080a4, 2498aeaaa5, 345df99db7, 8665f5652a, beb457c75e, f7e81dab9a, ee936b27a8, e0ce8b79d5, 8ffa3aa65d, b095e0de47,
2f065a555f, ec76f70d71, 65e6038111, 9f6da8cbeb, ba72b480b9, ba76080b59, 5ecac3c861, 0a19439677, e772bb6480, 7560a7b808,
6c8d8a99f4, 78de9d4677, 23a0b532db, 9fce0bdd88, 28fabc52bd, 5f95dce956, 75d0b899bb, 7f90e74e02, cf7761f91f, ef0915e1db,
68c486a25f, f5fab2bfa1, f43b54aaeb, aa06e6c8f3, bf3aa72a29, 198fcd8a6f, f7e925dcc1, cf6632a57a, 1a66a503b6, 82763a293a,
73e0bf92f7, 0d5ed57b40, 1ce45a6f67, 8db6642515, 29e336bd9b, d3e1907899, b5b710b3ae, 0d3958e8de, 9064eea216, dc6e7b9fcf,
b43ebb7d65, 8a5e72c936, cd924b66ca, 1076a7e61d, cff58457dd, b3f4398d64, 2ca47cb3f5, d19fe3c410, 2de145f372, 942fd91995,
cb15e7c700, 2959fb3198, aa13c625da, 958374addb, 6c2f673daf, fd2fb36eac, ab39502c98, 50c7b5d2b5, 3bdcd6b4f3, d89c88478d,
635916052c, bd34bd872a, 1d8f668e10, 28343bdcbd, ebfe8eabf5, 829cc5a242, 7fa27485df, b2839c4084, 880e368607, b9111fe677,
401c3d1109, 2a724a1c81, 52ad5a1591, 4abf95ba4f, f163b37f6a, a82dd2f064, 1873dc6a8a, 8c47db724c, 693e9d0733, 5c09cc16f2,
0824789459, ec4ca59bf0, 3c94f32e77, d66b6c8371, 6e687c4354, 13d8577451, 3c7438969a, 66cfc66d63, 13c4b7d212, 8c7d9124ba,
240d314f36, 27c9d80a7e, cb828ebe70, d35d09f82d, f0feb6c83a, dcbb9c1f5a, c05fcc8641, c98e7aed94, c0231ae780, 675e9507d8,
0b532fda76, 7d1c697c30, 65aee1e0c8, 8d413c1926, 104f8fc836, 157515f3c5, 183750fa86, fd8f92d0e7, 1e34734f8f, 7c2e843358,
79414164c2, 0cca7f831a, 1f4ae789b8, 3f46315a9d, e03b8b9e95, eb7f950e20, 848b735a06, 7337cce332, 7a5c0a01bc, faf222f8fb,
9258fd1296, 7440cece59, f1332872a6, ac12245f5f, d9eab12ee0, 2e756d698b, 15e235c63e, 65c23017c7, bb7608c56f, 8a3f40996a,
6f1d358a0c, 3ad315bcee, 129600d698, a96710dd8a, 54e08b729f, 01606315aa, 8f2f3bb1fa, 499fda2349, c7aacc96bb, 0394ebffee,
0225be53a2, da5add1294, 11f1f79222, 3f1fb62584, 14bf4cdf44, 56dcc9e1f8, 55a570e513, 2dca68b863, d7ee3dac2c, 866904ab80,
35a57a0dfb, 6afbac29a3, d23fbab188, 374c4325a8, 2c071a060e, 0f8b462677, 898028cb22, 4480d3e518, 4868d0d97a, 8052c7c5c6,
13d750db20, e98627223f, c89e379f49, 24a66db8d3, 4027254a4b, 37c37963c4, 666acc9b7a, a740ef2036, 4c590e002f, 26b06bdb45,
d5f1d7aebe, f7747f5dd6, c224c4f1d6, 2c402f4bd9, 602cd5e21f, 57e5bd0664, 31738155b5, 9db1823707, 89dfde9535, 39a43dbae1,
c1167ac552, 628ff4d619, add912ff68, b77c876481, 9b78a216a2, 7aff51ee83, e33736c791, d5cc0f2a62, 2a23fdab9f, aaa7e837cc,
71c8ca7d3d, 56a0dd7658, 7a82285b03, 4cda7093f6, 6a9bf2852a, 3e669a262a, f1b8431d99, a43ba78d3b, 2180d9ef6d, 0371a3023a,
d7588b8d40, b62b549f5f, 60a358fa2d, bb29f31d22, b1928f878d, 8651d880a0, 95824c588a, c54cbd63c8, 1db132290f, caa08ebd45,
b08ae4449b, 776a632170, d74a975f82, dfe54cd82f, 2b9522e9da, 528ee3e1c5, 9677099acf, 1b0e8d73d5, 50b846e9d3, fa9791127a,
2d9b432613, 5d8da88e9e, 0da690c8e9, e32c903fb6, 0ed7aa2db6, 85cc12e328, c470ade0a5, 9972c78cfa, 3a17c1b30b, 7b35405033,
aa1955de72, 7e58defc15, f2b7809d5d, 647d89c444, 683b2632b4, 13f7f27fd2, 279733a30b, be91c4d932, 497cd87fdd, 94a05e513e,
9021194efd, d4a76a0b99, 789a3662ce, 93a68cdfb4, 4f9bf028f5, 4bc0415720, 844d660d5c, 2c6453c72d, e6ffa78e59, bf39e89946
.coveragerc

```diff
@@ -16,3 +16,11 @@ source = src/
     */lib/python*/site-packages/
     */pypy*/site-packages/
     *\Lib\site-packages\
+
+[report]
+skip_covered = True
+show_missing = True
+exclude_lines =
+    \#\s*pragma: no cover
+    ^\s*raise NotImplementedError\b
+    ^\s*return NotImplemented\b
```
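A sketch of source lines the new ``exclude_lines`` patterns would drop from coverage reports — the class below is hypothetical, for illustration only:

```python
class Codec:
    def encode(self, data):  # pragma: no cover
        ...

    def decode(self, data):
        # Matched by ^\s*raise NotImplementedError\b, so never counted as a miss.
        raise NotImplementedError

    def __eq__(self, other):
        if not isinstance(other, Codec):
            # Matched by ^\s*return NotImplemented\b.
            return NotImplemented
        return True
```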
2  .github/PULL_REQUEST_TEMPLATE.md

```diff
@@ -6,7 +6,7 @@ Here is a quick checklist that should be present in PRs.
 -->
 
 - [ ] Target the `master` branch for bug fixes, documentation updates and trivial changes.
-- [ ] Target the `features` branch for new features and removals/deprecations.
+- [ ] Target the `features` branch for new features, improvements, and removals/deprecations.
 - [ ] Include documentation when adding new features.
 - [ ] Include new tests or update existing tests when applicable.
 
```
1  .gitignore

```diff
@@ -35,6 +35,7 @@ env/
 .tox
 .cache
 .pytest_cache
+.mypy_cache
 .coverage
 .coverage.*
 coverage.xml
```
.pre-commit-config.yaml

```diff
@@ -1,17 +1,15 @@
-exclude: doc/en/example/py2py3/test_py2.py
 repos:
--   repo: https://github.com/python/black
+-   repo: https://github.com/psf/black
     rev: 19.3b0
     hooks:
     -   id: black
         args: [--safe, --quiet]
-        language_version: python3
 -   repo: https://github.com/asottile/blacken-docs
     rev: v1.0.0
     hooks:
     -   id: blacken-docs
         additional_dependencies: [black==19.3b0]
         language_version: python3
 -   repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v2.2.3
     hooks:
@@ -28,6 +26,7 @@ repos:
     hooks:
     -   id: flake8
         language_version: python3
+        additional_dependencies: [flake8-typing-imports==1.3.0]
 -   repo: https://github.com/asottile/reorder_python_imports
     rev: v1.4.0
     hooks:
@@ -42,6 +41,12 @@ repos:
     rev: v1.4.0
     hooks:
     -   id: rst-backticks
+-   repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v0.720
+    hooks:
+    -   id: mypy
+        files: ^(src/|testing/)
+        args: []
 -   repo: local
     hooks:
     -   id: rst
@@ -54,7 +59,7 @@ repos:
         name: changelog filenames
         language: fail
         entry: 'changelog files must be named ####.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst'
-        exclude: changelog/(\d+\.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst|README.rst|_template.rst)
+        exclude: changelog/(\d+\.(feature|improvement|bugfix|doc|deprecation|removal|vendor|trivial).rst|README.rst|_template.rst)
         files: ^changelog/
     -   id: py-deprecated
         name: py library is deprecated
```
27  .travis.yml

```diff
@@ -13,6 +13,10 @@ env:
   global:
     - PYTEST_ADDOPTS=-vv
 
+# setuptools-scm needs all tags in order to obtain a proper version
+git:
+  depth: false
+
 install:
   - python -m pip install --upgrade --pre tox
 
@@ -31,7 +35,9 @@ jobs:
       - test $(python -c 'import sys; print("%d%d" % sys.version_info[0:2])') = 37
 
     # Full run of latest supported version, without xdist.
    - env: TOXENV=py37
+      # Coverage for:
+      # - test_sys_breakpoint_interception (via pexpect).
+    - env: TOXENV=py37-pexpect PYTEST_COVERAGE=1
      python: '3.7'
 
@@ -45,20 +51,16 @@ jobs:
       #   - pytester's LsofFdLeakChecker
       #   - TestArgComplete (linux only)
       #   - numpy
+      #   - old attrs
       # Empty PYTEST_ADDOPTS to run this non-verbose.
-    - env: TOXENV=py37-lsof-numpy-twisted-xdist PYTEST_COVERAGE=1 PYTEST_ADDOPTS=
+    - env: TOXENV=py37-lsof-oldattrs-numpy-twisted-xdist PYTEST_COVERAGE=1 PYTEST_ADDOPTS=
 
-    # Specialized factors for py37.
-    # Coverage for:
-    # - test_sys_breakpoint_interception (via pexpect).
-    - env: TOXENV=py37-pexpect PYTEST_COVERAGE=1
-
     - env: TOXENV=py37-pluggymaster-xdist
     - env: TOXENV=py37-freeze
 
     # Jobs only run via Travis cron jobs (currently daily).
     - env: TOXENV=py38-xdist
       python: '3.8-dev'
       if: type = cron
 
     - stage: baseline
       env: TOXENV=py36-xdist
@@ -70,8 +72,17 @@ jobs:
 
     - stage: deploy
       python: '3.6'
-      install: pip install -U setuptools setuptools_scm
+      install: pip install -U setuptools setuptools_scm tox
       script: skip
+      # token to upload github release notes: GH_RELEASE_NOTES_TOKEN
+      env:
+        - secure: "OjOeL7/0JUDkV00SsTs732e8vQjHynpbG9FKTNtZZJ+1Zn4Cib+hAlwmlBnvVukML0X60YpcfjnC4quDOIGLPsh5zeXnvJmYtAIIUNQXjWz8NhcGYrhyzuP1rqV22U68RTCdmOq3lMYU/W2acwHP7T49PwJtOiUM5kF120UAQ0Zi5EmkqkIvH8oM5mO9Dlver+/U7Htpz9rhKrHBXQNCMZI6yj2aUyukqB2PN2fjAlDbCF//+FmvYw9NjT4GeFOSkTCf4ER9yfqs7yglRfwiLtOCZ2qKQhWZNsSJDB89rxIRXWavJUjJKeY2EW2/NkomYJDpqJLIF4JeFRw/HhA47CYPeo6BJqyyNV+0CovL1frpWfi9UQw2cMbgFUkUIUk3F6DD59PHNIOX2R/HX56dQsw7WKl3QuHlCOkICXYg8F7Ta684IoKjeTX03/6QNOkURfDBwfGszY0FpbxrjCSWKom6RyZdyidnESaxv9RzjcIRZVh1rp8KMrwS1OrwRSdG0zjlsPr49hWMenN/8fKgcHTV4/r1Tj6mip0dorSRCrgUNIeRBKgmui6FS8642ab5JNKOxMteVPVR2sFuhjOQ0Jy+PmvceYY9ZMWc3+/B/KVh0dZ3hwvLGZep/vxDS2PwCA5/xw31714vT5LxidKo8yECjBynMU/wUTTS695D3NY="
+      addons:
+        apt:
+          packages:
+            # required by publish_gh_release_notes
+            - pandoc
+      after_deploy: tox -e publish_gh_release_notes
       deploy:
         provider: pypi
         user: nicoddemus
```
10  AUTHORS

```diff
@@ -15,6 +15,7 @@ Alexander Johnson
 Alexei Kozlenok
 Allan Feldman
 Aly Sivji
+Amir Elkess
 Anatoly Bubenkoff
 Anders Hovmöller
 Andras Mitzki
@@ -22,6 +23,7 @@ Andras Tim
 Andrea Cimatoribus
 Andreas Zeidler
 Andrey Paramonov
+Andrzej Klajnert
 Andrzej Ostrowski
 Andy Freeland
 Anthon van der Neut
@@ -54,6 +56,7 @@ Charnjit SiNGH (CCSJ)
 Chris Lamb
 Christian Boelsen
 Christian Fetzer
+Christian Neumüller
 Christian Theunert
 Christian Tismer
 Christopher Gilling
@@ -70,6 +73,7 @@ Danielle Jenkins
 Dave Hunt
 David Díaz-Barquero
 David Mohr
+David Paul Röthlisberger
 David Szotten
 David Vierra
 Daw-Ran Liou
@@ -94,6 +98,7 @@ Feng Ma
 Florian Bruhin
 Floris Bruynooghe
 Gabriel Reis
+Gene Wood
 George Kussumoto
 Georgy Dyuldin
 Graham Horler
@@ -171,6 +176,7 @@ mbyt
 Michael Aquilina
 Michael Birtwell
 Michael Droettboom
+Michael Goerz
 Michael Seifert
 Michal Wajszczuk
 Mihai Capotă
@@ -207,6 +213,7 @@ Raphael Castaneda
 Raphael Pierzina
 Raquel Alegre
 Ravi Chandra
+Robert Holt
 Roberto Polli
 Roland Puntaier
 Romain Dorgueil
@@ -237,6 +244,7 @@ Tareq Alayan
 Ted Xiao
 Thomas Grainger
 Thomas Hisch
+Tim Hoffmann
 Tim Strazny
 Tom Dalton
 Tom Viner
@@ -256,7 +264,9 @@ Wil Cooley
 William Lee
 Wim Glenn
 Wouter van Ackooy
+Xixi Zhao
+Xuan Luong
 Xuecong Liao
 Yoav Caspi
 Zac Hatfield-Dodds
 Zoltán Máté
```
417  CHANGELOG.rst

```diff
@@ -1,6 +1,6 @@
-=================
-Changelog history
-=================
+=========
+Changelog
+=========
 
 Versions follow `Semantic Versioning <https://semver.org/>`_ (``<major>.<minor>.<patch>``).
 
```
```diff
@@ -18,6 +18,329 @@ with advance notice in the **Deprecations** section of releases.
 
 .. towncrier release notes start
 
+pytest 5.2.2 (2019-10-24)
+=========================
+
+Bug Fixes
+---------
+
+- `#5206 <https://github.com/pytest-dev/pytest/issues/5206>`_: Fix ``--nf`` to not forget about known nodeids with partial test selection.
+
+
+- `#5906 <https://github.com/pytest-dev/pytest/issues/5906>`_: Fix crash with ``KeyboardInterrupt`` during ``--setup-show``.
+
+
+- `#5946 <https://github.com/pytest-dev/pytest/issues/5946>`_: Fixed issue when parametrizing fixtures with numpy arrays (and possibly other sequence-like types).
+
+
+- `#6044 <https://github.com/pytest-dev/pytest/issues/6044>`_: Properly ignore ``FileNotFoundError`` exceptions when trying to remove old temporary directories,
+  for instance when multiple processes try to remove the same directory (common with ``pytest-xdist``
+  for example).
+
+
+pytest 5.2.1 (2019-10-06)
+=========================
+
+Bug Fixes
+---------
+
+- `#5902 <https://github.com/pytest-dev/pytest/issues/5902>`_: Fix warnings about deprecated ``cmp`` attribute in ``attrs>=19.2``.
+
+
+pytest 4.6.6 (2019-10-11)
+=========================
+
+Bug Fixes
+---------
+
+- `#5523 <https://github.com/pytest-dev/pytest/issues/5523>`_: Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+.
+
+
+- `#5537 <https://github.com/pytest-dev/pytest/issues/5537>`_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
+  standard library on Python 3.8+.
+
+
+- `#5806 <https://github.com/pytest-dev/pytest/issues/5806>`_: Fix "lexer" being used when uploading to bpaste.net from ``--pastebin`` to "text".
+
+
+- `#5902 <https://github.com/pytest-dev/pytest/issues/5902>`_: Fix warnings about deprecated ``cmp`` attribute in ``attrs>=19.2``.
+
+
+Trivial/Internal Changes
+------------------------
+
+- `#5801 <https://github.com/pytest-dev/pytest/issues/5801>`_: Fixes python version checks (detected by ``flake8-2020``) in case python4 becomes a thing.
+
+
+pytest 5.2.0 (2019-09-28)
+=========================
+
+Deprecations
+------------
+
+- `#1682 <https://github.com/pytest-dev/pytest/issues/1682>`_: Passing arguments to pytest.fixture() as positional arguments is deprecated - pass them
+  as a keyword argument instead.
+
+
+Features
+--------
+
+- `#1682 <https://github.com/pytest-dev/pytest/issues/1682>`_: The ``scope`` parameter of ``@pytest.fixture`` can now be a callable that receives
+  the fixture name and the ``config`` object as keyword-only parameters.
+  See `the docs <https://docs.pytest.org/en/latest/fixture.html#dynamic-scope>`__ for more information.
+
+
+- `#5764 <https://github.com/pytest-dev/pytest/issues/5764>`_: New behavior of the ``--pastebin`` option: failures to connect to the pastebin server are reported, without failing the pytest run
+
+
+Bug Fixes
+---------
+
+- `#5806 <https://github.com/pytest-dev/pytest/issues/5806>`_: Fix "lexer" being used when uploading to bpaste.net from ``--pastebin`` to "text".
+
+
+- `#5884 <https://github.com/pytest-dev/pytest/issues/5884>`_: Fix ``--setup-only`` and ``--setup-show`` for custom pytest items.
+
+
+Trivial/Internal Changes
+------------------------
+
+- `#5056 <https://github.com/pytest-dev/pytest/issues/5056>`_: The HelpFormatter uses ``py.io.get_terminal_width`` for better width detection.
+
+
+pytest 5.1.3 (2019-09-18)
+=========================
+
+Bug Fixes
+---------
+
+- `#5807 <https://github.com/pytest-dev/pytest/issues/5807>`_: Fix pypy3.6 (nightly) on windows.
+
+
+- `#5811 <https://github.com/pytest-dev/pytest/issues/5811>`_: Handle ``--fulltrace`` correctly with ``pytest.raises``.
+
+
+- `#5819 <https://github.com/pytest-dev/pytest/issues/5819>`_: Windows: Fix regression with conftest whose qualified name contains uppercase
+  characters (introduced by #5792).
+
+
+pytest 5.1.2 (2019-08-30)
+=========================
+
+Bug Fixes
+---------
+
+- `#2270 <https://github.com/pytest-dev/pytest/issues/2270>`_: Fixed ``self`` reference in function-scoped fixtures defined plugin classes: previously ``self``
+  would be a reference to a *test* class, not the *plugin* class.
+
+
+- `#570 <https://github.com/pytest-dev/pytest/issues/570>`_: Fixed long standing issue where fixture scope was not respected when indirect fixtures were used during
+  parametrization.
+
+
+- `#5782 <https://github.com/pytest-dev/pytest/issues/5782>`_: Fix decoding error when printing an error response from ``--pastebin``.
+
+
+- `#5786 <https://github.com/pytest-dev/pytest/issues/5786>`_: Chained exceptions in test and collection reports are now correctly serialized, allowing plugins like
+  ``pytest-xdist`` to display them properly.
+
+
+- `#5792 <https://github.com/pytest-dev/pytest/issues/5792>`_: Windows: Fix error that occurs in certain circumstances when loading
+  ``conftest.py`` from a working directory that has casing other than the one stored
+  in the filesystem (e.g., ``c:\test`` instead of ``C:\test``).
+
+
+pytest 5.1.1 (2019-08-20)
+=========================
+
+Bug Fixes
+---------
+
+- `#5751 <https://github.com/pytest-dev/pytest/issues/5751>`_: Fixed ``TypeError`` when importing pytest on Python 3.5.0 and 3.5.1.
+
+
+pytest 5.1.0 (2019-08-15)
+=========================
+
+Removals
+--------
+
+- `#5180 <https://github.com/pytest-dev/pytest/issues/5180>`_: As per our policy, the following features have been deprecated in the 4.X series and are now
+  removed:
+
+  * ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead.
+
+  * ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument.
+
+  * ``message`` parameter of ``pytest.raises``.
+
+  * ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only
+    syntax. This might change the exception message from previous versions, but they still raise
+    ``TypeError`` on unknown keyword arguments as before.
+
+  * ``pytest.config`` global variable.
+
+  * ``tmpdir_factory.ensuretemp`` method.
+
+  * ``pytest_logwarning`` hook.
+
+  * ``RemovedInPytest4Warning`` warning type.
+
+  * ``request`` is now a reserved name for fixtures.
+
+
+  For more information consult
+  `Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs.
+
+
+- `#5565 <https://github.com/pytest-dev/pytest/issues/5565>`_: Removed unused support code for `unittest2 <https://pypi.org/project/unittest2/>`__.
+
+  The ``unittest2`` backport module is no longer
+  necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem
+  to be used: after removed, all tests still pass unchanged.
+
+  Although our policy is to introduce a deprecation period before removing any features or support
+  for third party libraries, because this code is apparently not used
+  at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to
+  remove it in this release.
+
+  If you experience a regression because of this, please
+  `file an issue <https://github.com/pytest-dev/pytest/issues/new>`__.
+
+
+- `#5615 <https://github.com/pytest-dev/pytest/issues/5615>`_: ``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument.
+
+  This was supported for Python 2 where it was tempting to use ``"message"``
+  instead of ``u"message"``.
+
+  Python 3 code is unlikely to pass ``bytes`` to these functions. If you do,
+  please decode it to an ``str`` beforehand.
+
+
+Features
+--------
+
+- `#5564 <https://github.com/pytest-dev/pytest/issues/5564>`_: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.
+
+
+- `#5576 <https://github.com/pytest-dev/pytest/issues/5576>`_: New `NUMBER <https://docs.pytest.org/en/latest/doctest.html#using-doctest-options>`__
+  option for doctests to ignore irrelevant differences in floating-point numbers.
+  Inspired by Sébastien Boisgérault's `numtest <https://github.com/boisgera/numtest>`__
+  extension for doctest.
+
+
+Improvements
+------------
+
+- `#5471 <https://github.com/pytest-dev/pytest/issues/5471>`_: JUnit XML now includes a timestamp and hostname in the testsuite tag.
+
+
+- `#5707 <https://github.com/pytest-dev/pytest/issues/5707>`_: Time taken to run the test suite now includes a human-readable representation when it takes over
+  60 seconds, for example::
+
+      ===== 2 failed in 102.70s (0:01:42) =====
+
+
+Bug Fixes
+---------
+
+- `#4344 <https://github.com/pytest-dev/pytest/issues/4344>`_: Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.
+
+
+- `#5115 <https://github.com/pytest-dev/pytest/issues/5115>`_: Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest.
+
+
+- `#5477 <https://github.com/pytest-dev/pytest/issues/5477>`_: The XML file produced by ``--junitxml`` now correctly contain a ``<testsuites>`` root element.
+
+
+- `#5524 <https://github.com/pytest-dev/pytest/issues/5524>`_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
+  which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.
+
+
+- `#5537 <https://github.com/pytest-dev/pytest/issues/5537>`_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
+  standard library on Python 3.8+.
+
+
+- `#5578 <https://github.com/pytest-dev/pytest/issues/5578>`_: Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc)
+  so they provide better error messages when users meant to use marks (for example ``@pytest.xfail``
+  instead of ``@pytest.mark.xfail``).
+
+
+- `#5606 <https://github.com/pytest-dev/pytest/issues/5606>`_: Fixed internal error when test functions were patched with objects that cannot be compared
+  for truth values against others, like ``numpy`` arrays.
+
+
+- `#5634 <https://github.com/pytest-dev/pytest/issues/5634>`_: ``pytest.exit`` is now correctly handled in ``unittest`` cases.
+  This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly.
+
+
+- `#5650 <https://github.com/pytest-dev/pytest/issues/5650>`_: Improved output when parsing an ini configuration file fails.
+
+
+- `#5701 <https://github.com/pytest-dev/pytest/issues/5701>`_: Fix collection of ``staticmethod`` objects defined with ``functools.partial``.
+
+
+- `#5734 <https://github.com/pytest-dev/pytest/issues/5734>`_: Skip async generator test functions, and update the warning message to refer to ``async def`` functions.
+
+
+Improved Documentation
+----------------------
+
+- `#5669 <https://github.com/pytest-dev/pytest/issues/5669>`_: Add docstring for ``Testdir.copy_example``.
+
+
+Trivial/Internal Changes
+------------------------
+
+- `#5095 <https://github.com/pytest-dev/pytest/issues/5095>`_: XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite
+  to avoid future regressions.
+
+
+- `#5516 <https://github.com/pytest-dev/pytest/issues/5516>`_: Cache node splitting function which can improve collection performance in very large test suites.
+
+
+- `#5603 <https://github.com/pytest-dev/pytest/issues/5603>`_: Simplified internal ``SafeRepr`` class and removed some dead code.
+
+
+- `#5664 <https://github.com/pytest-dev/pytest/issues/5664>`_: When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``,
+  the ``test_xfail_handling`` test no longer fails.
+
+
+- `#5684 <https://github.com/pytest-dev/pytest/issues/5684>`_: Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.).
+
+
+pytest 5.0.1 (2019-07-04)
+=========================
+
+Bug Fixes
+---------
+
+- `#5479 <https://github.com/pytest-dev/pytest/issues/5479>`_: Improve quoting in ``raises`` match failure message.
+
+
+- `#5523 <https://github.com/pytest-dev/pytest/issues/5523>`_: Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+.
+
+
+- `#5547 <https://github.com/pytest-dev/pytest/issues/5547>`_: ``--step-wise`` now handles ``xfail(strict=True)`` markers properly.
+
+
+Improved Documentation
+----------------------
+
+- `#5517 <https://github.com/pytest-dev/pytest/issues/5517>`_: Improve "Declaring new hooks" section in chapter "Writing Plugins"
+
+
 pytest 5.0.0 (2019-06-28)
 =========================
```
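To make the 5.2.0 deprecation above (#1682) concrete, a minimal sketch of the migration — the fixture body is hypothetical:

```python
import pytest

# Deprecated since 5.2.0: passing fixture() arguments positionally.
# @pytest.fixture("session")

# Preferred: pass them as keyword arguments.
@pytest.fixture(scope="session")
def shared_resource():
    # Hypothetical fixture, used only for illustration.
    return object()
```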
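The dynamic ``scope`` feature above (also #1682) can be sketched as follows, loosely after the example in the pytest docs; the ``--keep-containers`` option and ``spawn_container()`` helper are assumptions for illustration:

```python
import pytest


def determine_scope(fixture_name, config):
    # pytest calls this with keyword-only parameters and expects a scope string.
    if config.getoption("--keep-containers", default=False):
        return "session"
    return "function"


@pytest.fixture(scope=determine_scope)
def docker_container():
    yield spawn_container()  # hypothetical helper
```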
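For the ``pytest.raises`` removals listed under 5.1.0 (strings as the second argument and the ``message`` parameter), a sketch of the migration; ``match=`` is the supported way to assert on the error text:

```python
import pytest

# Removed in 5.1.0:
#   pytest.raises(ValueError, "int('oops')")          # string as second argument
#   pytest.raises(ValueError, message="should fail")  # message parameter

# Supported form: context manager plus a regex via match=.
with pytest.raises(ValueError, match="invalid literal"):
    int("oops")
```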
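The ``NUMBER`` doctest option above (#5576) compares floating-point numbers only to the precision written in the expected output; a minimal sketch, collected with ``pytest --doctest-modules``:

```python
def third() -> float:
    """
    >>> third()  # doctest: +NUMBER
    0.333
    """
    return 1.0 / 3.0
```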
```diff
@@ -68,6 +391,24 @@ Removals
 - `#5412 <https://github.com/pytest-dev/pytest/issues/5412>`_: ``ExceptionInfo`` objects (returned by ``pytest.raises``) now have the same ``str`` representation as ``repr``, which
   avoids some confusion when users use ``print(e)`` to inspect the object.
 
+  This means code like:
+
+  .. code-block:: python
+
+        with pytest.raises(SomeException) as e:
+            ...
+        assert "some message" in str(e)
+
+
+  Needs to be changed to:
+
+  .. code-block:: python
+
+        with pytest.raises(SomeException) as e:
+            ...
+        assert "some message" in str(e.value)
+
+
 
 Deprecations
@@ -203,6 +544,47 @@ Improved Documentation
 - `#5416 <https://github.com/pytest-dev/pytest/issues/5416>`_: Fix PytestUnknownMarkWarning in run/skip example.
 
 
+pytest 4.6.5 (2019-08-05)
+=========================
+
+Bug Fixes
+---------
+
+- `#4344 <https://github.com/pytest-dev/pytest/issues/4344>`_: Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.
+
+
+- `#5478 <https://github.com/pytest-dev/pytest/issues/5478>`_: Fix encode error when using unicode strings in exceptions with ``pytest.raises``.
+
+
+- `#5524 <https://github.com/pytest-dev/pytest/issues/5524>`_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
+  which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.
+
+
+- `#5547 <https://github.com/pytest-dev/pytest/issues/5547>`_: ``--step-wise`` now handles ``xfail(strict=True)`` markers properly.
+
+
+- `#5650 <https://github.com/pytest-dev/pytest/issues/5650>`_: Improved output when parsing an ini configuration file fails.
+
+
 pytest 4.6.4 (2019-06-28)
 =========================
 
 Bug Fixes
 ---------
@@ -2131,10 +2513,10 @@
   design. This introduces new ``Node.iter_markers(name)`` and
   ``Node.get_closest_marker(name)`` APIs. Users are **strongly encouraged** to
   read the `reasons for the revamp in the docs
-  <https://docs.pytest.org/en/latest/mark.html#marker-revamp-and-iteration>`_,
+  <https://docs.pytest.org/en/latest/historical-notes.html#marker-revamp-and-iteration>`_,
   or jump over to details about `updating existing code to use the new APIs
-  <https://docs.pytest.org/en/latest/mark.html#updating-code>`_. (`#3317
-  <https://github.com/pytest-dev/pytest/issues/3317>`_)
+  <https://docs.pytest.org/en/latest/historical-notes.html#updating-code>`_.
+  (`#3317 <https://github.com/pytest-dev/pytest/issues/3317>`_)
 
 - Now when ``@pytest.fixture`` is applied more than once to the same function a
   ``ValueError`` is raised. This buggy behavior would cause surprising problems
@@ -2540,10 +2922,10 @@ Features
   <https://github.com/pytest-dev/pytest/issues/3038>`_)
 
 - New `pytest_runtest_logfinish
-  <https://docs.pytest.org/en/latest/writing_plugins.html#_pytest.hookspec.pytest_runtest_logfinish>`_
+  <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_runtest_logfinish>`_
   hook which is called when a test item has finished executing, analogous to
   `pytest_runtest_logstart
-  <https://docs.pytest.org/en/latest/writing_plugins.html#_pytest.hookspec.pytest_runtest_start>`_.
+  <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_runtest_logstart>`_.
   (`#3101 <https://github.com/pytest-dev/pytest/issues/3101>`_)
 
 - Improve performance when collecting tests using many fixtures. (`#3107
@@ -3533,7 +3915,7 @@ Bug Fixes
   Thanks `@sirex`_ for the report and `@nicoddemus`_ for the PR.
 
 * Replace ``raise StopIteration`` usages in the code by simple ``returns`` to finish generators, in accordance to `PEP-479`_ (`#2160`_).
-  Thanks `@tgoodlet`_ for the report and `@nicoddemus`_ for the PR.
+  Thanks to `@nicoddemus`_ for the PR.
 
 * Fix internal errors when an unprintable ``AssertionError`` is raised inside a test.
   Thanks `@omerhadari`_ for the PR.
@@ -3664,7 +4046,7 @@ Bug Fixes
 
 .. _@syre: https://github.com/syre
 .. _@adler-j: https://github.com/adler-j
-.. _@d-b-w: https://bitbucket.org/d-b-w/
+.. _@d-b-w: https://github.com/d-b-w
 .. _@DuncanBetts: https://github.com/DuncanBetts
 .. _@dupuy: https://bitbucket.org/dupuy/
 .. _@kerrick-lyft: https://github.com/kerrick-lyft
@@ -3724,7 +4106,7 @@ Bug Fixes
 
 .. _@adborden: https://github.com/adborden
 .. _@cwitty: https://github.com/cwitty
-.. _@d_b_w: https://github.com/d_b_w
+.. _@d_b_w: https://github.com/d-b-w
 .. _@gdyuldin: https://github.com/gdyuldin
 .. _@matclab: https://github.com/matclab
 .. _@MSeifert04: https://github.com/MSeifert04
@@ -3759,7 +4141,7 @@ Bug Fixes
   Thanks `@axil`_ for the PR.
 
 * Explain a bad scope value passed to ``@fixture`` declarations or
-  a ``MetaFunc.parametrize()`` call. Thanks `@tgoodlet`_ for the PR.
+  a ``MetaFunc.parametrize()`` call.
 
 * This version includes ``pluggy-0.4.0``, which correctly handles
   ``VersionConflict`` errors in plugins (`#704`_).
@@ -3769,7 +4151,6 @@ Bug Fixes
 .. _@philpep: https://github.com/philpep
 .. _@raquel-ucl: https://github.com/raquel-ucl
 .. _@axil: https://github.com/axil
-.. _@tgoodlet: https://github.com/tgoodlet
 .. _@vlad-dragos: https://github.com/vlad-dragos
 
 .. _#1853: https://github.com/pytest-dev/pytest/issues/1853
@@ -4115,7 +4496,7 @@ time or change existing behaviors in order to make them less surprising/more use
 * Updated docstrings with a more uniform style.
 
 * Add stderr write for ``pytest.exit(msg)`` during startup. Previously the message was never shown.
-  Thanks `@BeyondEvil`_ for reporting `#1210`_. Thanks to `@JonathonSonesen`_ and
+  Thanks `@BeyondEvil`_ for reporting `#1210`_. Thanks to `@jgsonesen`_ and
   `@tomviner`_ for the PR.
 
 * No longer display the incorrect test deselection reason (`#1372`_).
@@ -4163,7 +4544,7 @@ time or change existing behaviors in order to make them less surprising/more use
   Thanks to `@Stranger6667`_ for the PR.
 
 * Fixed the total tests tally in junit xml output (`#1798`_).
-  Thanks to `@cryporchild`_ for the PR.
+  Thanks to `@cboelsen`_ for the PR.
 
 * Fixed off-by-one error with lines from ``request.node.warn``.
   Thanks to `@blueyed`_ for the PR.
@@ -4236,7 +4617,7 @@ time or change existing behaviors in order to make them less surprising/more use
 .. _@BeyondEvil: https://github.com/BeyondEvil
 .. _@blueyed: https://github.com/blueyed
 .. _@ceridwen: https://github.com/ceridwen
-.. _@cryporchild: https://github.com/cryporchild
+.. _@cboelsen: https://github.com/cboelsen
 .. _@csaftoiu: https://github.com/csaftoiu
 .. _@d6e: https://github.com/d6e
 .. _@davehunt: https://github.com/davehunt
@@ -4247,7 +4628,7 @@ time or change existing behaviors in order to make them less surprising/more use
 .. _@gprasad84: https://github.com/gprasad84
 .. _@graingert: https://github.com/graingert
 .. _@hartym: https://github.com/hartym
-.. _@JonathonSonesen: https://github.com/JonathonSonesen
+.. _@jgsonesen: https://github.com/jgsonesen
 .. _@kalekundert: https://github.com/kalekundert
 .. _@kvas-it: https://github.com/kvas-it
 .. _@marscher: https://github.com/marscher
@@ -4384,7 +4765,7 @@ time or change existing behaviors in order to make them less surprising/more use
 
 **Changes**
 
-* **Important**: `py.code <https://pylib.readthedocs.io/en/latest/code.html>`_ has been
+* **Important**: `py.code <https://pylib.readthedocs.io/en/stable/code.html>`_ has been
   merged into the ``pytest`` repository as ``pytest._code``. This decision
   was made because ``py.code`` had very few uses outside ``pytest`` and the
   fact that it was in a different repository made it difficult to fix bugs on
```
84  CODE_OF_CONDUCT.md (new file)

```diff
@@ -0,0 +1,84 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at coc@pytest.org. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+The coc@pytest.org address is routed to the following people who can also be
+contacted individually:
+
+- Brianna Laugher ([@pfctdayelise](https://github.com/pfctdayelise)): brianna@laugher.id.au
+- Bruno Oliveira ([@nicoddemus](https://github.com/nicoddemus)): nicoddemus@gmail.com
+- Florian Bruhin ([@the-compiler](https://github.com/the-compiler)): pytest@the-compiler.org
+- Ronny Pfannschmidt ([@RonnyPfannschmidt](https://github.com/RonnyPfannschmidt)): ich@ronnypfannschmidt.de
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
```
CONTRIBUTING.rst

```diff
@@ -5,8 +5,9 @@ Contribution getting started
 Contributions are highly welcomed and appreciated. Every little help counts,
 so do not hesitate!
 
-.. contents:: Contribution links
+.. contents::
+   :depth: 2
    :backlinks: none
 
 
 .. _submitfeedback:
@@ -166,10 +167,10 @@ Short version
 #. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed.
 #. Target ``master`` for bugfixes and doc changes.
 #. Target ``features`` for new features or functionality changes.
-#. Follow **PEP-8** for naming and `black <https://github.com/python/black>`_ for formatting.
+#. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting.
 #. Tests are run using ``tox``::
 
-    tox -e linting,py27,py37
+    tox -e linting,py37
 
    The test environments above are usually enough to cover most cases locally.
 
@@ -217,7 +218,9 @@ Here is a simple overview, with pytest-specific bits:
    If you need some help with Git, follow this quick start
    guide: https://git.wiki.kernel.org/index.php/QuickStart
 
-#. Install `pre-commit <https://pre-commit.com>`_ and its hook on the pytest repo::
+#. Install `pre-commit <https://pre-commit.com>`_ and its hook on the pytest repo:
+
+   **Note: pre-commit must be installed as admin, as it will not function otherwise**::
 
     $ pip install --user pre-commit
     $ pre-commit install
@@ -237,20 +240,20 @@ Here is a simple overview, with pytest-specific bits:
 #. Run all the tests
 
-   You need to have Python 2.7 and 3.7 available in your system. Now
+   You need to have Python 3.7 available in your system. Now
    running tests is as simple as issuing this command::
 
-    $ tox -e linting,py27,py37
+    $ tox -e linting,py37
 
-   This command will run tests via the "tox" tool against Python 2.7 and 3.7
+   This command will run tests via the "tox" tool against Python 3.7
    and also perform "lint" coding-style checks.
 
 #. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming.
 
-   You can pass different options to ``tox``. For example, to run tests on Python 2.7 and pass options to pytest
+   You can pass different options to ``tox``. For example, to run tests on Python 3.7 and pass options to pytest
    (e.g. enter pdb on failure) to pytest you can do::
 
-    $ tox -e py27 -- --pdb
+    $ tox -e py37 -- --pdb
 
    Or to only run tests in a particular test module on Python 3.7::
 
@@ -266,7 +269,8 @@ Here is a simple overview, with pytest-specific bits:
 #. Create a new changelog entry in ``changelog``. The file should be named ``<issueid>.<type>.rst``,
    where *issueid* is the number of the issue related to the change and *type* is one of
-   ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or ``trivial``.
+   ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or ``trivial``. You may not create a
+   changelog entry if the change doesn't affect the documented behaviour of Pytest.
 
 #. Add yourself to ``AUTHORS`` file if not there yet, in alphabetical order.
```
44  OPENCOLLECTIVE.rst (new file)

```diff
@@ -0,0 +1,44 @@
+==============
+OpenCollective
+==============
+
+pytest has a collective setup at `OpenCollective`_. This document describes how the core team manages
+OpenCollective-related activities.
+
+What is it
+==========
+
+Open Collective is an online funding platform for open and transparent communities.
+It provides tools to raise money and share your finances in full transparency.
+
+It is the platform of choice for individuals and companies that want to make one-time or
+monthly donations directly to the project.
+
+Funds
+=====
+
+The OpenCollective funds donated to pytest will be used to fund overall maintenance,
+local sprints, merchandising (stickers to distribute in conferences for example), and future
+gatherings of pytest developers (sprints).
+
+`Core contributors`_ which are contributing on a continuous basis are free to submit invoices
+to bill maintenance hours using the platform. How much each contributor should request is still an
+open question, but we should use common sense and trust in the contributors, most of which know
+themselves in-person. A good rule of thumb is to bill the same amount as monthly payments
+contributors which participate in the `Tidelift`_ subscription. If in doubt, just ask.
+
+Admins
+======
+
+A few people have admin access to the OpenCollective dashboard to make changes. Those people
+are part of the `@pytest-dev/opencollective-admins`_ team.
+
+`Core contributors`_ interested in helping out with OpenCollective maintenance are welcome! We don't
+expect much work here other than the occasional approval of expenses from other core contributors.
+Just drop a line to one of the `@pytest-dev/opencollective-admins`_ or use the mailing list.
+
+
+.. _`OpenCollective`: https://opencollective.com/pytest
+.. _`Tidelift`: https://tidelift.com
+.. _`core contributors`: https://github.com/orgs/pytest-dev/teams/core/members
+.. _`@pytest-dev/opencollective-admins`: https://github.com/orgs/pytest-dev/teams/opencollective-admins/members
```
24  README.rst

```diff
@@ -26,7 +26,7 @@
     :target: https://dev.azure.com/pytest-dev/pytest
 
 .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
-    :target: https://github.com/python/black
+    :target: https://github.com/psf/black
 
 .. image:: https://www.codetriage.com/pytest-dev/pytest/badges/users.svg
     :target: https://www.codetriage.com/pytest-dev/pytest
@@ -111,14 +111,28 @@ Consult the `Changelog <https://docs.pytest.org/en/latest/changelog.html>`__ pag
 Support pytest
 --------------
 
-You can support pytest by obtaining a `Tideflift subscription`_.
+`Open Collective`_ is an online funding platform for open and transparent communities.
+It provide tools to raise money and share your finances in full transparency.
 
-Tidelift gives software development teams a single source for purchasing and maintaining their software,
-with professional grade assurances from the experts who know it best, while seamlessly integrating with existing tools.
+It is the platform of choice for individuals and companies that want to make one-time or
+monthly donations directly to the project.
 
+See more datails in the `pytest collective`_.
+
+.. _Open Collective: https://opencollective.com
+.. _pytest collective: https://opencollective.com/pytest
+
 
-.. _`Tideflift subscription`: https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=readme
+pytest for enterprise
+---------------------
+
+Available as part of the Tidelift Subscription.
+
+The maintainers of pytest and thousands of other packages are working with Tidelift to deliver commercial support and
+maintenance for the open source dependencies you use to build your applications.
+Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use.
+
+`Learn more. <https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=enterprise&utm_term=repo>`_
 
 Security
 ^^^^^^^^
```
TIDELIFT.rst

```diff
@@ -12,6 +12,9 @@ Tidelift aims to make Open Source sustainable by offering subscriptions to compa
 on Open Source packages. This subscription allows it to pay maintainers of those Open Source
 packages to aid sustainability of the work.
 
+It is the perfect platform for companies that want to support Open Source packages and at the same
+time obtain assurances regarding maintenance, quality and security.
+
 Funds
 =====
 
```
|
||||
import pstats
|
||||
|
||||
script = sys.argv[1:] if len(sys.argv) > 1 else ["empty.py"]
|
||||
stats = cProfile.run("pytest.cmdline.main(%r)" % script, "prof")
|
||||
cProfile.run("pytest.cmdline.main(%r)" % script, "prof")
|
||||
p = pstats.Stats("prof")
|
||||
p.strip_dirs()
|
||||
p.sort_stats("cumulative")
|
||||
|
||||
changelog/README.rst

```diff
@@ -12,6 +12,7 @@ Each file should be named like ``<ISSUE>.<TYPE>.rst``, where
 ``<ISSUE>`` is an issue number, and ``<TYPE>`` is one of:
 
 * ``feature``: new user facing features, like new command-line options and new behavior.
+* ``improvement``: improvement of existing functionality, usually without requiring user intervention (for example, new fields being written in ``--junitxml``, improved colors in terminal, etc).
 * ``bugfix``: fixes a reported bug.
 * ``doc``: documentation improvement, like rewording an entire session or adding missing docs.
 * ``deprecation``: feature deprecation.
```
7  codecov.yml (new file)

```diff
@@ -0,0 +1,7 @@
+coverage:
+  status:
+    project: true
+    patch: true
+    changes: true
+
+comment: off
```
doc/en/Makefile

```diff
@@ -16,7 +16,7 @@ REGENDOC_ARGS := \
 	--normalize "/[ \t]+\n/\n/" \
 	--normalize "~\$$REGENDOC_TMPDIR~/home/sweet/project~" \
 	--normalize "~/path/to/example~/home/sweet/project~" \
-	--normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \
+	--normalize "/in \d.\d\ds/in 0.12s/" \
 	--normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \
 	--normalize "@pytest-(\d+)\\.[^ ,]+@pytest-\1.x.y@" \
 	--normalize "@(This is pytest version )(\d+)\\.[^ ,]+@\1\2.x.y@" \
```
doc/en/_templates/globaltoc.html

```diff
@@ -4,13 +4,15 @@
   <li><a href="{{ pathto('index') }}">Home</a></li>
   <li><a href="{{ pathto('getting-started') }}">Install</a></li>
   <li><a href="{{ pathto('contents') }}">Contents</a></li>
-  <li><a href="{{ pathto('reference') }}">Reference</a></li>
+  <li><a href="{{ pathto('reference') }}">API Reference</a></li>
   <li><a href="{{ pathto('example/index') }}">Examples</a></li>
   <li><a href="{{ pathto('customize') }}">Customize</a></li>
   <li><a href="{{ pathto('changelog') }}">Changelog</a></li>
   <li><a href="{{ pathto('contributing') }}">Contributing</a></li>
   <li><a href="{{ pathto('backwards-compatibility') }}">Backwards Compatibility</a></li>
+  <li><a href="{{ pathto('py27-py34-deprecation') }}">Python 2.7 and 3.4 Support</a></li>
+  <li><a href="{{ pathto('sponsor') }}">Sponsor</a></li>
   <li><a href="{{ pathto('tidelift') }}">pytest for Enterprise</a></li>
   <li><a href="{{ pathto('license') }}">License</a></li>
   <li><a href="{{ pathto('contact') }}">Contact Channels</a></li>
 </ul>
```
@@ -16,7 +16,7 @@
{%- block footer %}
<div class="footer">
© Copyright {{ copyright }}.
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a>.
Created using <a href="https://www.sphinx-doc.org/">Sphinx</a> {{ sphinx_version }}.
</div>
{% if pagename == 'index' %}
</div>
15
doc/en/_themes/flask/slim_searchbox.html
Normal file
@@ -0,0 +1,15 @@
{#
basic/searchbox.html with heading removed.
#}
{%- if pagename != "search" and builder != "singlehtml" %}
<div id="searchbox" style="display: none" role="search">
<div class="searchformwrapper">
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" aria-labelledby="searchlabel"
placeholder="Search"/>
<input type="submit" value="{{ _('Go') }}" />
</form>
</div>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
{%- endif %}
@@ -8,11 +8,12 @@
|
||||
|
||||
{% set page_width = '1020px' %}
|
||||
{% set sidebar_width = '220px' %}
|
||||
/* orange of logo is #d67c29 but we use black for links for now */
|
||||
{% set link_color = '#000' %}
|
||||
{% set link_hover_color = '#000' %}
|
||||
/* muted version of green logo color #C9D22A */
|
||||
{% set link_color = '#606413' %}
|
||||
/* blue logo color */
|
||||
{% set link_hover_color = '#009de0' %}
|
||||
{% set base_font = 'sans-serif' %}
|
||||
{% set header_font = 'serif' %}
|
||||
{% set header_font = 'sans-serif' %}
|
||||
|
||||
@import url("basic.css");
|
||||
|
||||
@@ -20,7 +21,7 @@
|
||||
|
||||
body {
|
||||
font-family: {{ base_font }};
|
||||
font-size: 17px;
|
||||
font-size: 16px;
|
||||
background-color: white;
|
||||
color: #000;
|
||||
margin: 0;
|
||||
@@ -78,13 +79,13 @@ div.related {
|
||||
}
|
||||
|
||||
div.sphinxsidebar a {
|
||||
color: #444;
|
||||
text-decoration: none;
|
||||
border-bottom: 1px dotted #999;
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
div.sphinxsidebar a:hover {
|
||||
border-bottom: 1px solid #999;
|
||||
color: {{ link_hover_color }};
|
||||
border-bottom: 1px solid {{ link_hover_color }};
|
||||
}
|
||||
|
||||
div.sphinxsidebar {
|
||||
@@ -106,14 +107,14 @@ div.sphinxsidebar h3,
|
||||
div.sphinxsidebar h4 {
|
||||
font-family: {{ header_font }};
|
||||
color: #444;
|
||||
font-size: 24px;
|
||||
font-size: 21px;
|
||||
font-weight: normal;
|
||||
margin: 0 0 5px 0;
|
||||
margin: 16px 0 0 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h4 {
|
||||
font-size: 20px;
|
||||
font-size: 18px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h3 a {
|
||||
@@ -205,10 +206,22 @@ div.body p, div.body dd, div.body li {
|
||||
line-height: 1.4em;
|
||||
}
|
||||
|
||||
ul.simple li {
|
||||
margin-bottom: 0.5em;
|
||||
}
|
||||
|
||||
div.topic ul.simple li {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
div.topic li > p:first-child {
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
div.admonition {
|
||||
background: #fafafa;
|
||||
margin: 20px -30px;
|
||||
padding: 10px 30px;
|
||||
padding: 10px 20px;
|
||||
border-top: 1px solid #ccc;
|
||||
border-bottom: 1px solid #ccc;
|
||||
}
|
||||
@@ -217,11 +230,6 @@ div.admonition tt.xref, div.admonition a tt {
|
||||
border-bottom: 1px solid #fafafa;
|
||||
}
|
||||
|
||||
dd div.admonition {
|
||||
margin-left: -60px;
|
||||
padding-left: 60px;
|
||||
}
|
||||
|
||||
div.admonition p.admonition-title {
|
||||
font-family: {{ header_font }};
|
||||
font-weight: normal;
|
||||
@@ -231,7 +239,7 @@ div.admonition p.admonition-title {
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
div.admonition p.last {
|
||||
div.admonition :last-child {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
@@ -243,7 +251,7 @@ dt:target, .highlight {
|
||||
background: #FAF3E8;
|
||||
}
|
||||
|
||||
div.note {
|
||||
div.note, div.warning {
|
||||
background-color: #eee;
|
||||
border: 1px solid #ccc;
|
||||
}
|
||||
@@ -257,6 +265,11 @@ div.topic {
|
||||
background-color: #eee;
|
||||
}
|
||||
|
||||
div.topic a {
|
||||
text-decoration: none;
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
p.admonition-title {
|
||||
display: inline;
|
||||
}
|
||||
@@ -358,21 +371,10 @@ ul, ol {
|
||||
|
||||
pre {
|
||||
background: #eee;
|
||||
padding: 7px 30px;
|
||||
margin: 15px -30px;
|
||||
padding: 7px 12px;
|
||||
line-height: 1.3em;
|
||||
}
|
||||
|
||||
dl pre, blockquote pre, li pre {
|
||||
margin-left: -60px;
|
||||
padding-left: 60px;
|
||||
}
|
||||
|
||||
dl dl pre {
|
||||
margin-left: -90px;
|
||||
padding-left: 90px;
|
||||
}
|
||||
|
||||
tt {
|
||||
background-color: #ecf0f3;
|
||||
color: #222;
|
||||
@@ -393,6 +395,20 @@ a.reference:hover {
|
||||
border-bottom: 1px solid {{ link_hover_color }};
|
||||
}
|
||||
|
||||
li.toctree-l1 a.reference,
|
||||
li.toctree-l2 a.reference,
|
||||
li.toctree-l3 a.reference,
|
||||
li.toctree-l4 a.reference {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
li.toctree-l1 a.reference:hover,
|
||||
li.toctree-l2 a.reference:hover,
|
||||
li.toctree-l3 a.reference:hover,
|
||||
li.toctree-l4 a.reference:hover {
|
||||
border-bottom: 1px solid {{ link_hover_color }};
|
||||
}
|
||||
|
||||
a.footnote-reference {
|
||||
text-decoration: none;
|
||||
font-size: 0.7em;
|
||||
@@ -408,6 +424,56 @@ a:hover tt {
|
||||
background: #EEE;
|
||||
}
|
||||
|
||||
#reference div.section h2 {
|
||||
/* separate code elements in the reference section */
|
||||
border-top: 2px solid #ccc;
|
||||
padding-top: 0.5em;
|
||||
}
|
||||
|
||||
#reference div.section h3 {
|
||||
/* separate code elements in the reference section */
|
||||
border-top: 1px solid #ccc;
|
||||
padding-top: 0.5em;
|
||||
}
|
||||
|
||||
dl.class, dl.function {
|
||||
margin-top: 1em;
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
dl.class > dd {
|
||||
border-left: 3px solid #ccc;
|
||||
margin-left: 0px;
|
||||
padding-left: 30px;
|
||||
}
|
||||
|
||||
dl.field-list {
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
dl.field-list dd {
|
||||
padding-left: 4em;
|
||||
border-left: 3px solid #ccc;
|
||||
margin-bottom: 0.5em;
|
||||
}
|
||||
|
||||
dl.field-list dd > ul {
|
||||
list-style: none;
|
||||
padding-left: 0px;
|
||||
}
|
||||
|
||||
dl.field-list dd > ul > li li :first-child {
|
||||
text-indent: 0;
|
||||
}
|
||||
|
||||
dl.field-list dd > ul > li :first-child {
|
||||
text-indent: -2em;
|
||||
padding-left: 0px;
|
||||
}
|
||||
|
||||
dl.field-list dd > p:first-child {
|
||||
text-indent: -2em;
|
||||
}
|
||||
|
||||
@media screen and (max-width: 870px) {
|
||||
|
||||
|
||||
@@ -24,11 +24,9 @@ The ideal pytest helper
|
||||
- feels confident in using pytest (e.g. has explored command line options, knows how to write parametrized tests, has an idea about conftest contents)
|
||||
- does not need to be an expert in every aspect!
|
||||
|
||||
`Pytest helpers, sign up here`_! (preferably in February, hard deadline 22 March)
|
||||
Pytest helpers, sign up here! (preferably in February, hard deadline 22 March)
|
||||
|
||||
|
||||
.. _`Pytest helpers, sign up here`: http://goo.gl/forms/nxqAhqWt1P
|
||||
|
||||
|
||||
The ideal partner project
|
||||
-----------------------------------------
|
||||
@@ -40,11 +38,9 @@ The ideal partner project
|
||||
- has the support of the core development team, in trying out pytest adoption
|
||||
- has no tests... or 100% test coverage... or somewhere in between!
|
||||
|
||||
`Partner projects, sign up here`_! (by 22 March)
|
||||
Partner projects, sign up here! (by 22 March)
|
||||
|
||||
|
||||
.. _`Partner projects, sign up here`: http://goo.gl/forms/ZGyqlHiwk3
|
||||
|
||||
|
||||
What does it mean to "adopt pytest"?
|
||||
-----------------------------------------
|
||||
@@ -68,11 +64,11 @@ Progressive success might look like:
|
||||
It may be after the month is up, the partner project decides that pytest is not right for it. That's okay - hopefully the pytest team will also learn something about its weaknesses or deficiencies.
|
||||
|
||||
.. _`nose and unittest`: faq.html#how-does-pytest-relate-to-nose-and-unittest
|
||||
.. _assert: asserts.html
|
||||
.. _assert: assert.html
|
||||
.. _pycmd: https://bitbucket.org/hpk42/pycmd/overview
|
||||
.. _`setUp/tearDown methods`: xunit_setup.html
|
||||
.. _fixtures: fixture.html
|
||||
.. _markers: markers.html
|
||||
.. _markers: mark.html
|
||||
.. _distributed: xdist.html
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,17 @@ Release announcements
|
||||
:maxdepth: 2
|
||||
|
||||
|
||||
release-5.2.2
|
||||
release-5.2.1
|
||||
release-5.2.0
|
||||
release-5.1.3
|
||||
release-5.1.2
|
||||
release-5.1.1
|
||||
release-5.1.0
|
||||
release-5.0.1
|
||||
release-5.0.0
|
||||
release-4.6.5
|
||||
release-4.6.4
|
||||
release-4.6.3
|
||||
release-4.6.2
|
||||
release-4.6.1
|
||||
|
||||
@@ -12,7 +12,7 @@ courtesy of Benjamin Peterson. You can now safely use ``assert``
|
||||
statements in test modules without having to worry about side effects
|
||||
or python optimization ("-OO") options. This is achieved by rewriting
|
||||
assert statements in test modules upon import, using a PEP302 hook.
|
||||
See http://pytest.org/assert.html#advanced-assertion-introspection for
|
||||
See https://docs.pytest.org/en/latest/assert.html for
|
||||
detailed information. The work has been partly sponsored by my company,
|
||||
merlinux GmbH.
|
||||
|
||||
|
||||
@@ -75,7 +75,7 @@ The py.test Development Team
|
||||
|
||||
**Changes**
|
||||
|
||||
* **Important**: `py.code <https://pylib.readthedocs.io/en/latest/code.html>`_ has been
|
||||
* **Important**: `py.code <https://pylib.readthedocs.io/en/stable/code.html>`_ has been
|
||||
merged into the ``pytest`` repository as ``pytest._code``. This decision
|
||||
was made because ``py.code`` had very few uses outside ``pytest`` and the
|
||||
fact that it was in a different repository made it difficult to fix bugs on
|
||||
@@ -88,7 +88,7 @@ The py.test Development Team
|
||||
**experimental**, so you definitely should not import it explicitly!
|
||||
|
||||
Please note that the original ``py.code`` is still available in
|
||||
`pylib <https://pylib.readthedocs.io>`_.
|
||||
`pylib <https://pylib.readthedocs.io/en/stable/>`_.
|
||||
|
||||
* ``pytest_enter_pdb`` now optionally receives the pytest config object.
|
||||
Thanks `@nicoddemus`_ for the PR.
|
||||
|
||||
@@ -66,8 +66,8 @@ The py.test Development Team
|
||||
|
||||
.. _#510: https://github.com/pytest-dev/pytest/issues/510
|
||||
.. _#1506: https://github.com/pytest-dev/pytest/pull/1506
|
||||
.. _#1496: https://github.com/pytest-dev/pytest/issue/1496
|
||||
.. _#1524: https://github.com/pytest-dev/pytest/issue/1524
|
||||
.. _#1496: https://github.com/pytest-dev/pytest/issues/1496
|
||||
.. _#1524: https://github.com/pytest-dev/pytest/pull/1524
|
||||
|
||||
.. _@astraw38: https://github.com/astraw38
|
||||
.. _@hackebrot: https://github.com/hackebrot
|
||||
|
||||
22
doc/en/announce/release-4.6.4.rst
Normal file
@@ -0,0 +1,22 @@
|
||||
pytest-4.6.4
|
||||
=======================================
|
||||
|
||||
pytest 4.6.4 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* Thomas Grainger
|
||||
* Zac Hatfield-Dodds
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
21
doc/en/announce/release-4.6.5.rst
Normal file
@@ -0,0 +1,21 @@
|
||||
pytest-4.6.5
|
||||
=======================================
|
||||
|
||||
pytest 4.6.5 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* Thomas Grainger
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
25
doc/en/announce/release-5.0.1.rst
Normal file
@@ -0,0 +1,25 @@
|
||||
pytest-5.0.1
|
||||
=======================================
|
||||
|
||||
pytest 5.0.1 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* AmirElkess
|
||||
* Andreu Vallbona Plazas
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Florian Bruhin
|
||||
* Michael Moore
|
||||
* Niklas Meinzer
|
||||
* Thomas Grainger
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
56
doc/en/announce/release-5.1.0.rst
Normal file
@@ -0,0 +1,56 @@
|
||||
pytest-5.1.0
|
||||
=======================================
|
||||
|
||||
The pytest team is proud to announce the 5.1.0 release!
|
||||
|
||||
pytest is a mature Python testing tool with more than 2000 tests
|
||||
against itself, passing on many different interpreters and platforms.
|
||||
|
||||
This release contains a number of bug fixes and improvements, so users are encouraged
|
||||
to take a look at the CHANGELOG:
|
||||
|
||||
https://docs.pytest.org/en/latest/changelog.html
|
||||
|
||||
For complete documentation, please visit:
|
||||
|
||||
https://docs.pytest.org/en/latest/
|
||||
|
||||
As usual, you can upgrade from PyPI via:
|
||||
|
||||
pip install -U pytest
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Albert Tugushev
|
||||
* Alexey Zankevich
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* David Röthlisberger
|
||||
* Florian Bruhin
|
||||
* Ilya Stepin
|
||||
* Jon Dufresne
|
||||
* Kaiqi
|
||||
* Max R
|
||||
* Miro Hrončok
|
||||
* Oliver Bestwalter
|
||||
* Ran Benita
|
||||
* Ronny Pfannschmidt
|
||||
* Samuel Searles-Bryant
|
||||
* Semen Zhydenko
|
||||
* Steffen Schroeder
|
||||
* Thomas Grainger
|
||||
* Tim Hoffmann
|
||||
* William Woodall
|
||||
* Wojtek Erbetowski
|
||||
* Xixi Zhao
|
||||
* Yash Todi
|
||||
* boris
|
||||
* dmitry.dygalo
|
||||
* helloocc
|
||||
* martbln
|
||||
* mei-li
|
||||
|
||||
|
||||
Happy testing,
|
||||
The Pytest Development Team
|
||||
24
doc/en/announce/release-5.1.1.rst
Normal file
@@ -0,0 +1,24 @@
|
||||
pytest-5.1.1
|
||||
=======================================
|
||||
|
||||
pytest 5.1.1 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* Florian Bruhin
|
||||
* Hugo van Kemenade
|
||||
* Ran Benita
|
||||
* Ronny Pfannschmidt
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
23
doc/en/announce/release-5.1.2.rst
Normal file
@@ -0,0 +1,23 @@
|
||||
pytest-5.1.2
|
||||
=======================================
|
||||
|
||||
pytest 5.1.2 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Andrzej Klajnert
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Christian Neumüller
|
||||
* Robert Holt
|
||||
* linchiwei123
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
23
doc/en/announce/release-5.1.3.rst
Normal file
@@ -0,0 +1,23 @@
|
||||
pytest-5.1.3
|
||||
=======================================
|
||||
|
||||
pytest 5.1.3 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Christian Neumüller
|
||||
* Daniel Hahler
|
||||
* Gene Wood
|
||||
* Hugo
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
35
doc/en/announce/release-5.2.0.rst
Normal file
@@ -0,0 +1,35 @@
|
||||
pytest-5.2.0
|
||||
=======================================
|
||||
|
||||
The pytest team is proud to announce the 5.2.0 release!
|
||||
|
||||
pytest is a mature Python testing tool with more than 2000 tests
|
||||
against itself, passing on many different interpreters and platforms.
|
||||
|
||||
This release contains a number of bug fixes and improvements, so users are encouraged
|
||||
to take a look at the CHANGELOG:
|
||||
|
||||
https://docs.pytest.org/en/latest/changelog.html
|
||||
|
||||
For complete documentation, please visit:
|
||||
|
||||
https://docs.pytest.org/en/latest/
|
||||
|
||||
As usual, you can upgrade from PyPI via:
|
||||
|
||||
pip install -U pytest
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Andrzej Klajnert
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* James Cooke
|
||||
* Michael Goerz
|
||||
* Ran Benita
|
||||
* Tomáš Chvátal
|
||||
|
||||
|
||||
Happy testing,
|
||||
The Pytest Development Team
|
||||
23
doc/en/announce/release-5.2.1.rst
Normal file
@@ -0,0 +1,23 @@
|
||||
pytest-5.2.1
|
||||
=======================================
|
||||
|
||||
pytest 5.2.1 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Florian Bruhin
|
||||
* Hynek Schlawack
|
||||
* Kevin J. Foley
|
||||
* tadashigaki
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
29
doc/en/announce/release-5.2.2.rst
Normal file
@@ -0,0 +1,29 @@
|
||||
pytest-5.2.2
|
||||
=======================================
|
||||
|
||||
pytest 5.2.2 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Albert Tugushev
|
||||
* Andrzej Klajnert
|
||||
* Anthony Sottile
|
||||
* Bruno Oliveira
|
||||
* Daniel Hahler
|
||||
* Florian Bruhin
|
||||
* Nattaphoom Chaipreecha
|
||||
* Oliver Bestwalter
|
||||
* Philipp Loose
|
||||
* Ran Benita
|
||||
* Victor Maryama
|
||||
* Yoav Caspi
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
||||
@@ -31,7 +31,7 @@ you will see the return value of the function call:
|
||||
|
||||
$ pytest test_assert1.py
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collected 1 item
|
||||
@@ -47,7 +47,7 @@ you will see the return value of the function call:
|
||||
E + where 3 = f()
|
||||
|
||||
test_assert1.py:6: AssertionError
|
||||
========================= 1 failed in 0.12 seconds =========================
|
||||
============================ 1 failed in 0.12s =============================
|
||||
|
||||
``pytest`` has support for showing the values of the most common subexpressions
|
||||
including calls, attributes, comparisons, and binary and unary
|
||||
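A small illustration, not taken from this diff, of what that introspection reports:

.. code-block:: python

    # content of a hypothetical test_introspection.py
    def test_subexpressions():
        items = [1, 2, 3]
        # on failure the report shows the computed value, e.g. len(items) = 3
        assert len(items) == 2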
@@ -186,7 +186,7 @@ if you run this module:
|
||||
|
||||
$ pytest test_assert2.py
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collected 1 item
|
||||
@@ -208,7 +208,7 @@ if you run this module:
|
||||
E Use -v to get the full diff
|
||||
|
||||
test_assert2.py:6: AssertionError
|
||||
========================= 1 failed in 0.12 seconds =========================
|
||||
============================ 1 failed in 0.12s =============================
|
||||
|
||||
Special comparisons are done for a number of cases:
|
||||
|
||||
@@ -238,14 +238,17 @@ file which provides an alternative explanation for ``Foo`` objects:
|
||||
|
||||
def pytest_assertrepr_compare(op, left, right):
|
||||
if isinstance(left, Foo) and isinstance(right, Foo) and op == "==":
|
||||
return ["Comparing Foo instances:", " vals: %s != %s" % (left.val, right.val)]
|
||||
return [
|
||||
"Comparing Foo instances:",
|
||||
" vals: {} != {}".format(left.val, right.val),
|
||||
]
|
||||
|
||||
now, given this test module:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# content of test_foocompare.py
|
||||
class Foo(object):
|
||||
class Foo:
|
||||
def __init__(self, val):
|
||||
self.val = val
|
||||
|
||||
@@ -276,7 +279,7 @@ the conftest file:
|
||||
E vals: 1 != 2
|
||||
|
||||
test_foocompare.py:12: AssertionError
|
||||
1 failed in 0.12 seconds
|
||||
1 failed in 0.12s
|
||||
|
||||
.. _assert-details:
|
||||
.. _`assert introspection`:
|
||||
|
||||
@@ -104,6 +104,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
|
||||
Captured logs are available through the following properties/methods::
|
||||
|
||||
* caplog.messages -> list of format-interpolated log messages
|
||||
* caplog.text -> string containing formatted log output
|
||||
* caplog.records -> list of logging.LogRecord instances
|
||||
* caplog.record_tuples -> list of (logger_name, level, message) tuples
|
||||
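A minimal sketch of those properties in use (the test name and log message are made up):

.. code-block:: python

    # content of a hypothetical test_caplog_props.py
    import logging

    def test_caplog_properties(caplog):
        logging.getLogger().warning("disk %s full", "/tmp")
        assert caplog.messages == ["disk /tmp full"]          # interpolated messages
        assert "disk /tmp full" in caplog.text                # formatted output
        assert caplog.records[0].levelno == logging.WARNING   # LogRecord instances
        assert caplog.record_tuples == [("root", logging.WARNING, "disk /tmp full")]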
@@ -160,9 +161,12 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
|
||||
in python < 3.6 this is a pathlib2.Path
|
||||
|
||||
|
||||
no tests ran in 0.12 seconds
|
||||
no tests ran in 0.12s
|
||||
|
||||
You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like::
|
||||
You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import pytest
|
||||
|
||||
help(pytest)
|
||||
|
||||
131
doc/en/cache.rst
@@ -33,15 +33,18 @@ Other plugins may access the `config.cache`_ object to set/get
|
||||
Rerunning only failures or failures first
|
||||
-----------------------------------------------
|
||||
|
||||
First, let's create 50 test invocation of which only 2 fail::
|
||||
First, let's create 50 test invocations, of which only 2 fail:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# content of test_50.py
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.parametrize("i", range(50))
|
||||
def test_num(i):
|
||||
if i in (17, 25):
|
||||
pytest.fail("bad luck")
|
||||
pytest.fail("bad luck")
|
||||
|
||||
If you run this for the first time you will see two failures:
|
||||
|
||||
@@ -57,10 +60,10 @@ If you run this for the first time you will see two failures:
|
||||
@pytest.mark.parametrize("i", range(50))
|
||||
def test_num(i):
|
||||
if i in (17, 25):
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
|
||||
test_50.py:6: Failed
|
||||
test_50.py:7: Failed
|
||||
_______________________________ test_num[25] _______________________________
|
||||
|
||||
i = 25
|
||||
@@ -68,11 +71,11 @@ If you run this for the first time you will see two failures:
|
||||
@pytest.mark.parametrize("i", range(50))
|
||||
def test_num(i):
|
||||
if i in (17, 25):
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
|
||||
test_50.py:6: Failed
|
||||
2 failed, 48 passed in 0.12 seconds
|
||||
test_50.py:7: Failed
|
||||
2 failed, 48 passed in 0.12s
|
||||
|
||||
If you then run it with ``--lf``:
|
||||
|
||||
@@ -80,7 +83,7 @@ If you then run it with ``--lf``:
|
||||
|
||||
$ pytest --lf
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collected 50 items / 48 deselected / 2 selected
|
||||
@@ -96,10 +99,10 @@ If you then run it with ``--lf``:
|
||||
@pytest.mark.parametrize("i", range(50))
|
||||
def test_num(i):
|
||||
if i in (17, 25):
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
|
||||
test_50.py:6: Failed
|
||||
test_50.py:7: Failed
|
||||
_______________________________ test_num[25] _______________________________
|
||||
|
||||
i = 25
|
||||
@@ -107,11 +110,11 @@ If you then run it with ``--lf``:
|
||||
@pytest.mark.parametrize("i", range(50))
|
||||
def test_num(i):
|
||||
if i in (17, 25):
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
|
||||
test_50.py:6: Failed
|
||||
================= 2 failed, 48 deselected in 0.12 seconds ==================
|
||||
test_50.py:7: Failed
|
||||
===================== 2 failed, 48 deselected in 0.12s =====================
|
||||
|
||||
You have run only the two failing tests from the last run, while the 48 passing
|
||||
tests have not been run ("deselected").
|
||||
@@ -124,7 +127,7 @@ of ``FF`` and dots):
|
||||
|
||||
$ pytest --ff
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collected 50 items
|
||||
@@ -140,10 +143,10 @@ of ``FF`` and dots):
|
||||
@pytest.mark.parametrize("i", range(50))
|
||||
def test_num(i):
|
||||
if i in (17, 25):
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
|
||||
test_50.py:6: Failed
|
||||
test_50.py:7: Failed
|
||||
_______________________________ test_num[25] _______________________________
|
||||
|
||||
i = 25
|
||||
@@ -151,11 +154,11 @@ of ``FF`` and dots):
|
||||
@pytest.mark.parametrize("i", range(50))
|
||||
def test_num(i):
|
||||
if i in (17, 25):
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
> pytest.fail("bad luck")
|
||||
E Failed: bad luck
|
||||
|
||||
test_50.py:6: Failed
|
||||
=================== 2 failed, 48 passed in 0.12 seconds ====================
|
||||
test_50.py:7: Failed
|
||||
======================= 2 failed, 48 passed in 0.12s =======================
|
||||
|
||||
.. _`config.cache`:
|
||||
|
||||
@@ -183,15 +186,19 @@ The new config.cache object
|
||||
Plugins or conftest.py support code can get a cached value using the
|
||||
pytest ``config`` object. Here is a basic example plugin which
|
||||
implements a :ref:`fixture` which re-uses previously created state
|
||||
across pytest invocations::
|
||||
across pytest invocations:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# content of test_caching.py
|
||||
import pytest
|
||||
import time
|
||||
|
||||
|
||||
def expensive_computation():
|
||||
print("running expensive computation...")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mydata(request):
|
||||
val = request.config.cache.get("example/value", None)
|
||||
@@ -201,6 +208,7 @@ across pytest invocations::
|
||||
request.config.cache.set("example/value", val)
|
||||
return val
|
||||
|
||||
|
||||
def test_function(mydata):
|
||||
assert mydata == 23
|
||||
|
||||
@@ -219,10 +227,10 @@ If you run this command for the first time, you can see the print statement:
|
||||
> assert mydata == 23
|
||||
E assert 42 == 23
|
||||
|
||||
test_caching.py:17: AssertionError
|
||||
test_caching.py:20: AssertionError
|
||||
-------------------------- Captured stdout setup ---------------------------
|
||||
running expensive computation...
|
||||
1 failed in 0.12 seconds
|
||||
1 failed in 0.12s
|
||||
|
||||
If you run it a second time, the value will be retrieved from
|
||||
the cache and nothing will be printed:
|
||||
@@ -240,8 +248,8 @@ the cache and nothing will be printed:
|
||||
> assert mydata == 23
|
||||
E assert 42 == 23
|
||||
|
||||
test_caching.py:17: AssertionError
|
||||
1 failed in 0.12 seconds
|
||||
test_caching.py:20: AssertionError
|
||||
1 failed in 0.12s
|
||||
|
||||
See the :ref:`cache-api` for more details.
|
||||
|
||||
@@ -256,7 +264,7 @@ You can always peek at the content of the cache using the
|
||||
|
||||
$ pytest --cache-show
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
@@ -269,13 +277,66 @@ You can always peek at the content of the cache using the
|
||||
'test_caching.py::test_function': True,
|
||||
'test_foocompare.py::test_compare': True}
|
||||
cache/nodeids contains:
|
||||
['test_caching.py::test_function']
|
||||
['test_assert1.py::test_function',
|
||||
'test_assert2.py::test_set_comparison',
|
||||
'test_foocompare.py::test_compare',
|
||||
'test_50.py::test_num[0]',
|
||||
'test_50.py::test_num[1]',
|
||||
'test_50.py::test_num[2]',
|
||||
'test_50.py::test_num[3]',
|
||||
'test_50.py::test_num[4]',
|
||||
'test_50.py::test_num[5]',
|
||||
'test_50.py::test_num[6]',
|
||||
'test_50.py::test_num[7]',
|
||||
'test_50.py::test_num[8]',
|
||||
'test_50.py::test_num[9]',
|
||||
'test_50.py::test_num[10]',
|
||||
'test_50.py::test_num[11]',
|
||||
'test_50.py::test_num[12]',
|
||||
'test_50.py::test_num[13]',
|
||||
'test_50.py::test_num[14]',
|
||||
'test_50.py::test_num[15]',
|
||||
'test_50.py::test_num[16]',
|
||||
'test_50.py::test_num[17]',
|
||||
'test_50.py::test_num[18]',
|
||||
'test_50.py::test_num[19]',
|
||||
'test_50.py::test_num[20]',
|
||||
'test_50.py::test_num[21]',
|
||||
'test_50.py::test_num[22]',
|
||||
'test_50.py::test_num[23]',
|
||||
'test_50.py::test_num[24]',
|
||||
'test_50.py::test_num[25]',
|
||||
'test_50.py::test_num[26]',
|
||||
'test_50.py::test_num[27]',
|
||||
'test_50.py::test_num[28]',
|
||||
'test_50.py::test_num[29]',
|
||||
'test_50.py::test_num[30]',
|
||||
'test_50.py::test_num[31]',
|
||||
'test_50.py::test_num[32]',
|
||||
'test_50.py::test_num[33]',
|
||||
'test_50.py::test_num[34]',
|
||||
'test_50.py::test_num[35]',
|
||||
'test_50.py::test_num[36]',
|
||||
'test_50.py::test_num[37]',
|
||||
'test_50.py::test_num[38]',
|
||||
'test_50.py::test_num[39]',
|
||||
'test_50.py::test_num[40]',
|
||||
'test_50.py::test_num[41]',
|
||||
'test_50.py::test_num[42]',
|
||||
'test_50.py::test_num[43]',
|
||||
'test_50.py::test_num[44]',
|
||||
'test_50.py::test_num[45]',
|
||||
'test_50.py::test_num[46]',
|
||||
'test_50.py::test_num[47]',
|
||||
'test_50.py::test_num[48]',
|
||||
'test_50.py::test_num[49]',
|
||||
'test_caching.py::test_function']
|
||||
cache/stepwise contains:
|
||||
[]
|
||||
example/value contains:
|
||||
42
|
||||
|
||||
======================= no tests ran in 0.12 seconds =======================
|
||||
========================== no tests ran in 0.12s ===========================
|
||||
|
||||
``--cache-show`` takes an optional argument to specify a glob pattern for
|
||||
filtering:
|
||||
@@ -284,7 +345,7 @@ filtering:
|
||||
|
||||
$ pytest --cache-show example/*
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
@@ -292,7 +353,7 @@ filtering:
|
||||
example/value contains:
|
||||
42
|
||||
|
||||
======================= no tests ran in 0.12 seconds =======================
|
||||
========================== no tests ran in 0.12s ===========================
|
||||
|
||||
Clearing Cache content
|
||||
----------------------
|
||||
|
||||
@@ -49,16 +49,21 @@ Using print statements for debugging
|
||||
---------------------------------------------------
|
||||
|
||||
One primary benefit of the default capturing of stdout/stderr output
|
||||
is that you can use print statements for debugging::
|
||||
is that you can use print statements for debugging:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# content of test_module.py
|
||||
|
||||
|
||||
def setup_function(function):
|
||||
print("setting up %s" % function)
|
||||
print("setting up", function)
|
||||
|
||||
|
||||
def test_func1():
|
||||
assert True
|
||||
|
||||
|
||||
def test_func2():
|
||||
assert False
|
||||
|
||||
@@ -69,7 +74,7 @@ of the failing function and hide the other one:
|
||||
|
||||
$ pytest
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collected 2 items
|
||||
@@ -83,10 +88,10 @@ of the failing function and hide the other one:
|
||||
> assert False
|
||||
E assert False
|
||||
|
||||
test_module.py:9: AssertionError
|
||||
test_module.py:12: AssertionError
|
||||
-------------------------- Captured stdout setup ---------------------------
|
||||
setting up <function test_func2 at 0xdeadbeef>
|
||||
==================== 1 failed, 1 passed in 0.12 seconds ====================
|
||||
======================= 1 failed, 1 passed in 0.12s ========================
|
||||
|
||||
Accessing captured output from a test function
|
||||
---------------------------------------------------
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
#
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
# The short X.Y version.
|
||||
import datetime
|
||||
import os
|
||||
import sys
|
||||
|
||||
@@ -63,8 +62,7 @@ master_doc = "contents"
|
||||
|
||||
# General information about the project.
|
||||
project = "pytest"
|
||||
year = datetime.datetime.utcnow().year
|
||||
copyright = "2015–2019 , holger krekel and pytest-dev team"
|
||||
copyright = "2015–2019, holger krekel and pytest-dev team"
|
||||
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
@@ -167,18 +165,18 @@ html_favicon = "img/pytest1favi.ico"
|
||||
|
||||
html_sidebars = {
|
||||
"index": [
|
||||
"slim_searchbox.html",
|
||||
"sidebarintro.html",
|
||||
"globaltoc.html",
|
||||
"links.html",
|
||||
"sourcelink.html",
|
||||
"searchbox.html",
|
||||
],
|
||||
"**": [
|
||||
"slim_searchbox.html",
|
||||
"globaltoc.html",
|
||||
"relations.html",
|
||||
"links.html",
|
||||
"sourcelink.html",
|
||||
"searchbox.html",
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
@@ -38,19 +38,24 @@ Full pytest documentation
|
||||
customize
|
||||
example/index
|
||||
bash-completion
|
||||
faq
|
||||
|
||||
backwards-compatibility
|
||||
deprecations
|
||||
py27-py34-deprecation
|
||||
historical-notes
|
||||
license
|
||||
|
||||
contributing
|
||||
development_guide
|
||||
|
||||
sponsor
|
||||
tidelift
|
||||
license
|
||||
contact
|
||||
|
||||
historical-notes
|
||||
talks
|
||||
projects
|
||||
faq
|
||||
contact
|
||||
tidelift
|
||||
|
||||
|
||||
.. only:: html
|
||||
|
||||
|
||||
@@ -107,8 +107,8 @@ check for ini-files as follows:
|
||||
|
||||
# first look for pytest.ini files
|
||||
path/pytest.ini
|
||||
path/setup.cfg # must also contain [tool:pytest] section to match
|
||||
path/tox.ini # must also contain [pytest] section to match
|
||||
path/setup.cfg # must also contain [tool:pytest] section to match
|
||||
pytest.ini
|
||||
... # all the way down to the root
|
||||
|
||||
@@ -134,10 +134,13 @@ progress output, you can write it into a configuration file:
|
||||
.. code-block:: ini
|
||||
|
||||
# content of pytest.ini or tox.ini
|
||||
# setup.cfg files should use [tool:pytest] section instead
|
||||
[pytest]
|
||||
addopts = -ra -q
|
||||
|
||||
# content of setup.cfg
|
||||
[tool:pytest]
|
||||
addopts = -ra -q
|
||||
|
||||
Alternatively, you can set a ``PYTEST_ADDOPTS`` environment variable to add command
|
||||
line options while the environment is in use:
|
||||
|
||||
|
||||
@@ -20,8 +20,8 @@ Below is a complete list of all pytest features which are considered deprecated.
|
||||
:ref:`standard warning filters <warnings>`.
|
||||
|
||||
|
||||
Removal of ``funcargnames`` alias for ``fixturenames``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
``funcargnames`` alias for ``fixturenames``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. deprecated:: 5.0
|
||||
|
||||
@@ -34,12 +34,47 @@ in places where we or plugin authors must distinguish between fixture names and
|
||||
names supplied by non-fixture things such as ``pytest.mark.parametrize``.
|
||||
|
||||
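A short sketch of the surviving spelling (the fixture name here is made up):

.. code-block:: python

    import pytest

    @pytest.fixture
    def db():
        return object()

    def test_active_fixture_names(request, db):
        # `request.fixturenames` replaces the removed `funcargnames` alias
        assert "db" in request.fixturenames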
|
||||
Result log (``--result-log``)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. deprecated:: 4.0
|
||||
|
||||
The ``--result-log`` option produces a stream of test reports which can be
|
||||
analysed at runtime. It uses a custom format which requires users to implement their own
|
||||
parser, but the team believes using a line-based format that can be parsed using standard
|
||||
tools would provide a suitable and better alternative.
|
||||
|
||||
The current plan is to provide an alternative in the pytest 5.0 series and remove the ``--result-log``
|
||||
option in pytest 6.0 after the new implementation proves satisfactory to all users and is deemed
|
||||
stable.
|
||||
|
||||
The actual alternative is still being discussed in issue `#4488 <https://github.com/pytest-dev/pytest/issues/4488>`__.
|
||||
|
||||
|
||||
Removed Features
|
||||
----------------
|
||||
|
||||
As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after
|
||||
an appropriate period of deprecation has passed.
|
||||
|
||||
|
||||
``pytest.config`` global
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. versionremoved:: 5.0
|
||||
|
||||
The ``pytest.config`` global object is deprecated. Instead use
|
||||
``request.config`` (via the ``request`` fixture) or if you are a plugin author
|
||||
use the ``pytest_configure(config)`` hook. Note that many hooks can also access
|
||||
the ``config`` object indirectly, through ``session.config`` or ``item.config`` for example.
|
||||
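A sketch of both replacements named above (the marker line and option are illustrative):

.. code-block:: python

    import pytest

    def pytest_configure(config):
        # plugin authors receive the config object directly via this hook
        config.addinivalue_line("markers", "slow: marks tests as slow")

    @pytest.fixture
    def verbosity(request):
        # tests and fixtures reach the same object through `request.config`
        return request.config.getoption("verbose")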
|
||||
|
||||
.. _`raises message deprecated`:
|
||||
|
||||
``"message"`` parameter of ``pytest.raises``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. deprecated:: 4.1
|
||||
.. versionremoved:: 5.0
|
||||
|
||||
It is a common mistake to think this parameter will match the exception message, while in fact
|
||||
it only serves to provide a custom message in case the ``pytest.raises`` check fails. To prevent
|
||||
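The way to actually assert on the exception text is the ``match`` argument of ``pytest.raises``, sketched here with a made-up message:

.. code-block:: python

    import pytest

    def test_message_is_checked():
        with pytest.raises(ValueError, match="must be positive"):
            raise ValueError("value must be positive")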
@@ -70,22 +105,12 @@ If you still have concerns about this deprecation and future removal, please com
|
||||
`issue #3974 <https://github.com/pytest-dev/pytest/issues/3974>`__.
|
||||
|
||||
|
||||
``pytest.config`` global
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. deprecated:: 4.1
|
||||
|
||||
The ``pytest.config`` global object is deprecated. Instead use
|
||||
``request.config`` (via the ``request`` fixture) or if you are a plugin author
|
||||
use the ``pytest_configure(config)`` hook. Note that many hooks can also access
|
||||
the ``config`` object indirectly, through ``session.config`` or ``item.config`` for example.
|
||||
|
||||
.. _raises-warns-exec:
|
||||
|
||||
``raises`` / ``warns`` with a string as the second argument
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. deprecated:: 4.1
|
||||
.. versionremoved:: 5.0
|
||||
|
||||
Use the context manager form of these instead. When necessary, invoke ``exec``
|
||||
directly.
|
||||
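For reference, a sketch of the context-manager forms that replace the removed string variants:

.. code-block:: python

    import warnings

    import pytest

    def test_context_manager_forms():
        with pytest.raises(ZeroDivisionError):
            1 / 0
        with pytest.warns(UserWarning):
            warnings.warn("careful", UserWarning)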
@@ -116,27 +141,6 @@ Becomes:
|
||||
|
||||
|
||||
|
||||
Result log (``--result-log``)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. deprecated:: 4.0
|
||||
|
||||
The ``--result-log`` option produces a stream of test reports which can be
|
||||
analysed at runtime. It uses a custom format which requires users to implement their own
|
||||
parser, but the team believes using a line-based format that can be parsed using standard
|
||||
tools would provide a suitable and better alternative.
|
||||
|
||||
The current plan is to provide an alternative in the pytest 5.0 series and remove the ``--result-log``
|
||||
option in pytest 6.0 after the new implementation proves satisfactory to all users and is deemed
|
||||
stable.
|
||||
|
||||
The actual alternative is still being discussed in issue `#4488 <https://github.com/pytest-dev/pytest/issues/4488>`__.
|
||||
|
||||
Removed Features
|
||||
----------------
|
||||
|
||||
As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after
|
||||
an appropriate period of deprecation has passed.
|
||||
|
||||
Using ``Class`` in custom Collectors
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@@ -455,7 +459,9 @@ Internal classes accessed through ``Node``
|
||||
.. versionremoved:: 4.0
|
||||
|
||||
Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances now issue
|
||||
this warning::
|
||||
this warning:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
usage of Function.Module is deprecated, please use pytest.Module instead
|
||||
|
||||
|
||||
@@ -29,14 +29,14 @@ then you can just invoke ``pytest`` directly:
|
||||
|
||||
$ pytest
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collected 1 item
|
||||
|
||||
test_example.txt . [100%]
|
||||
|
||||
========================= 1 passed in 0.12 seconds =========================
|
||||
============================ 1 passed in 0.12s =============================
|
||||
|
||||
By default, pytest will collect ``test*.txt`` files looking for doctest directives, but you
|
||||
can pass additional globs using the ``--doctest-glob`` option (multi-allowed).
|
||||
@@ -58,7 +58,7 @@ and functions, including from test modules:
|
||||
|
||||
$ pytest --doctest-modules
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collected 2 items
|
||||
@@ -66,7 +66,7 @@ and functions, including from test modules:
|
||||
mymodule.py . [ 50%]
|
||||
test_example.txt . [100%]
|
||||
|
||||
========================= 2 passed in 0.12 seconds =========================
|
||||
============================ 2 passed in 0.12s =============================
|
||||
|
||||
You can make these changes permanent in your project by
|
||||
putting them into a pytest.ini file like this:
|
||||
@@ -103,7 +103,7 @@ that will be used for those doctest files using the
|
||||
Using 'doctest' options
|
||||
-----------------------
|
||||
|
||||
The standard ``doctest`` module provides some `options <https://docs.python.org/3/library/doctest.html#option-flags>`__
|
||||
Python's standard ``doctest`` module provides some `options <https://docs.python.org/3/library/doctest.html#option-flags>`__
|
||||
to configure the strictness of doctest tests. In pytest, you can enable those flags using the
|
||||
configuration file.
|
||||
|
||||
@@ -115,23 +115,52 @@ lengthy exception stack traces you can just write:
|
||||
[pytest]
|
||||
doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
|
||||
|
||||
pytest also introduces new options to allow doctests to run in Python 2 and
|
||||
Python 3 unchanged:
|
||||
|
||||
* ``ALLOW_UNICODE``: when enabled, the ``u`` prefix is stripped from unicode
|
||||
strings in expected doctest output.
|
||||
|
||||
* ``ALLOW_BYTES``: when enabled, the ``b`` prefix is stripped from byte strings
|
||||
in expected doctest output.
|
||||
|
||||
Alternatively, options can be enabled by an inline comment in the doc test
|
||||
itself:
|
||||
|
||||
.. code-block:: rst
|
||||
|
||||
# content of example.rst
|
||||
>>> get_unicode_greeting() # doctest: +ALLOW_UNICODE
|
||||
'Hello'
|
||||
>>> something_that_raises() # doctest: +IGNORE_EXCEPTION_DETAIL
|
||||
Traceback (most recent call last):
|
||||
ValueError: ...
|
||||
|
||||
pytest also introduces new options:
|
||||
|
||||
* ``ALLOW_UNICODE``: when enabled, the ``u`` prefix is stripped from unicode
|
||||
strings in expected doctest output. This allows doctests to run in Python 2
|
||||
and Python 3 unchanged.
|
||||
|
||||
* ``ALLOW_BYTES``: similarly, the ``b`` prefix is stripped from byte strings
|
||||
in expected doctest output.
|
||||
|
||||
* ``NUMBER``: when enabled, floating-point numbers only need to match as far as
|
||||
the precision you have written in the expected doctest output. For example,
|
||||
the following output would only need to match to 2 decimal places::
|
||||
|
||||
>>> math.pi
|
||||
3.14
|
||||
|
||||
If you wrote ``3.1416`` then the actual output would need to match to 4
|
||||
decimal places; and so on.
|
||||
|
||||
This avoids false positives caused by limited floating-point precision, like
|
||||
this::
|
||||
|
||||
Expected:
|
||||
0.233
|
||||
Got:
|
||||
0.23300000000000001
|
||||
|
||||
``NUMBER`` also supports lists of floating-point numbers -- in fact, it
|
||||
matches floating-point numbers appearing anywhere in the output, even inside
|
||||
a string! This means that it may not be appropriate to enable globally in
|
||||
``doctest_optionflags`` in your configuration file.
|
||||
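Because of that caveat, ``NUMBER`` can instead be enabled for a single doctest with an inline directive; a sketch with made-up values:

.. code-block:: python

    def halve(x):
        """
        >>> halve(0.466)  # doctest: +NUMBER
        0.233
        """
        return x / 2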
|
||||
.. versionadded:: 5.1
|
||||
|
||||
|
||||
Continue on failure
|
||||
-------------------
|
||||
|
||||
By default, pytest would report only the first failure for a given doctest. If
|
||||
you want to continue the test even when you have failures, do:
|
||||
@@ -191,15 +220,21 @@ namespace in which your doctests run. It is intended to be used within
|
||||
your own fixtures to provide the tests that use them with context.
|
||||
|
||||
``doctest_namespace`` is a standard ``dict`` object into which you
|
||||
place the objects you want to appear in the doctest namespace::
|
||||
place the objects you want to appear in the doctest namespace:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# content of conftest.py
|
||||
import numpy
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def add_np(doctest_namespace):
|
||||
doctest_namespace['np'] = numpy
|
||||
doctest_namespace["np"] = numpy
|
||||
|
||||
which can then be used in your doctests directly::
|
||||
which can then be used in your doctests directly:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# content of numpy.py
|
||||
def arange():
|
||||
@@ -219,7 +254,9 @@ Skipping tests dynamically
|
||||
|
||||
.. versionadded:: 4.4
|
||||
|
||||
You can use ``pytest.skip`` to dynamically skip doctests. For example::
|
||||
You can use ``pytest.skip`` to dynamically skip doctests. For example:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
>>> import sys, pytest
|
||||
>>> if sys.platform.startswith('win'):
|
||||
|
||||
@@ -177,7 +177,7 @@ class TestRaises:
|
||||
|
||||
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
|
||||
items = [1, 2, 3]
|
||||
print("items is %r" % items)
|
||||
print("items is {!r}".format(items))
|
||||
a, b = items.pop()
|
||||
|
||||
def test_some_error(self):
|
||||
|
||||
@@ -18,7 +18,7 @@ example: specifying and selecting acceptance tests
|
||||
return AcceptFixture(request)
|
||||
|
||||
|
||||
class AcceptFixture(object):
|
||||
class AcceptFixture:
|
||||
def __init__(self, request):
|
||||
if not request.config.getoption("acceptance"):
|
||||
pytest.skip("specify -A to run acceptance tests")
|
||||
@@ -65,7 +65,7 @@ extend the `accept example`_ by putting this in our test module:
|
||||
return arg
|
||||
|
||||
|
||||
class TestSpecialAcceptance(object):
|
||||
class TestSpecialAcceptance:
|
||||
def test_sometest(self, accept):
|
||||
assert accept.tmpdir.join("special").check()
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture("session")
|
||||
@pytest.fixture(scope="session")
|
||||
def setup(request):
|
||||
setup = CostlySetup()
|
||||
yield setup
|
||||
|
||||
38
doc/en/example/fixtures/test_fixtures_order.py
Normal file
@@ -0,0 +1,38 @@
|
||||
import pytest
|
||||
|
||||
# fixtures documentation order example
|
||||
order = []
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def s1():
|
||||
order.append("s1")
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def m1():
|
||||
order.append("m1")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def f1(f3):
|
||||
order.append("f1")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def f3():
|
||||
order.append("f3")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def a1():
|
||||
order.append("a1")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def f2():
|
||||
order.append("f2")
|
||||
|
||||
|
||||
def test_order(f1, m1, f2, s1):
|
||||
assert order == ["s1", "m1", "a1", "f3", "f1", "f2"]
|
||||
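The expected order follows pytest's instantiation rules: higher-scoped fixtures first (``s1`` session, then ``m1`` module), autouse fixtures before other function-scoped ones (``a1``), dependencies before their dependents (``f3`` before ``f1``), and otherwise the declaration order in the test signature (``f2`` last).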
@@ -33,7 +33,7 @@ You can "mark" a test function with custom metadata like this:
|
||||
pass
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
class TestClass:
|
||||
def test_method(self):
|
||||
pass
|
||||
|
||||
@@ -45,14 +45,14 @@ You can then restrict a test run to only run tests marked with ``webtest``:
|
||||
|
||||
$ pytest -v -m webtest
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collecting ... collected 4 items / 3 deselected / 1 selected
|
||||
|
||||
test_server.py::test_send_http PASSED [100%]
|
||||
|
||||
================== 1 passed, 3 deselected in 0.12 seconds ==================
|
||||
===================== 1 passed, 3 deselected in 0.12s ======================
|
||||
|
||||
Or the inverse, running all tests except the webtest ones:
|
||||
|
||||
@@ -60,7 +60,7 @@ Or the inverse, running all tests except the webtest ones:
|
||||
|
||||
$ pytest -v -m "not webtest"
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collecting ... collected 4 items / 1 deselected / 3 selected

@@ -69,7 +69,7 @@ Or the inverse, running all tests except the webtest ones:

test_server.py::test_another PASSED [ 66%]
test_server.py::TestClass::test_method PASSED [100%]

-================== 3 passed, 1 deselected in 0.12 seconds ==================
+===================== 3 passed, 1 deselected in 0.12s ======================

Selecting tests based on their node ID
--------------------------------------

@@ -82,14 +82,14 @@ tests based on their module, class, method, or function name:

$ pytest -v test_server.py::TestClass::test_method
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collecting ... collected 1 item

test_server.py::TestClass::test_method PASSED [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.12s =============================

You can also select on the class:

@@ -97,14 +97,14 @@ You can also select on the class:

$ pytest -v test_server.py::TestClass
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collecting ... collected 1 item

test_server.py::TestClass::test_method PASSED [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.12s =============================

Or select multiple nodes:

@@ -112,7 +112,7 @@ Or select multiple nodes:

$ pytest -v test_server.py::TestClass test_server.py::test_send_http
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collecting ... collected 2 items
@@ -120,7 +120,7 @@ Or select multiple nodes:
test_server.py::TestClass::test_method PASSED [ 50%]
test_server.py::test_send_http PASSED [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.12s =============================

.. _node-id:

@@ -152,14 +152,14 @@ select tests based on their names:

$ pytest -v -k http # running with the above defined example module
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collecting ... collected 4 items / 3 deselected / 1 selected

test_server.py::test_send_http PASSED [100%]

-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.12s ======================

And you can also run all tests except the ones that match the keyword:

@@ -167,7 +167,7 @@ And you can also run all tests except the ones that match the keyword:

$ pytest -k "not send_http" -v
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collecting ... collected 4 items / 1 deselected / 3 selected
@@ -176,7 +176,7 @@ And you can also run all tests except the ones that match the keyword:
test_server.py::test_another PASSED [ 66%]
test_server.py::TestClass::test_method PASSED [100%]

-================== 3 passed, 1 deselected in 0.12 seconds ==================
+===================== 3 passed, 1 deselected in 0.12s ======================

Or to select "http" and "quick" tests:

@@ -184,7 +184,7 @@ Or to select "http" and "quick" tests:

$ pytest -k "http or quick" -v
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collecting ... collected 4 items / 2 deselected / 2 selected
@@ -192,7 +192,7 @@ Or to select "http" and "quick" tests:
test_server.py::test_send_http PASSED [ 50%]
test_server.py::test_something_quick PASSED [100%]

-================== 2 passed, 2 deselected in 0.12 seconds ==================
+===================== 2 passed, 2 deselected in 0.12s ======================
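
All of the ``-k`` and node-ID runs above operate on the same example module.
Judging from the collected node IDs, it presumably looks like the following
sketch (only the names are taken from the output above; the bodies are
assumptions):

.. code-block:: python

    # content of test_server.py (assumed)

    import pytest


    @pytest.mark.webtest
    def test_send_http():
        pass  # perform some webtest test for your app


    def test_something_quick():
        pass


    def test_another():
        pass


    class TestClass:
        def test_method(self):
            pass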

.. note::
@@ -278,7 +278,7 @@ its test methods:


@pytest.mark.webtest
-class TestClass(object):
+class TestClass:
    def test_startup(self):
        pass

@@ -295,7 +295,7 @@ Due to legacy reasons, it is possible to set the ``pytestmark`` attribute on a T
import pytest


-class TestClass(object):
+class TestClass:
    pytestmark = pytest.mark.webtest

or if you need to use multiple markers you can use a list:
@@ -305,7 +305,7 @@ or if you need to use multiple markers you can use a list:
import pytest


-class TestClass(object):
+class TestClass:
    pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]

You can also set a module level marker::
@@ -336,7 +336,7 @@ apply a marker to an individual test instance:

@pytest.mark.foo
@pytest.mark.parametrize(
-    ("n", "expected"), [(1, 2), pytest.param((1, 3), marks=pytest.mark.bar), (2, 3)]
+    ("n", "expected"), [(1, 2), pytest.param(1, 3, marks=pytest.mark.bar), (2, 3)]
)
def test_increment(n, expected):
    assert n + 1 == expected
@@ -384,7 +384,7 @@ specifies via named environments:
    envnames = [mark.args[0] for mark in item.iter_markers(name="env")]
    if envnames:
        if item.config.getoption("-E") not in envnames:
-            pytest.skip("test requires env in %r" % envnames)
+            pytest.skip("test requires env in {!r}".format(envnames))
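
For reference, the ``pytest.skip`` call in this hunk sits at the end of the
local plugin's ``pytest_runtest_setup`` hook. A hedged sketch of the whole
``conftest.py``, with the option registration assumed from the surrounding
example rather than shown in the hunk:

.. code-block:: python

    # content of conftest.py (assumed reconstruction)

    import pytest


    def pytest_addoption(parser):
        parser.addoption(
            "-E",
            action="store",
            metavar="NAME",
            help="only run tests matching the environment NAME.",
        )


    def pytest_runtest_setup(item):
        envnames = [mark.args[0] for mark in item.iter_markers(name="env")]
        if envnames:
            if item.config.getoption("-E") not in envnames:
                pytest.skip("test requires env in {!r}".format(envnames))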

A test file using this local plugin:

@@ -406,14 +406,14 @@ the test needs:

$ pytest -E stage2
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_someenv.py s [100%]

-======================== 1 skipped in 0.12 seconds =========================
+============================ 1 skipped in 0.12s ============================

and here is one that specifies exactly the environment needed:

@@ -421,14 +421,14 @@ and here is one that specifies exactly the environment needed:

$ pytest -E stage1
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_someenv.py . [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.12s =============================

The ``--markers`` option always gives you a list of available markers:
@@ -499,7 +499,7 @@ The output is as follows:
$ pytest -q -s
Mark(name='my_marker', args=(<function hello_world at 0xdeadbeef>,), kwargs={})
.
-1 passed in 0.12 seconds
+1 passed in 0.12s

We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``.

@@ -523,7 +523,7 @@ code you can read over all such settings. Example:


@pytest.mark.glob("class", x=2)
-class TestClass(object):
+class TestClass:
    @pytest.mark.glob("function", x=3)
    def test_something(self):
        pass
@@ -539,7 +539,7 @@ test function. From a conftest file we can read it like this:

def pytest_runtest_setup(item):
    for mark in item.iter_markers(name="glob"):
-        print("glob args=%s kwargs=%s" % (mark.args, mark.kwargs))
+        print("glob args={} kwargs={}".format(mark.args, mark.kwargs))
        sys.stdout.flush()

Let's run this without capturing output and see what we get:
@@ -551,7 +551,7 @@ Let's run this without capturing output and see what we get:
glob args=('class',) kwargs={'x': 2}
glob args=('module',) kwargs={'x': 1}
.
-1 passed in 0.12 seconds
+1 passed in 0.12s

marking platform specific tests with pytest
--------------------------------------------------------------
@@ -578,7 +578,7 @@ for your particular platform, you could use the following plugin:
    supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers())
    plat = sys.platform
    if supported_platforms and plat not in supported_platforms:
-        pytest.skip("cannot run on platform %s" % (plat))
+        pytest.skip("cannot run on platform {}".format(plat))
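
The skip call above is the tail of a small ``conftest.py`` plugin; here is a
hedged sketch of the full hook, assuming the conventional ``ALL`` platform set
used by this example:

.. code-block:: python

    # content of conftest.py (assumed reconstruction)

    import sys

    import pytest

    ALL = set("darwin linux win32".split())


    def pytest_runtest_setup(item):
        supported_platforms = ALL.intersection(
            mark.name for mark in item.iter_markers()
        )
        plat = sys.platform
        if supported_platforms and plat not in supported_platforms:
            pytest.skip("cannot run on platform {}".format(plat))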

then tests will be skipped if they were specified for a different platform.
Let's do a little test file to show how this looks:
@@ -614,7 +614,7 @@ then you will see two tests skipped and two executed tests as expected:

$ pytest -rs # this option reports skip reasons
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 4 items
@@ -623,7 +623,7 @@ then you will see two tests skipped and two executed tests as expected:

========================= short test summary info ==========================
SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux
-=================== 2 passed, 2 skipped in 0.12 seconds ====================
+======================= 2 passed, 2 skipped in 0.12s =======================

Note that if you specify a platform via the marker-command line option like this:

@@ -631,14 +631,14 @@ Note that if you specify a platform via the marker-command line option like this

$ pytest -m linux
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 4 items / 3 deselected / 1 selected

test_plat.py . [100%]

-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.12s ======================

then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests.
@@ -695,7 +695,7 @@ We can now use the ``-m option`` to select one set:

$ pytest -m interface --tb=short
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 4 items / 2 deselected / 2 selected
@@ -711,7 +711,7 @@ We can now use the ``-m option`` to select one set:
test_module.py:8: in test_interface_complex
    assert 0
E   assert 0
-================== 2 failed, 2 deselected in 0.12 seconds ==================
+===================== 2 failed, 2 deselected in 0.12s ======================

or to select both "event" and "interface" tests:

@@ -719,7 +719,7 @@ or to select both "event" and "interface" tests:

$ pytest -m "interface or event" --tb=short
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 4 items / 1 deselected / 3 selected
@@ -739,4 +739,4 @@ or to select both "event" and "interface" tests:
test_module.py:12: in test_event_simple
    assert 0
E   assert 0
-================== 3 failed, 1 deselected in 0.12 seconds ==================
+===================== 3 failed, 1 deselected in 0.12s ======================
@@ -69,4 +69,4 @@ class Python:
@pytest.mark.parametrize("obj", [42, {}, {1: 3}])
def test_basic_objects(python1, python2, obj):
    python1.dumps(obj)
-    python2.load_and_is_true("obj == %s" % obj)
+    python2.load_and_is_true("obj == {}".format(obj))
@@ -12,14 +12,14 @@ A basic example for specifying tests in Yaml files
.. _`pytest-yamlwsgi`: http://bitbucket.org/aafshar/pytest-yamlwsgi/src/tip/pytest_yamlwsgi.py
.. _`PyYAML`: https://pypi.org/project/PyYAML/

-Here is an example ``conftest.py`` (extracted from Ali Afshnars special purpose `pytest-yamlwsgi`_ plugin). This ``conftest.py`` will collect ``test*.yml`` files and will execute the yaml-formatted content as custom tests:
+Here is an example ``conftest.py`` (extracted from Ali Afshnars special purpose `pytest-yamlwsgi`_ plugin). This ``conftest.py`` will collect ``test*.yaml`` files and will execute the yaml-formatted content as custom tests:

.. include:: nonpython/conftest.py
   :literal:

You can create a simple example file:

-.. include:: nonpython/test_simple.yml
+.. include:: nonpython/test_simple.yaml
   :literal:

and if you installed `PyYAML`_ or a compatible YAML-parser you can
@@ -27,21 +27,21 @@ now execute the test specification:

.. code-block:: pytest

-nonpython $ pytest test_simple.yml
+nonpython $ pytest test_simple.yaml
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/nonpython
collected 2 items

-test_simple.yml F. [100%]
+test_simple.yaml F. [100%]

================================= FAILURES =================================
______________________________ usecase: hello ______________________________
usecase execution failed
spec failed: 'some': 'other'
no further details known at this point.
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.12s ========================

.. regendoc:wipe

@@ -64,20 +64,20 @@ consulted when reporting in ``verbose`` mode:

nonpython $ pytest -v
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/nonpython
collecting ... collected 2 items

-test_simple.yml::hello FAILED [ 50%]
-test_simple.yml::ok PASSED [100%]
+test_simple.yaml::hello FAILED [ 50%]
+test_simple.yaml::ok PASSED [100%]

================================= FAILURES =================================
______________________________ usecase: hello ______________________________
usecase execution failed
spec failed: 'some': 'other'
no further details known at this point.
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.12s ========================

.. regendoc:wipe

@@ -88,13 +88,13 @@ interesting to just look at the collection tree:

nonpython $ pytest --collect-only
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/nonpython
collected 2 items
<Package $REGENDOC_TMPDIR/nonpython>
-  <YamlFile test_simple.yml>
+  <YamlFile test_simple.yaml>
    <YamlItem hello>
    <YamlItem ok>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

@@ -3,7 +3,7 @@ import pytest


def pytest_collect_file(parent, path):
-    if path.ext == ".yml" and path.basename.startswith("test"):
+    if path.ext == ".yaml" and path.basename.startswith("test"):
        return YamlFile(path, parent)


@@ -33,13 +33,13 @@ class YamlItem(pytest.Item):
            return "\n".join(
                [
                    "usecase execution failed",
-                    " spec failed: %r: %r" % excinfo.value.args[1:3],
+                    " spec failed: {1!r}: {2!r}".format(*excinfo.value.args),
                    " no further details known at this point.",
                ]
            )

    def reportinfo(self):
-        return self.fspath, 0, "usecase: %s" % self.name
+        return self.fspath, 0, "usecase: {}".format(self.name)


class YamlException(Exception):

@@ -1,4 +1,4 @@
-# test_simple.yml
+# test_simple.yaml
ok:
    sub1: sub1
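
The hunks above only show fragments of the YAML collector. For orientation,
here is a hedged sketch of the collector class they belong to; the constructor
signature and parsing details are assumptions, not part of the diff:

.. code-block:: python

    # sketch of the collector wired up by pytest_collect_file above

    import yaml
    import pytest


    class YamlFile(pytest.File):
        def collect(self):
            raw = yaml.safe_load(self.fspath.open())
            for name, spec in sorted(raw.items()):
                yield YamlItem(name, self, spec)
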
@@ -19,24 +19,30 @@ Generating parameters combinations, depending on command line

Let's say we want to execute a test with different computation
parameters and the parameter range shall be determined by a command
-line argument. Let's first write a simple (do-nothing) computation test::
+line argument. Let's first write a simple (do-nothing) computation test:
+
+.. code-block:: python

# content of test_compute.py


def test_compute(param1):
    assert param1 < 4

-Now we add a test configuration like this::
+Now we add a test configuration like this:
+
+.. code-block:: python

# content of conftest.py


def pytest_addoption(parser):
-    parser.addoption("--all", action="store_true",
-                     help="run all combinations")
+    parser.addoption("--all", action="store_true", help="run all combinations")


def pytest_generate_tests(metafunc):
-    if 'param1' in metafunc.fixturenames:
-        if metafunc.config.getoption('all'):
+    if "param1" in metafunc.fixturenames:
+        if metafunc.config.getoption("all"):
            end = 5
        else:
            end = 2
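
The hunk context ends before the hook does; with the change applied, the whole
hook presumably reads as follows (the final ``metafunc.parametrize`` call
falls outside the hunk):

.. code-block:: python

    def pytest_generate_tests(metafunc):
        if "param1" in metafunc.fixturenames:
            if metafunc.config.getoption("all"):
                end = 5
            else:
                end = 2
            metafunc.parametrize("param1", range(end))
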
@@ -48,7 +54,7 @@ This means that we only run 2 tests if we do not pass ``--all``:

$ pytest -q test_compute.py
.. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.12s

We run only two computations, so we see two dots.
let's run the full monty:
@@ -66,8 +72,8 @@ let's run the full monty:
>   assert param1 < 4
E   assert 4 < 4

-test_compute.py:3: AssertionError
-1 failed, 4 passed in 0.12 seconds
+test_compute.py:4: AssertionError
+1 failed, 4 passed in 0.12s

As expected when running the full range of ``param1`` values
we'll get an error on the last one.
@@ -83,7 +89,9 @@ Running pytest with ``--collect-only`` will show the generated IDs.

Numbers, strings, booleans and None will have their usual string representation
used in the test ID. For other objects, pytest will make a string based on
-the argument name::
+the argument name:
+
+.. code-block:: python

# content of test_time.py

@@ -112,7 +120,7 @@ the argument name::
def idfn(val):
    if isinstance(val, (datetime,)):
        # note this wouldn't show any hours/minutes/seconds
-        return val.strftime('%Y%m%d')
+        return val.strftime("%Y%m%d")


@pytest.mark.parametrize("a,b,expected", testdata, ids=idfn)
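
These hunks rely on definitions that sit earlier in ``test_time.py``, outside
the shown context. A hedged sketch of those definitions:

.. code-block:: python

    from datetime import datetime, timedelta

    testdata = [
        (datetime(2001, 12, 12), datetime(2001, 12, 11), timedelta(1)),
        (datetime(2001, 12, 11), datetime(2001, 12, 12), timedelta(-1)),
    ]
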
@@ -120,12 +128,18 @@ the argument name::
    diff = a - b
    assert diff == expected

-@pytest.mark.parametrize("a,b,expected", [
-    pytest.param(datetime(2001, 12, 12), datetime(2001, 12, 11),
-                 timedelta(1), id='forward'),
-    pytest.param(datetime(2001, 12, 11), datetime(2001, 12, 12),
-                 timedelta(-1), id='backward'),
-])
+@pytest.mark.parametrize(
+    "a,b,expected",
+    [
+        pytest.param(
+            datetime(2001, 12, 12), datetime(2001, 12, 11), timedelta(1), id="forward"
+        ),
+        pytest.param(
+            datetime(2001, 12, 11), datetime(2001, 12, 12), timedelta(-1), id="backward"
+        ),
+    ],
+)
def test_timedistance_v3(a, b, expected):
    diff = a - b
    assert diff == expected
@@ -144,7 +158,7 @@ objects, they are still using the default pytest representation:

$ pytest test_time.py --collect-only
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 8 items
@@ -158,7 +172,7 @@ objects, they are still using the default pytest representation:
  <Function test_timedistance_v3[forward]>
  <Function test_timedistance_v3[backward]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
together with the actual data, instead of listing them separately.
@@ -171,10 +185,13 @@ A quick port of "testscenarios"

Here is a quick port to run tests configured with `test scenarios`_,
an add-on from Robert Collins for the standard unittest framework. We
only have to work a bit to construct the correct arguments for pytest's
-:py:func:`Metafunc.parametrize`::
+:py:func:`Metafunc.parametrize`:
+
+.. code-block:: python

# content of test_scenarios.py


def pytest_generate_tests(metafunc):
    idlist = []
    argvalues = []
@@ -182,13 +199,15 @@ only have to work a bit to construct the correct arguments for pytest's
        idlist.append(scenario[0])
        items = scenario[1].items()
        argnames = [x[0] for x in items]
-        argvalues.append(([x[1] for x in items]))
+        argvalues.append([x[1] for x in items])
    metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")

-scenario1 = ('basic', {'attribute': 'value'})
-scenario2 = ('advanced', {'attribute': 'value2'})
-
-class TestSampleWithScenarios(object):
+scenario1 = ("basic", {"attribute": "value"})
+scenario2 = ("advanced", {"attribute": "value2"})
+
+
+class TestSampleWithScenarios:
    scenarios = [scenario1, scenario2]

    def test_demo1(self, attribute):
@@ -203,14 +222,14 @@ this is a fully self-contained example which you can run with:

$ pytest test_scenarios.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 4 items

test_scenarios.py .... [100%]

-========================= 4 passed in 0.12 seconds =========================
+============================ 4 passed in 0.12s =============================

If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:

@@ -218,7 +237,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia

$ pytest --collect-only test_scenarios.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 4 items
@@ -229,7 +248,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
  <Function test_demo1[advanced]>
  <Function test_demo2[advanced]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

Note that we told ``metafunc.parametrize()`` that your scenario values
should be considered class-scoped. With pytest-2.3 this leads to a
@@ -243,12 +262,16 @@ Deferring the setup of parametrized resources

The parametrization of test functions happens at collection
time. It is a good idea to setup expensive resources like DB
connections or subprocess only when the actual test is run.
-Here is a simple example how you can achieve that, first
-the actual test requiring a ``db`` object::
+Here is a simple example how you can achieve that. This test
+requires a ``db`` object fixture:
+
+.. code-block:: python

# content of test_backends.py

import pytest


def test_db_initialized(db):
    # a dummy test
    if db.__class__.__name__ == "DB2":
@@ -256,20 +279,27 @@ the actual test requiring a ``db`` object::

We can now add a test configuration that generates two invocations of
the ``test_db_initialized`` function and also implements a factory that
-creates a database object for the actual test invocations::
+creates a database object for the actual test invocations:
+
+.. code-block:: python

# content of conftest.py
import pytest

-def pytest_generate_tests(metafunc):
-    if 'db' in metafunc.fixturenames:
-        metafunc.parametrize("db", ['d1', 'd2'], indirect=True)
-
-class DB1(object):
+
+def pytest_generate_tests(metafunc):
+    if "db" in metafunc.fixturenames:
+        metafunc.parametrize("db", ["d1", "d2"], indirect=True)
+
+
+class DB1:
    "one database object"
-class DB2(object):
+
+
+class DB2:
    "alternative database object"


@pytest.fixture
def db(request):
    if request.param == "d1":
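
The hunk cuts the ``db`` fixture off after its first line; it presumably
continues by instantiating one of the two classes per parametrized value,
along these lines:

.. code-block:: python

    @pytest.fixture
    def db(request):
        if request.param == "d1":
            return DB1()
        elif request.param == "d2":
            return DB2()
        else:
            raise ValueError("invalid internal test config")
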
@@ -285,7 +315,7 @@ Let's first see how it looks like at collection time:

$ pytest test_backends.py --collect-only
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items
@@ -293,7 +323,7 @@ Let's first see how it looks like at collection time:
  <Function test_db_initialized[d1]>
  <Function test_db_initialized[d2]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

And then when we run the test:

@@ -312,8 +342,8 @@ And then when we run the test:
>   pytest.fail("deliberately failing for demo purposes")
E   Failed: deliberately failing for demo purposes

-test_backends.py:6: Failed
-1 failed, 1 passed in 0.12 seconds
+test_backends.py:8: Failed
+1 failed, 1 passed in 0.12s

The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase.
@@ -327,23 +357,29 @@ parameter on particular arguments. It can be done by passing list or tuple of
arguments' names to ``indirect``. In the example below there is a function ``test_indirect`` which uses
two fixtures: ``x`` and ``y``. Here we give to indirect the list, which contains the name of the
fixture ``x``. The indirect parameter will be applied to this argument only, and the value ``a``
-will be passed to respective fixture function::
+will be passed to respective fixture function:
+
+.. code-block:: python

# content of test_indirect_list.py

import pytest
-@pytest.fixture(scope='function')
+
+
+@pytest.fixture(scope="function")
def x(request):
    return request.param * 3

-@pytest.fixture(scope='function')
+
+@pytest.fixture(scope="function")
def y(request):
    return request.param * 2

-@pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])
-def test_indirect(x,y):
-    assert x == 'aaa'
-    assert y == 'b'
+
+@pytest.mark.parametrize("x, y", [("a", "b")], indirect=["x"])
+def test_indirect(x, y):
+    assert x == "aaa"
+    assert y == "b"

The result of this test will be successful:

@@ -351,14 +387,14 @@ The result of this test will be successful:

$ pytest test_indirect_list.py --collect-only
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 1 item
<Module test_indirect_list.py>
  <Function test_indirect[a-b]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

.. regendoc:wipe
@@ -370,23 +406,28 @@ Parametrizing test methods through per-class configuration

Here is an example ``pytest_generate_tests`` function implementing a
parametrization scheme similar to Michael Foord's `unittest
-parametrizer`_ but in a lot less code::
+parametrizer`_ but in a lot less code:
+
+.. code-block:: python

# content of ./test_parametrize.py
import pytest


def pytest_generate_tests(metafunc):
    # called once per each test function
    funcarglist = metafunc.cls.params[metafunc.function.__name__]
    argnames = sorted(funcarglist[0])
-    metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
-                                    for funcargs in funcarglist])
+    metafunc.parametrize(
+        argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]
+    )

-class TestClass(object):
+
+class TestClass:
    # a map specifying multiple argument sets for a test method
    params = {
-        'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
-        'test_zerodivision': [dict(a=1, b=0), ],
+        "test_equals": [dict(a=1, b=2), dict(a=3, b=3)],
+        "test_zerodivision": [dict(a=1, b=0)],
    }

    def test_equals(self, a, b):
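
The hunk stops at the first test method; the class presumably continues with
bodies like these (consistent with the ``1 failed, 2 passed`` run below):

.. code-block:: python

    def test_equals(self, a, b):
        assert a == b

    def test_zerodivision(self, a, b):
        with pytest.raises(ZeroDivisionError):
            a / b
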
@@ -412,8 +453,8 @@ argument sets to use for each test function. Let's run it:
>   assert a == b
E   assert 1 == 2

-test_parametrize.py:18: AssertionError
-1 failed, 2 passed in 0.12 seconds
+test_parametrize.py:21: AssertionError
+1 failed, 2 passed in 0.12s

Indirect parametrization with multiple fixtures
--------------------------------------------------------------
@@ -434,10 +475,11 @@ Running it results in some skips if we don't have all the python interpreters in
.. code-block:: pytest

. $ pytest -rs -q multipython.py
-ssssssssssss......sss...... [100%]
+ssssssssssssssssssssssss... [100%]
========================= short test summary info ==========================
-SKIPPED [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found
-12 passed, 15 skipped in 0.12 seconds
+SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found
+SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.6' not found
+3 passed, 24 skipped in 0.12s

Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------
@@ -446,36 +488,47 @@ If you want to compare the outcomes of several implementations of a given
API, you can write test functions that receive the already imported implementations
and get skipped in case the implementation is not importable/available. Let's
say we have a "base" implementation and the other (possibly optimized ones)
-need to provide similar results::
+need to provide similar results:
+
+.. code-block:: python

# content of conftest.py

import pytest


@pytest.fixture(scope="session")
def basemod(request):
    return pytest.importorskip("base")


@pytest.fixture(scope="session", params=["opt1", "opt2"])
def optmod(request):
    return pytest.importorskip(request.param)

-And then a base implementation of a simple function::
+And then a base implementation of a simple function:
+
+.. code-block:: python

# content of base.py
def func1():
    return 1

-And an optimized version::
+And an optimized version:
+
+.. code-block:: python

# content of opt1.py
def func1():
    return 1.0001

-And finally a little test module::
+And finally a little test module:
+
+.. code-block:: python

# content of test_module.py


def test_func1(basemod, optmod):
    assert round(basemod.func1(), 3) == round(optmod.func1(), 3)

@@ -486,7 +539,7 @@ If you run this with reporting for skips enabled:

$ pytest -rs test_module.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items
@@ -494,8 +547,8 @@ If you run this with reporting for skips enabled:
test_module.py .s [100%]

========================= short test summary info ==========================
-SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2': No module named 'opt2'
-=================== 1 passed, 1 skipped in 0.12 seconds ====================
+SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:13: could not import 'opt2': No module named 'opt2'
+======================= 1 passed, 1 skipped in 0.12s =======================

You'll see that we don't have an ``opt2`` module and thus the second test run
of our ``test_func1`` was skipped. A few notes:
@@ -548,16 +601,16 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker:

$ pytest -v -m basic
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
-collecting ... collected 17 items / 14 deselected / 3 selected
+collecting ... collected 18 items / 15 deselected / 3 selected

test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%]
test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%]
test_pytest_param_example.py::test_eval[basic_6*9] XFAIL [100%]

-============ 2 passed, 14 deselected, 1 xfailed in 0.12 seconds ============
+=============== 2 passed, 15 deselected, 1 xfailed in 0.12s ================

As the result:
@@ -578,22 +631,28 @@ Use :func:`pytest.raises` with the
in which some tests raise exceptions and others do not.

It is helpful to define a no-op context manager ``does_not_raise`` to serve
-as a complement to ``raises``. For example::
+as a complement to ``raises``. For example:
+
+.. code-block:: python

from contextlib import contextmanager
import pytest


@contextmanager
def does_not_raise():
    yield


-@pytest.mark.parametrize('example_input,expectation', [
-    (3, does_not_raise()),
-    (2, does_not_raise()),
-    (1, does_not_raise()),
-    (0, pytest.raises(ZeroDivisionError)),
-])
+@pytest.mark.parametrize(
+    "example_input,expectation",
+    [
+        (3, does_not_raise()),
+        (2, does_not_raise()),
+        (1, does_not_raise()),
+        (0, pytest.raises(ZeroDivisionError)),
+    ],
+)
def test_division(example_input, expectation):
    """Test how much I know division."""
    with expectation:
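
The hunk ends inside ``test_division``; the body under the ``with`` block is
presumably a division that the fourth case turns into ``ZeroDivisionError``:

.. code-block:: python

    def test_division(example_input, expectation):
        """Test how much I know division."""
        with expectation:
            assert (6 / example_input) is not None
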
@@ -603,14 +662,20 @@ In the example above, the first three test cases should run unexceptionally,
while the fourth should raise ``ZeroDivisionError``.

If you're only supporting Python 3.7+, you can simply use ``nullcontext``
-to define ``does_not_raise``::
+to define ``does_not_raise``:
+
+.. code-block:: python

from contextlib import nullcontext as does_not_raise

-Or, if you're supporting Python 3.3+ you can use::
+Or, if you're supporting Python 3.3+ you can use:
+
+.. code-block:: python

from contextlib import ExitStack as does_not_raise

-Or, if desired, you can ``pip install contextlib2`` and use::
+Or, if desired, you can ``pip install contextlib2`` and use:

-from contextlib2 import ExitStack as does_not_raise
+.. code-block:: python
+
+from contextlib2 import nullcontext as does_not_raise
@@ -31,7 +31,7 @@ you will see that ``pytest`` only collects test-modules, which do not match the
.. code-block:: pytest

=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 5 items

@@ -131,12 +131,15 @@ Here is an example:

This would make ``pytest`` look for tests in files that match the ``check_*
.py`` glob-pattern, ``Check`` prefixes in classes, and functions and methods
-that match ``*_check``. For example, if we have::
+that match ``*_check``. For example, if we have:
+
+.. code-block:: python

# content of check_myapp.py
-class CheckMyApp(object):
+class CheckMyApp:
    def simple_check(self):
        pass

    def complex_check(self):
        pass

@@ -146,7 +149,7 @@ The test collection would look like this:

$ pytest --collect-only
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 2 items
@@ -155,7 +158,7 @@ The test collection would look like this:
  <Function simple_check>
  <Function complex_check>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

You can check for multiple glob patterns by adding a space between the patterns:

@@ -208,7 +211,7 @@ You can always peek at the collection tree without running tests like this:

. $ pytest --collect-only pythoncollection.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 3 items
@@ -218,7 +221,7 @@ You can always peek at the collection tree without running tests like this:
  <Function test_method>
  <Function test_anothermethod>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

.. _customizing-test-collection:
@@ -238,7 +241,9 @@ You can easily instruct ``pytest`` to discover tests from every Python file:
However, many projects will have a ``setup.py`` which they don't want to be
imported. Moreover, there may be files only importable by a specific python
version. For such cases you can dynamically define files to be ignored by
-listing them in a ``conftest.py`` file::
+listing them in a ``conftest.py`` file:
+
+.. code-block:: python

# content of conftest.py
import sys
@@ -247,7 +252,9 @@ listing them in a ``conftest.py`` file::
if sys.version_info[0] > 2:
    collect_ignore.append("pkg/module_py2.py")

-and then if you have a module file like this::
+and then if you have a module file like this:
+
+.. code-block:: python

# content of pkg/module_py2.py
def test_only_on_python2():
@@ -256,10 +263,12 @@ and then if you have a module file like this::
    except Exception, e:
        pass

-and a ``setup.py`` dummy file like this::
+and a ``setup.py`` dummy file like this:
+
+.. code-block:: python

# content of setup.py
-0/0 # will raise exception if imported
+0 / 0 # will raise exception if imported

If you run with a Python 2 interpreter then you will find the one test and will
leave out the ``setup.py`` file:
@@ -283,19 +292,21 @@ file will be left out:

$ pytest --collect-only
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

It's also possible to ignore files based on Unix shell-style wildcards by adding
patterns to ``collect_ignore_glob``.

The following example ``conftest.py`` ignores the file ``setup.py`` and in
addition all files that end with ``*_py2.py`` when executed with a Python 3
-interpreter::
+interpreter:
+
+.. code-block:: python

# content of conftest.py
import sys
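
The hunk ends right after ``import sys``; the rest of this ``conftest.py``
presumably combines ``collect_ignore`` with the glob pattern, along these
lines:

.. code-block:: python

    # content of conftest.py (assumed continuation)

    import sys

    collect_ignore = ["setup.py"]
    if sys.version_info[0] == 3:
        collect_ignore_glob = ["*_py2.py"]
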
@@ -9,7 +9,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:

assertion $ pytest failure_demo.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/assertion
collected 44 items
@@ -119,7 +119,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
    a = "1" * 100 + "a" + "2" * 100
    b = "1" * 100 + "b" + "2" * 100
>   assert a == b
-E   AssertionError: assert '111111111111...2222222222222' == '1111111111111...2222222222222'
+E   AssertionError: assert '111111111111...2222222222222' == '111111111111...2222222222222'
E   Skipping 90 identical leading characters in diff, use -v to show
E   Skipping 91 identical trailing characters in diff, use -v to show
E   - 1111111111a222222222
@@ -136,7 +136,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
    a = "1\n" * 100 + "a" + "2\n" * 100
    b = "1\n" * 100 + "b" + "2\n" * 100
>   assert a == b
-E   AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n'
+E   AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n...n2\n2\n2\n2\n'
E   Skipping 190 identical leading characters in diff, use -v to show
E   Skipping 191 identical trailing characters in diff, use -v to show
E   1
@@ -235,7 +235,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
    def test_not_in_text_multiline(self):
        text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail"
>       assert "foo" not in text
-E       AssertionError: assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail'
+E       AssertionError: assert 'foo' not in 'some multil...nand a\ntail'
E       'foo' is contained here:
E         some multiline
E         text
@@ -267,7 +267,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
    def test_not_in_text_single_long(self):
        text = "head " * 50 + "foo " + "tail " * 20
>       assert "foo" not in text
-E       AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail '
+E       AssertionError: assert 'foo' not in 'head head h...l tail tail '
E       'foo' is contained here:
E         head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E       ?           +++
@@ -280,7 +280,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
    def test_not_in_text_single_long_term(self):
        text = "head " * 50 + "f" * 70 + "tail " * 20
>       assert "f" * 70 not in text
-E       AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail '
+E       AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head h...l tail tail '
E       'ffffffffffffffffff...fffffffffffffffffff' is contained here:
E         head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E       ?           ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -301,7 +301,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
    left = Foo(1, "b")
    right = Foo(1, "c")
>   assert left == right
-E   AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialise...oo(a=1, b='c')
+E   AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialis...oo(a=1, b='c')
E   Omitting 1 identical items, use -vv to show
E   Differing attributes:
E   b: 'b' != 'c'
@@ -434,9 +434,9 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
        items = [1, 2, 3]
-        print("items is %r" % items)
+        print("items is {!r}".format(items))
>       a, b = items.pop()
-E       TypeError: 'int' object is not iterable
+E       TypeError: cannot unpack non-iterable int object

failure_demo.py:181: TypeError
--------------------------- Captured stdout call ---------------------------
@@ -516,7 +516,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
    def test_z2_type_error(self):
        items = 3
>       a, b = items
-E       TypeError: 'int' object is not iterable
+E       TypeError: cannot unpack non-iterable int object

failure_demo.py:222: TypeError
______________________ TestMoreErrors.test_startswith ______________________
@@ -650,4 +650,4 @@ Here is a nice run of several failures and how ``pytest`` presents things:
E   + where 1 = This is JSON\n{\n  'foo': 'bar'\n}.a

failure_demo.py:282: AssertionError
-======================== 44 failed in 0.12 seconds =========================
+============================ 44 failed in 0.12s ============================

@@ -65,7 +65,7 @@ Let's run this without supplying our new option:
test_sample.py:6: AssertionError
--------------------------- Captured stdout call ---------------------------
first
-1 failed in 0.12 seconds
+1 failed in 0.12s

And now with supplying a command line option:

@@ -89,7 +89,7 @@ And now with supplying a command line option:
test_sample.py:6: AssertionError
--------------------------- Captured stdout call ---------------------------
second
-1 failed in 0.12 seconds
+1 failed in 0.12s

You can see that the command line option arrived in our test. This
completes the basic pattern. However, one often rather wants to process
@@ -127,12 +127,12 @@ directory with the above conftest.py:

$ pytest
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

.. _`excontrolskip`:

@@ -192,7 +192,7 @@ and when running it will see a skipped "slow" test:

$ pytest -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items
@@ -201,7 +201,7 @@ and when running it will see a skipped "slow" test:

========================= short test summary info ==========================
SKIPPED [1] test_module.py:8: need --runslow option to run
-=================== 1 passed, 1 skipped in 0.12 seconds ====================
+======================= 1 passed, 1 skipped in 0.12s =======================
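
The skip reason shown above comes from a small ``conftest.py`` pattern; here
is a hedged sketch of how such an option-driven skip is typically wired up
(names match the output above, the rest is an assumption):

.. code-block:: python

    # content of conftest.py (assumed sketch)

    import pytest


    def pytest_addoption(parser):
        parser.addoption(
            "--runslow", action="store_true", default=False, help="run slow tests"
        )


    def pytest_collection_modifyitems(config, items):
        if config.getoption("--runslow"):
            # --runslow given on the command line: do not skip slow tests
            return
        skip_slow = pytest.mark.skip(reason="need --runslow option to run")
        for item in items:
            if "slow" in item.keywords:
                item.add_marker(skip_slow)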

Or run it including the ``slow`` marked test:

@@ -209,14 +209,14 @@ Or run it including the ``slow`` marked test:

$ pytest --runslow
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py .. [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.12s =============================

Writing well integrated assertion helpers
--------------------------------------------------
@@ -238,7 +238,7 @@ Example:
def checkconfig(x):
    __tracebackhide__ = True
    if not hasattr(x, "config"):
-        pytest.fail("not configured: %s" % (x,))
+        pytest.fail("not configured: {}".format(x))


def test_something():
@@ -261,7 +261,7 @@ Let's run our little function:
E   Failed: not configured: 42

test_checkconfig.py:11: Failed
-1 failed in 0.12 seconds
+1 failed in 0.12s

If you only want to hide certain exceptions, you can set ``__tracebackhide__``
to a callable which gets the ``ExceptionInfo`` object. You can for example use
@@ -280,7 +280,7 @@ this to make sure unexpected exception types aren't hidden:
def checkconfig(x):
    __tracebackhide__ = operator.methodcaller("errisinstance", ConfigException)
    if not hasattr(x, "config"):
-        raise ConfigException("not configured: %s" % (x,))
+        raise ConfigException("not configured: {}".format(x))


def test_something():
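
Both hunks truncate at the test definition; in this example the test simply
exercises the helper with a value that fails the check:

.. code-block:: python

    def test_something():
        checkconfig(42)
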
@@ -352,13 +352,13 @@ which will add the string to the test header accordingly:

$ pytest
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
project deps: mylib-1.1
rootdir: $REGENDOC_TMPDIR
collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================
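
The extra ``project deps: mylib-1.1`` header line above is produced by a
``pytest_report_header`` hook; hedged sketches of both variants exercised in
this section (the verbose-gated one yields the ``info1``/``did you?`` lines
shown below):

.. code-block:: python

    # content of conftest.py (assumed sketch)

    def pytest_report_header(config):
        return "project deps: mylib-1.1"

    # verbose-gated variant (shown commented out since only one hook of this
    # name can live in a single conftest.py):
    # def pytest_report_header(config):
    #     if config.getoption("verbose") > 0:
    #         return ["info1: did you know that ...", "did you?"]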

.. regendoc:wipe

@@ -381,14 +381,14 @@ which will add info only when run with "--v":

$ pytest -v
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
info1: did you know that ...
did you?
rootdir: $REGENDOC_TMPDIR
collecting ... collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

and nothing when run plainly:

@@ -396,12 +396,12 @@ and nothing when run plainly:

$ pytest
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

profiling test duration
--------------------------

@@ -436,7 +436,7 @@ Now we can profile which test functions execute the slowest:

$ pytest --durations=3
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 3 items
@@ -447,7 +447,7 @@ Now we can profile which test functions execute the slowest:
0.30s call test_some_are_slow.py::test_funcslow2
0.20s call test_some_are_slow.py::test_funcslow1
0.10s call test_some_are_slow.py::test_funcfast
-========================= 3 passed in 0.12 seconds =========================
+============================ 3 passed in 0.12s =============================

incremental testing - test steps
---------------------------------------------------
@@ -478,7 +478,7 @@ an ``incremental`` marker which is to be used on classes:
    if "incremental" in item.keywords:
        previousfailed = getattr(item.parent, "_previousfailed", None)
        if previousfailed is not None:
-            pytest.xfail("previous test failed (%s)" % previousfailed.name)
+            pytest.xfail("previous test failed ({})".format(previousfailed.name))
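
This is the second of the two hooks; its companion, which records the failing
test on the class, presumably looks like this sketch:

.. code-block:: python

    def pytest_runtest_makereport(item, call):
        if "incremental" in item.keywords:
            if call.excinfo is not None:
                parent = item.parent
                parent._previousfailed = item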
|
||||
These two hook implementations work together to abort incremental-marked
|
||||
tests in a class. Here is a test module example:
|
||||
@@ -491,7 +491,7 @@ tests in a class. Here is a test module example:
|
||||
|
||||
|
||||
@pytest.mark.incremental
|
||||
class TestUserHandling(object):
|
||||
class TestUserHandling:
|
||||
def test_login(self):
|
||||
pass
|
||||
|
||||
@@ -511,7 +511,7 @@ If we run this:
|
||||
|
||||
$ pytest -rx
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collected 4 items
|
||||
@@ -531,7 +531,7 @@ If we run this:
|
||||
========================= short test summary info ==========================
|
||||
XFAIL test_step.py::TestUserHandling::test_deletion
|
||||
reason: previous test failed (test_modification)
|
||||
============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds ===============
|
||||
================== 1 failed, 2 passed, 1 xfailed in 0.12s ==================
|
||||
|
||||
We'll see that ``test_deletion`` was not executed because ``test_modification``
|
||||
failed. It is reported as an "expected failure".
|
||||
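For orientation, the two hooks this hunk touches read roughly as follows, reconstructed as a sketch from the fragments above rather than copied verbatim from the repository:

.. code-block:: python

    # content of conftest.py (reconstruction)
    import pytest


    def pytest_runtest_makereport(item, call):
        if "incremental" in item.keywords:
            if call.excinfo is not None:
                # remember the failing test on the class so later steps can xfail
                item.parent._previousfailed = item


    def pytest_runtest_setup(item):
        if "incremental" in item.keywords:
            previousfailed = getattr(item.parent, "_previousfailed", None)
            if previousfailed is not None:
                pytest.xfail("previous test failed ({})".format(previousfailed.name))
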
@@ -556,7 +556,7 @@ Here is an example for making a ``db`` fixture available in a directory:
     import pytest


-    class DB(object):
+    class DB:
         pass


@@ -595,7 +595,7 @@ We can run this:

 $ pytest
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collected 7 items
@@ -644,7 +644,7 @@ We can run this:
 E       assert 0

 a/test_db2.py:2: AssertionError
-========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ==========
+============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12s ==============

 The two test modules in the ``a`` directory see the same ``db`` fixture instance
 while the one test in the sister-directory ``b`` doesn't see it. We could of course
@@ -684,7 +684,7 @@ case we just write some information out to a ``failures`` file:
         with open("failures", mode) as f:
             # let's also access a fixture for the fun of it
             if "tmpdir" in item.fixturenames:
-                extra = " (%s)" % item.funcargs["tmpdir"]
+                extra = " ({})".format(item.funcargs["tmpdir"])
             else:
                 extra = ""

@@ -709,7 +709,7 @@ and run them:

 $ pytest test_module.py
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collected 2 items
@@ -733,7 +733,7 @@ and run them:
 E       assert 0

 test_module.py:6: AssertionError
-========================= 2 failed in 0.12 seconds =========================
+============================ 2 failed in 0.12s =============================

 you will have a "failures" file which contains the failing test ids:

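The hunk shows only the middle of the hook; a hedged reconstruction of the full ``conftest.py`` it belongs to (the hookwrapper pattern is pytest's documented way to post-process test reports):

.. code-block:: python

    # content of conftest.py (reconstruction)
    import os.path

    import pytest


    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        # execute all other hooks to obtain the report object
        outcome = yield
        rep = outcome.get_result()
        # we only look at actual failing test calls, not setup/teardown
        if rep.when == "call" and rep.failed:
            mode = "a" if os.path.exists("failures") else "w"
            with open("failures", mode) as f:
                # let's also access a fixture for the fun of it
                if "tmpdir" in item.fixturenames:
                    extra = " ({})".format(item.funcargs["tmpdir"])
                else:
                    extra = ""
                f.write(rep.nodeid + extra + "\n")
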
@@ -813,7 +813,7 @@ and run it:

 $ pytest -s test_module.py
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collected 3 items
@@ -848,7 +848,7 @@ and run it:
 E       assert 0

 test_module.py:19: AssertionError
-==================== 2 failed, 1 error in 0.12 seconds =====================
+======================== 2 failed, 1 error in 0.12s ========================

 You'll see that the fixture finalizers could use the precise reporting
 information.

@@ -5,30 +5,36 @@ A session-scoped fixture effectively has access to all
 collected test items. Here is an example of a fixture
 function which walks all collected tests and looks
 if their test class defines a ``callme`` method and
-calls it::
+calls it:
+
+.. code-block:: python

     # content of conftest.py

     import pytest


     @pytest.fixture(scope="session", autouse=True)
     def callattr_ahead_of_alltests(request):
         print("callattr_ahead_of_alltests called")
-        seen = set([None])
+        seen = {None}
         session = request.node
         for item in session.items:
             cls = item.getparent(pytest.Class)
             if cls not in seen:
                 if hasattr(cls.obj, "callme"):
-                   cls.obj.callme()
+                    cls.obj.callme()
                 seen.add(cls)

 test classes may now define a ``callme`` method which
-will be called ahead of running any tests::
+will be called ahead of running any tests:
+
+.. code-block:: python

     # content of test_module.py

-    class TestHello(object):
-
+    class TestHello:
         @classmethod
         def callme(cls):
             print("callme called!")
@@ -39,16 +45,20 @@ will be called ahead of running any tests::
         def test_method2(self):
             print("test_method1 called")

-    class TestOther(object):
-
+
+    class TestOther:
         @classmethod
         def callme(cls):
             print("callme other called")

         def test_other(self):
             print("test other")


     # works with unittest as well ...
     import unittest


     class SomeTest(unittest.TestCase):
         @classmethod
         def callme(self):
@@ -71,4 +81,4 @@ If you run this without output capturing:
 .test other
 .test_unit1 method called
 .
-4 passed in 0.12 seconds
+4 passed in 0.12s

@@ -15,7 +15,9 @@ Running an existing test suite with pytest
 Say you want to contribute to an existing repository somewhere.
 After pulling the code into your development space using some
 flavor of version control and (optionally) setting up a virtualenv
-you will want to run::
+you will want to run:
+
+.. code-block:: bash

     cd <repository>
     pip install -e .  # Environment dependent alternatives include

@@ -49,20 +49,25 @@ argument. For each argument name, a fixture function with that name provides
 the fixture object. Fixture functions are registered by marking them with
 :py:func:`@pytest.fixture <_pytest.python.fixture>`. Let's look at a simple
 self-contained test module containing a fixture and a test function
-using it::
+using it:
+
+.. code-block:: python

     # content of ./test_smtpsimple.py
     import pytest


     @pytest.fixture
     def smtp_connection():
         import smtplib

         return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)


     def test_ehlo(smtp_connection):
         response, msg = smtp_connection.ehlo()
         assert response == 250
-        assert 0 # for demo purposes
+        assert 0  # for demo purposes

 Here, the ``test_ehlo`` needs the ``smtp_connection`` fixture value. pytest
 will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>`
@@ -72,7 +77,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this:

 $ pytest test_smtpsimple.py
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collected 1 item
@@ -87,11 +92,11 @@ marked ``smtp_connection`` fixture function. Running the test looks like this:
     def test_ehlo(smtp_connection):
         response, msg = smtp_connection.ehlo()
         assert response == 250
->       assert 0 # for demo purposes
+>       assert 0  # for demo purposes
 E       assert 0

-test_smtpsimple.py:11: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+test_smtpsimple.py:14: AssertionError
+============================ 1 failed in 0.12s =============================

 In the failure traceback we see that the test function was called with a
 ``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture
@@ -180,12 +185,15 @@ Possible values for ``scope`` are: ``function``, ``class``, ``module``, ``package`` or ``session``.

 The next example puts the fixture function into a separate ``conftest.py`` file
 so that tests from multiple test modules in the directory can
-access the fixture function::
+access the fixture function:
+
+.. code-block:: python

     # content of conftest.py
     import pytest
     import smtplib


     @pytest.fixture(scope="module")
     def smtp_connection():
         return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)
@@ -193,16 +201,20 @@ access the fixture function::
 The name of the fixture again is ``smtp_connection`` and you can access its
 result by listing the name ``smtp_connection`` as an input parameter in any
 test or fixture function (in or below the directory where ``conftest.py`` is
-located)::
+located):
+
+.. code-block:: python

     # content of test_module.py


     def test_ehlo(smtp_connection):
         response, msg = smtp_connection.ehlo()
         assert response == 250
         assert b"smtp.gmail.com" in msg
         assert 0  # for demo purposes


     def test_noop(smtp_connection):
         response, msg = smtp_connection.noop()
         assert response == 250
@@ -215,7 +227,7 @@ inspect what is going on and can now run the tests:

 $ pytest test_module.py
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collected 2 items
@@ -234,7 +246,7 @@ inspect what is going on and can now run the tests:
 >       assert 0  # for demo purposes
 E       assert 0

-test_module.py:6: AssertionError
+test_module.py:7: AssertionError
 ________________________________ test_noop _________________________________

 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>
@@ -245,8 +257,8 @@ inspect what is going on and can now run the tests:
 >       assert 0  # for demo purposes
 E       assert 0

-test_module.py:11: AssertionError
-========================= 2 failed in 0.12 seconds =========================
+test_module.py:13: AssertionError
+============================ 2 failed in 0.12s =============================

 You see the two ``assert 0`` failing and more importantly you can also see
 that the same (module-scoped) ``smtp_connection`` object was passed into the
@@ -289,51 +301,59 @@ are finalized when the last test of a *package* finishes.
 Use this new feature sparingly and please make sure to report any issues you find.


-Higher-scoped fixtures are instantiated first
----------------------------------------------
+.. _dynamic scope:
+
+Dynamic scope
+^^^^^^^^^^^^^
+
+.. versionadded:: 5.2
+
+In some cases, you might want to change the scope of the fixture without changing the code.
+To do that, pass a callable to ``scope``. The callable must return a string with a valid scope
+and will be executed only once - during the fixture definition. It will be called with two
+keyword arguments - ``fixture_name`` as a string and ``config`` with a configuration object.
+
+This can be especially useful when dealing with fixtures that need time for setup, like spawning
+a docker container. You can use the command-line argument to control the scope of the spawned
+containers for different environments. See the example below.
+
+.. code-block:: python
+
+    def determine_scope(fixture_name, config):
+        if config.getoption("--keep-containers"):
+            return "session"
+        return "function"
+
+
+    @pytest.fixture(scope=determine_scope)
+    def docker_container():
+        yield spawn_container()

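``--keep-containers`` is not a built-in pytest option, so the dynamic-scope example presumes it was registered somewhere; a minimal assumed registration would be:

.. code-block:: python

    # content of conftest.py (assumption, not shown in the diff)
    def pytest_addoption(parser):
        parser.addoption(
            "--keep-containers",
            action="store_true",
            help="spawn one container for the whole session instead of one per test",
        )
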
+Order: Higher-scoped fixtures are instantiated first
+----------------------------------------------------


 Within a function request for features, fixture of higher-scopes (such as ``session``) are instantiated first than
 lower-scoped fixtures (such as ``function`` or ``class``). The relative order of fixtures of same scope follows
-the declared order in the test function and honours dependencies between fixtures.
+the declared order in the test function and honours dependencies between fixtures. Autouse fixtures will be
+instantiated before explicitly used fixtures.

 Consider the code below:

-.. code-block:: python
+.. literalinclude:: example/fixtures/test_fixtures_order.py

-    @pytest.fixture(scope="session")
-    def s1():
-        pass
-
-
-    @pytest.fixture(scope="module")
-    def m1():
-        pass
-
-
-    @pytest.fixture
-    def f1(tmpdir):
-        pass
-
-
-    @pytest.fixture
-    def f2():
-        pass
-
-
-    def test_foo(f1, m1, f2, s1):
-        ...

-The fixtures requested by ``test_foo`` will be instantiated in the following order:
+The fixtures requested by ``test_order`` will be instantiated in the following order:

 1. ``s1``: is the highest-scoped fixture (``session``).
 2. ``m1``: is the second highest-scoped fixture (``module``).
-3. ``tmpdir``: is a ``function``-scoped fixture, required by ``f1``: it needs to be instantiated at this point
    because it is a dependency of ``f1``.
-4. ``f1``: is the first ``function``-scoped fixture in ``test_foo`` parameter list.
-5. ``f2``: is the last ``function``-scoped fixture in ``test_foo`` parameter list.
+3. ``a1``: is a ``function``-scoped ``autouse`` fixture: it will be instantiated before other fixtures
+   within the same scope.
+4. ``f3``: is a ``function``-scoped fixture, required by ``f1``: it needs to be instantiated at this point
+5. ``f1``: is the first ``function``-scoped fixture in ``test_order`` parameter list.
+6. ``f2``: is the last ``function``-scoped fixture in ``test_order`` parameter list.

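The new ``literalinclude`` pulls in a file that does not appear in this diff. Reconstructed from the numbered list above, ``example/fixtures/test_fixtures_order.py`` plausibly looks like this sketch (fixture bodies reduced to ``pass``):

.. code-block:: python

    import pytest


    @pytest.fixture(scope="session")
    def s1():
        pass


    @pytest.fixture(scope="module")
    def m1():
        pass


    @pytest.fixture
    def f1(f3):
        pass


    @pytest.fixture
    def f3():
        pass


    @pytest.fixture(autouse=True)
    def a1():
        pass


    @pytest.fixture
    def f2():
        pass


    def test_order(f1, m1, f2, s1):
        pass
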
 .. _`finalization`:
@@ -371,7 +391,7 @@ Let's execute it:
 $ pytest -s -q --tb=no
 FFteardown smtp

-2 failed in 0.12 seconds
+2 failed in 0.12s

 We see that the ``smtp_connection`` instance is finalized after the two
 tests finished execution. Note that if we decorated our fixture
@@ -400,6 +420,34 @@ The ``smtp_connection`` connection will be closed after the test finished
 execution because the ``smtp_connection`` object automatically closes when
 the ``with`` statement ends.

+Using the contextlib.ExitStack context manager finalizers will always be called
+regardless if the fixture *setup* code raises an exception. This is handy to properly
+close all resources created by a fixture even if one of them fails to be created/acquired:
+
+.. code-block:: python
+
+    # content of test_yield3.py
+
+    import contextlib
+
+    import pytest
+
+
+    @contextlib.contextmanager
+    def connect(port):
+        ...  # create connection
+        yield
+        ...  # close connection
+
+
+    @pytest.fixture
+    def equipments():
+        with contextlib.ExitStack() as stack:
+            yield [stack.enter_context(connect(port)) for port in ("C1", "C3", "C28")]
+
+In the example above, if ``"C28"`` fails with an exception, ``"C1"`` and ``"C3"`` will still
+be properly closed.
+
+Note that if an exception happens during the *setup* code (before the ``yield`` keyword), the
+*teardown* code (after the ``yield``) will not be called.

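A quick usage sketch for the ``equipments`` fixture above; the test name and assertion are illustrative only:

.. code-block:: python

    def test_all_equipments_acquired(equipments):
        # one entry per requested port
        assert len(equipments) == 3
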
@@ -428,27 +476,39 @@ Here's the ``smtp_connection`` fixture changed to use ``addfinalizer`` for cleanup:
         return smtp_connection  # provide the fixture value


+Here's the ``equipments`` fixture changed to use ``addfinalizer`` for cleanup:
+
+.. code-block:: python
+
+    # content of test_yield3.py
+
+    import contextlib
+    import functools
+
+    import pytest
+
+
+    @contextlib.contextmanager
+    def connect(port):
+        ...  # create connection
+        yield
+        ...  # close connection
+
+
+    @pytest.fixture
+    def equipments(request):
+        r = []
+        for port in ("C1", "C3", "C28"):
+            cm = connect(port)
+            equip = cm.__enter__()
+            request.addfinalizer(functools.partial(cm.__exit__, None, None, None))
+            r.append(equip)
+        return r
+
+
 Both ``yield`` and ``addfinalizer`` methods work similarly by calling their code after the test
-ends, but ``addfinalizer`` has two key differences over ``yield``:
-
-1. It is possible to register multiple finalizer functions.
-
-2. Finalizers will always be called regardless if the fixture *setup* code raises an exception.
-   This is handy to properly close all resources created by a fixture even if one of them
-   fails to be created/acquired::
-
-       @pytest.fixture
-       def equipments(request):
-           r = []
-           for port in ('C1', 'C3', 'C28'):
-               equip = connect(port)
-               request.addfinalizer(equip.disconnect)
-               r.append(equip)
-           return r
-
-In the example above, if ``"C28"`` fails with an exception, ``"C1"`` and ``"C3"`` will still
-be properly closed. Of course, if an exception happens before the finalize function is
-registered then it will not be executed.
+ends. Of course, if an exception happens before the finalize function is registered then it
+will not be executed.


 .. _`request-context`:
@@ -459,18 +519,21 @@ Fixtures can introspect the requesting test context
 Fixture functions can accept the :py:class:`request <FixtureRequest>` object
 to introspect the "requesting" test function, class or module context.
 Further extending the previous ``smtp_connection`` fixture example, let's
-read an optional server URL from the test module which uses our fixture::
+read an optional server URL from the test module which uses our fixture:
+
+.. code-block:: python

     # content of conftest.py
     import pytest
     import smtplib


     @pytest.fixture(scope="module")
     def smtp_connection(request):
         server = getattr(request.module, "smtpserver", "smtp.gmail.com")
         smtp_connection = smtplib.SMTP(server, 587, timeout=5)
         yield smtp_connection
-        print("finalizing %s (%s)" % (smtp_connection, server))
+        print("finalizing {} ({})".format(smtp_connection, server))
         smtp_connection.close()

 We use the ``request.module`` attribute to optionally obtain an
@@ -482,15 +545,18 @@ again, nothing much has changed:
 $ pytest -s -q --tb=no
 FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)

-2 failed in 0.12 seconds
+2 failed in 0.12s

 Let's quickly create another test module that actually sets the
-server URL in its module namespace::
+server URL in its module namespace:
+
+.. code-block:: python

     # content of test_anothersmtp.py

     smtpserver = "mail.python.org"  # will be read by smtp fixture


     def test_showhelo(smtp_connection):
         assert 0, smtp_connection.helo()

@@ -502,7 +568,7 @@ Running it:
 F                                                                    [100%]
 ================================= FAILURES =================================
 ______________________________ test_showhelo _______________________________
-test_anothersmtp.py:5: in test_showhelo
+test_anothersmtp.py:6: in test_showhelo
     assert 0, smtp_connection.helo()
 E   AssertionError: (250, b'mail.python.org')
 E   assert 0
@@ -522,16 +588,14 @@ of a fixture is needed multiple times in a single test. Instead of returning
 data directly, the fixture instead returns a function which generates the data.
 This function can then be called multiple times in the test.

-Factories can have have parameters as needed::
+Factories can have parameters as needed:
+
+.. code-block:: python

     @pytest.fixture
     def make_customer_record():

         def _make_customer_record(name):
-            return {
-                "name": name,
-                "orders": []
-            }
+            return {"name": name, "orders": []}

         return _make_customer_record

@@ -541,7 +605,9 @@ Factories can have have parameters as needed::
     customer_2 = make_customer_record("Mike")
     customer_3 = make_customer_record("Meredith")

-If the data created by the factory requires managing, the fixture can take care of that::
+If the data created by the factory requires managing, the fixture can take care of that:
+
+.. code-block:: python

     @pytest.fixture
     def make_customer_record():
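The hunk ends before the managed-factory body; the documented pattern continues roughly as below (``models.Customer`` and ``record.destroy()`` are illustrative stand-ins for whatever needs managing):

.. code-block:: python

    @pytest.fixture
    def make_customer_record():
        created_records = []

        def _make_customer_record(name):
            record = models.Customer(name=name, orders=[])
            created_records.append(record)
            return record

        yield _make_customer_record

        for record in created_records:
            record.destroy()
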
@@ -580,18 +646,20 @@ configured in multiple ways.
 Extending the previous example, we can flag the fixture to create two
 ``smtp_connection`` fixture instances which will cause all tests using the fixture
 to run twice. The fixture function gets access to each parameter
-through the special :py:class:`request <FixtureRequest>` object::
+through the special :py:class:`request <FixtureRequest>` object:
+
+.. code-block:: python

     # content of conftest.py
     import pytest
     import smtplib

-    @pytest.fixture(scope="module",
-                    params=["smtp.gmail.com", "mail.python.org"])
+
+    @pytest.fixture(scope="module", params=["smtp.gmail.com", "mail.python.org"])
     def smtp_connection(request):
         smtp_connection = smtplib.SMTP(request.param, 587, timeout=5)
         yield smtp_connection
-        print("finalizing %s" % smtp_connection)
+        print("finalizing {}".format(smtp_connection))
         smtp_connection.close()

 The main change is the declaration of ``params`` with
@@ -616,7 +684,7 @@ So let's just do another run:
 >       assert 0  # for demo purposes
 E       assert 0

-test_module.py:6: AssertionError
+test_module.py:7: AssertionError
 ________________________ test_noop[smtp.gmail.com] _________________________

 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>
@@ -627,7 +695,7 @@ So let's just do another run:
 >       assert 0  # for demo purposes
 E       assert 0

-test_module.py:11: AssertionError
+test_module.py:13: AssertionError
 ________________________ test_ehlo[mail.python.org] ________________________

 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>
@@ -638,7 +706,7 @@ So let's just do another run:
 >       assert b"smtp.gmail.com" in msg
 E       AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING'

-test_module.py:5: AssertionError
+test_module.py:6: AssertionError
 -------------------------- Captured stdout setup ---------------------------
 finalizing <smtplib.SMTP object at 0xdeadbeef>
 ________________________ test_noop[mail.python.org] ________________________
@@ -651,10 +719,10 @@ So let's just do another run:
 >       assert 0  # for demo purposes
 E       assert 0

-test_module.py:11: AssertionError
+test_module.py:13: AssertionError
 ------------------------- Captured stdout teardown -------------------------
 finalizing <smtplib.SMTP object at 0xdeadbeef>
-4 failed in 0.12 seconds
+4 failed in 0.12s

 We see that our two test functions each ran twice, against the different
 ``smtp_connection`` instances. Note also, that with the ``mail.python.org``
@@ -672,28 +740,35 @@ Numbers, strings, booleans and None will have their usual string
 representation used in the test ID. For other objects, pytest will
 make a string based on the argument name. It is possible to customise
 the string used in a test ID for a certain fixture value by using the
-``ids`` keyword argument::
+``ids`` keyword argument:
+
+.. code-block:: python

     # content of test_ids.py
     import pytest


     @pytest.fixture(params=[0, 1], ids=["spam", "ham"])
     def a(request):
         return request.param


     def test_a(a):
         pass


     def idfn(fixture_value):
         if fixture_value == 0:
             return "eggs"
         else:
             return None


     @pytest.fixture(params=[0, 1], ids=idfn)
     def b(request):
         return request.param


     def test_b(b):
         pass

@@ -708,7 +783,7 @@ Running the above tests results in the following test IDs being used:

 $ pytest --collect-only
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collected 10 items
@@ -726,7 +801,7 @@ Running the above tests results in the following test IDs being used:
     <Function test_ehlo[mail.python.org]>
     <Function test_noop[mail.python.org]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

 .. _`fixture-parametrize-marks`:

@@ -736,14 +811,19 @@ Using marks with parametrized fixtures
 :func:`pytest.param` can be used to apply marks in values sets of parametrized fixtures in the same way
 that they can be used with :ref:`@pytest.mark.parametrize <@pytest.mark.parametrize>`.

-Example::
+Example:
+
+.. code-block:: python

     # content of test_fixture_marks.py
     import pytest


     @pytest.fixture(params=[0, 1, pytest.param(2, marks=pytest.mark.skip)])
     def data_set(request):
         return request.param


     def test_data(data_set):
         pass

@@ -753,7 +833,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:

 $ pytest test_fixture_marks.py -v
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collecting ... collected 3 items
@@ -762,7 +842,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:
 test_fixture_marks.py::test_data[1] PASSED                           [ 66%]
 test_fixture_marks.py::test_data[2] SKIPPED                          [100%]

-=================== 2 passed, 1 skipped in 0.12 seconds ====================
+======================= 2 passed, 1 skipped in 0.12s =======================

 .. _`interdependent fixtures`:

@@ -774,20 +854,25 @@ can use other fixtures themselves. This contributes to a modular design
 of your fixtures and allows re-use of framework-specific fixtures across
 many projects. As a simple example, we can extend the previous example
 and instantiate an object ``app`` where we stick the already defined
-``smtp_connection`` resource into it::
+``smtp_connection`` resource into it:
+
+.. code-block:: python

     # content of test_appsetup.py

     import pytest

-    class App(object):
-
+
+    class App:
         def __init__(self, smtp_connection):
             self.smtp_connection = smtp_connection


     @pytest.fixture(scope="module")
     def app(smtp_connection):
         return App(smtp_connection)


     def test_smtp_connection_exists(app):
         assert app.smtp_connection

@@ -798,7 +883,7 @@ Here we declare an ``app`` fixture which receives the previously defined

 $ pytest -v test_appsetup.py
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collecting ... collected 2 items
@@ -806,7 +891,7 @@ Here we declare an ``app`` fixture which receives the previously defined
 test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%]
 test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.12s =============================

 Due to the parametrization of ``smtp_connection``, the test will run twice with two
 different ``App`` instances and respective smtp servers. There is no
@@ -836,31 +921,40 @@ this eases testing of applications which create and use global state.

 The following example uses two parametrized fixtures, one of which is
 scoped on a per-module basis, and all the functions perform ``print`` calls
-to show the setup/teardown flow::
+to show the setup/teardown flow:
+
+.. code-block:: python

     # content of test_module.py
     import pytest


     @pytest.fixture(scope="module", params=["mod1", "mod2"])
     def modarg(request):
         param = request.param
-        print("  SETUP modarg %s" % param)
+        print("  SETUP modarg", param)
         yield param
-        print("  TEARDOWN modarg %s" % param)
+        print("  TEARDOWN modarg", param)

-    @pytest.fixture(scope="function", params=[1,2])
+
+    @pytest.fixture(scope="function", params=[1, 2])
     def otherarg(request):
         param = request.param
-        print("  SETUP otherarg %s" % param)
+        print("  SETUP otherarg", param)
         yield param
-        print("  TEARDOWN otherarg %s" % param)
+        print("  TEARDOWN otherarg", param)


     def test_0(otherarg):
-        print("  RUN test0 with otherarg %s" % otherarg)
+        print("  RUN test0 with otherarg", otherarg)


     def test_1(modarg):
-        print("  RUN test1 with modarg %s" % modarg)
+        print("  RUN test1 with modarg", modarg)


     def test_2(otherarg, modarg):
-        print("  RUN test2 with otherarg %s and modarg %s" % (otherarg, modarg))
+        print("  RUN test2 with otherarg {} and modarg {}".format(otherarg, modarg))


 Let's run the tests in verbose mode and with looking at the print-output:
@@ -869,7 +963,7 @@ Let's run the tests in verbose mode and with looking at the print-output:

 $ pytest -v -s test_module.py
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collecting ... collected 8 items
@@ -907,7 +1001,7 @@ Let's run the tests in verbose mode and with looking at the print-output:
       TEARDOWN modarg mod2


-========================= 8 passed in 0.12 seconds =========================
+============================ 8 passed in 0.12s =============================

 You can see that the parametrized module-scoped ``modarg`` resource caused an
 ordering of test execution that lead to the fewest possible "active" resources.
@@ -935,7 +1029,9 @@ current working directory but otherwise do not care for the concrete
 directory. Here is how you can use the standard `tempfile
 <http://docs.python.org/library/tempfile.html>`_ and pytest fixtures to
 achieve it. We separate the creation of the fixture into a conftest.py
-file::
+file:
+
+.. code-block:: python

     # content of conftest.py

@@ -943,19 +1039,23 @@ file::
     import tempfile
     import os


     @pytest.fixture()
     def cleandir():
         newpath = tempfile.mkdtemp()
         os.chdir(newpath)

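As shown, ``cleandir`` neither returns to the previous working directory nor deletes the temporary one. A more defensive variant, offered here as an editorial sketch rather than part of the diff:

.. code-block:: python

    import os
    import shutil
    import tempfile

    import pytest


    @pytest.fixture()
    def cleandir():
        old_cwd = os.getcwd()
        newpath = tempfile.mkdtemp()
        os.chdir(newpath)
        yield
        # restore and clean up so the directory change cannot leak between tests
        os.chdir(old_cwd)
        shutil.rmtree(newpath)
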
-and declare its use in a test module via a ``usefixtures`` marker::
+and declare its use in a test module via a ``usefixtures`` marker:
+
+.. code-block:: python

     # content of test_setenv.py
     import os
     import pytest


     @pytest.mark.usefixtures("cleandir")
-    class TestDirectoryInit(object):
+    class TestDirectoryInit:
         def test_cwd_starts_empty(self):
             assert os.listdir(os.getcwd()) == []
             with open("myfile", "w") as f:
@@ -973,7 +1073,7 @@ to verify our fixture is activated and the tests pass:

 $ pytest -q
 ..                                                                   [100%]
-2 passed in 0.12 seconds
+2 passed in 0.12s

 You can specify multiple fixtures like this:

@@ -1032,25 +1132,32 @@ without declaring a function argument explicitly or a `usefixtures`_ decorator.
 As a practical example, suppose we have a database fixture which has a
 begin/rollback/commit architecture and we want to automatically surround
 each test method by a transaction and a rollback. Here is a dummy
-self-contained implementation of this idea::
+self-contained implementation of this idea:
+
+.. code-block:: python

     # content of test_db_transact.py

     import pytest

-    class DB(object):
-
+
+    class DB:
         def __init__(self):
             self.intransaction = []

         def begin(self, name):
             self.intransaction.append(name)

         def rollback(self):
             self.intransaction.pop()


     @pytest.fixture(scope="module")
     def db():
         return DB()

-    class TestClass(object):
-
+
+    class TestClass:
         @pytest.fixture(autouse=True)
         def transact(self, request, db):
             db.begin(request.function.__name__)
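The ``transact`` fixture is cut off by the hunk boundary; combining it with the teardown visible in a later hunk (``yield`` followed by ``db.rollback()``), the full body plausibly reads:

.. code-block:: python

    class TestClass:
        @pytest.fixture(autouse=True)
        def transact(self, request, db):
            db.begin(request.function.__name__)
            yield
            db.rollback()
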
@@ -1074,7 +1181,7 @@ If we run it, we get two passing tests:

 $ pytest -q
 ..                                                                   [100%]
-2 passed in 0.12 seconds
+2 passed in 0.12s

 Here is how autouse fixtures work in other scopes:

@@ -1098,7 +1205,9 @@ Here is how autouse fixtures work in other scopes:
 Note that the above ``transact`` fixture may very well be a fixture that
 you want to make available in your project without having it generally
 active. The canonical way to do that is to put the transact definition
-into a conftest.py file **without** using ``autouse``::
+into a conftest.py file **without** using ``autouse``:
+
+.. code-block:: python

     # content of conftest.py
     @pytest.fixture
@@ -1107,10 +1216,12 @@ into a conftest.py file **without** using ``autouse``::
         yield
         db.rollback()

-and then e.g. have a TestClass using it by declaring the need::
+and then e.g. have a TestClass using it by declaring the need:
+
+.. code-block:: python

     @pytest.mark.usefixtures("transact")
-    class TestClass(object):
+    class TestClass:
         def test_method1(self):
             ...


@@ -122,4 +122,4 @@ Resources
 * Google:

   * `Flaky Tests at Google and How We Mitigate Them <https://testing.googleblog.com/2016/05/flaky-tests-at-google-and-how-we.html>`_ by John Micco, 2016
-  * `Where do Google's flaky tests come from? <https://docs.google.com/document/d/1mZ0-Kc97DI_F3tf_GBW_NB_aqka-P1jVOsFfufxqUUM/edit#heading=h.ec0r4fypsleh>`_ by Jeff Listfield, 2017
+  * `Where do Google's flaky tests come from? <https://testing.googleblog.com/2017/04/where-do-our-flaky-tests-come-from.html>`_ by Jeff Listfield, 2017

@@ -21,19 +21,23 @@ funcarg for a test function is required. If a factory wants to
 re-use a resource across different scopes, it often used
 the ``request.cached_setup()`` helper to manage caching of
 resources. Here is a basic example how we could implement
-a per-session Database object::
+a per-session Database object:
+
+.. code-block:: python

     # content of conftest.py
-    class Database(object):
+    class Database:
         def __init__(self):
             print("database instance created")

         def destroy(self):
             print("database instance destroyed")


     def pytest_funcarg__db(request):
-        return request.cached_setup(setup=DataBase,
-                                    teardown=lambda db: db.destroy,
-                                    scope="session")
+        return request.cached_setup(
+            setup=DataBase, teardown=lambda db: db.destroy, scope="session"
+        )

 There are several limitations and difficulties with this approach:

@@ -68,7 +72,9 @@ Direct scoping of fixture/funcarg factories

 Instead of calling cached_setup() with a cache scope, you can use the
 :ref:`@pytest.fixture <pytest.fixture>` decorator and directly state
-the scope::
+the scope:
+
+.. code-block:: python

     @pytest.fixture(scope="session")
     def db(request):
@@ -90,11 +96,13 @@ Previously, funcarg factories could not directly cause parametrization.
 You needed to specify a ``@parametrize`` decorator on your test function
 or implement a ``pytest_generate_tests`` hook to perform
 parametrization, i.e. calling a test multiple times with different value
-sets. pytest-2.3 introduces a decorator for use on the factory itself::
+sets. pytest-2.3 introduces a decorator for use on the factory itself:
+
+.. code-block:: python

     @pytest.fixture(params=["mysql", "pg"])
     def db(request):
-        ... # use request.param
+        ...  # use request.param

 Here the factory will be invoked twice (with the respective "mysql"
 and "pg" values set as ``request.param`` attributes) and all of
@@ -107,7 +115,9 @@ allow to re-use already written factories because effectively
 parametrized via
 :py:func:`~_pytest.python.Metafunc.parametrize(indirect=True)` calls.

-Of course it's perfectly fine to combine parametrization and scoping::
+Of course it's perfectly fine to combine parametrization and scoping:
+
+.. code-block:: python

     @pytest.fixture(scope="session", params=["mysql", "pg"])
     def db(request):
@@ -128,7 +138,9 @@ No ``pytest_funcarg__`` prefix when using @fixture decorator

 When using the ``@fixture`` decorator the name of the function
 denotes the name under which the resource can be accessed as a function
-argument::
+argument:
+
+.. code-block:: python

     @pytest.fixture()
     def db(request):
@@ -137,7 +149,9 @@ argument::
 The name under which the funcarg resource can be requested is ``db``.

 You can still use the "old" non-decorator way of specifying funcarg factories
-aka::
+aka:
+
+.. code-block:: python

     def pytest_funcarg__db(request):
         ...

@@ -28,19 +28,22 @@ Install ``pytest``
 .. code-block:: bash

     $ pytest --version
-    This is pytest version 4.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py
+    This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.7/site-packages/pytest.py

 .. _`simpletest`:

 Create your first test
 ----------------------------------------------------------

-Create a simple test function with just four lines of code::
+Create a simple test function with just four lines of code:
+
+.. code-block:: python

     # content of test_sample.py
     def func(x):
         return x + 1


     def test_answer():
         assert func(3) == 5

@@ -50,7 +53,7 @@ That’s it. You can now execute the test function:

 $ pytest
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collected 1 item
@@ -65,8 +68,8 @@ That’s it. You can now execute the test function:
 E       assert 4 == 5
 E        +  where 4 = func(3)

-test_sample.py:5: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+test_sample.py:6: AssertionError
+============================ 1 failed in 0.12s =============================

 This test returns a failure report because ``func(3)`` does not return ``5``.

@@ -83,13 +86,18 @@ Run multiple tests
 Assert that a certain exception is raised
 --------------------------------------------------------------

-Use the :ref:`raises <assertraises>` helper to assert that some code raises an exception::
+Use the :ref:`raises <assertraises>` helper to assert that some code raises an exception:
+
+.. code-block:: python

     # content of test_sysexit.py
     import pytest


     def f():
         raise SystemExit(1)


     def test_mytest():
         with pytest.raises(SystemExit):
             f()
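If the exception object itself needs inspecting, ``pytest.raises`` also yields an ``ExceptionInfo``; a small editorial extension of the example above (the ``.value.code`` access is ``SystemExit``'s standard attribute):

.. code-block:: python

    def test_mytest_exit_code():
        with pytest.raises(SystemExit) as excinfo:
            f()
        assert excinfo.value.code == 1
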
@@ -100,22 +108,24 @@ Execute the test function with “quiet” reporting mode:

 $ pytest -q test_sysexit.py
 .                                                                    [100%]
-1 passed in 0.12 seconds
+1 passed in 0.12s

 Group multiple tests in a class
 --------------------------------------------------------------

-Once you develop multiple tests, you may want to group them into a class. pytest makes it easy to create a class containing more than one test::
+Once you develop multiple tests, you may want to group them into a class. pytest makes it easy to create a class containing more than one test:
+
+.. code-block:: python

     # content of test_class.py
-    class TestClass(object):
+    class TestClass:
         def test_one(self):
             x = "this"
-            assert 'h' in x
+            assert "h" in x

         def test_two(self):
             x = "hello"
-            assert hasattr(x, 'check')
+            assert hasattr(x, "check")

 ``pytest`` discovers all tests following its :ref:`Conventions for Python test discovery <test discovery>`, so it finds both ``test_`` prefixed functions. There is no need to subclass anything. We can simply run the module by passing its filename:

@@ -130,19 +140,21 @@ Once you develop multiple tests, you may want to group them into a class. pytest

     def test_two(self):
         x = "hello"
->       assert hasattr(x, 'check')
+>       assert hasattr(x, "check")
 E       AssertionError: assert False
 E        +  where False = hasattr('hello', 'check')

 test_class.py:8: AssertionError
-1 failed, 1 passed in 0.12 seconds
+1 failed, 1 passed in 0.12s

 The first test passed and the second failed. You can easily see the intermediate values in the assertion to help you understand the reason for the failure.

 Request a unique temporary directory for functional tests
 --------------------------------------------------------------

-``pytest`` provides `Builtin fixtures/function arguments <https://docs.pytest.org/en/latest/builtin.html#builtinfixtures>`_ to request arbitrary resources, like a unique temporary directory::
+``pytest`` provides `Builtin fixtures/function arguments <https://docs.pytest.org/en/latest/builtin.html>`_ to request arbitrary resources, like a unique temporary directory:
+
+.. code-block:: python

     # content of test_tmpdir.py
     def test_needsfiles(tmpdir):
@@ -168,7 +180,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look
 test_tmpdir.py:3: AssertionError
 --------------------------- Captured stdout call ---------------------------
 PYTEST_TMPDIR/test_needsfiles0
-1 failed in 0.12 seconds
+1 failed in 0.12s

 More info on tmpdir handling is available at :ref:`Temporary directories and files <tmpdir handling>`.

@@ -12,13 +12,17 @@ pip_ for installing your application and any dependencies,
 as well as the ``pytest`` package itself.
 This ensures your code and dependencies are isolated from your system Python installation.

-Next, place a ``setup.py`` file in the root of your package with the following minimum content::
+Next, place a ``setup.py`` file in the root of your package with the following minimum content:
+
+.. code-block:: python

     from setuptools import setup, find_packages

     setup(name="PACKAGENAME", packages=find_packages())

-Where ``PACKAGENAME`` is the name of your package. You can then install your package in "editable" mode by running from the same directory::
+Where ``PACKAGENAME`` is the name of your package. You can then install your package in "editable" mode by running from the same directory:
+
+.. code-block:: bash

     pip install -e .

@@ -60,7 +64,9 @@ Tests outside application code

 Putting tests into an extra directory outside your actual application code
 might be useful if you have many functional tests or for other reasons want
-to keep tests separate from actual application code (often a good idea)::
+to keep tests separate from actual application code (often a good idea):
+
+.. code-block:: text

     setup.py
     mypkg/
@@ -82,7 +88,7 @@ This has the following benefits:

 .. note::

-   See :ref:`pythonpath` for more information about the difference between calling ``pytest`` and
+   See :ref:`pytest vs python -m pytest` for more information about the difference between calling ``pytest`` and
    ``python -m pytest``.

 Note that using this scheme your test files must have **unique names**, because
@@ -92,7 +98,9 @@ be imported as ``test_app`` and ``test_view`` top-level modules by adding ``tests`` to
 ``sys.path``.

 If you need to have test modules with the same name, you might add ``__init__.py`` files to your
-``tests`` folder and subfolders, changing them to packages::
+``tests`` folder and subfolders, changing them to packages:
+
+.. code-block:: text

     setup.py
     mypkg/
@@ -114,7 +122,9 @@ This is problematic if you are using a tool like `tox`_ to test your package in development,
 because you want to test the *installed* version of your package, not the local code from the repository.

 In this situation, it is **strongly** suggested to use a ``src`` layout where application root package resides in a
-sub-directory of your root::
+sub-directory of your root:
+
+.. code-block:: text

     setup.py
     src/
@@ -140,7 +150,9 @@ Tests as part of application code

 Inlining test directories into your application package
 is useful if you have direct relation between tests and application modules and
-want to distribute them along with your application::
+want to distribute them along with your application:
+
+.. code-block:: text

     setup.py
     mypkg/
@@ -153,7 +165,9 @@ want to distribute them along with your application::
         test_view.py
     ...

-In this scheme, it is easy to run your tests using the ``--pyargs`` option::
+In this scheme, it is easy to run your tests using the ``--pyargs`` option:
+
+.. code-block:: bash

     pytest --pyargs mypkg

@@ -219,101 +233,4 @@ against your source code checkout, helping to detect packaging
 glitches.


-Integrating with setuptools / ``python setup.py test`` / ``pytest-runner``
---------------------------------------------------------------------------
-
-You can integrate test runs into your setuptools based project
-with the `pytest-runner <https://pypi.org/project/pytest-runner/>`_ plugin.
-
-Add this to ``setup.py`` file:
-
-.. code-block:: python
-
-    from setuptools import setup
-
-    setup(
-        # ...,
-        setup_requires=["pytest-runner", ...],
-        tests_require=["pytest", ...],
-        # ...,
-    )
-
-
-And create an alias into ``setup.cfg`` file:
-
-
-.. code-block:: ini
-
-    [aliases]
-    test=pytest
-
-If you now type::
-
-    python setup.py test
-
-this will execute your tests using ``pytest-runner``. As this is a
-standalone version of ``pytest`` no prior installation whatsoever is
-required for calling the test command. You can also pass additional
-arguments to pytest such as your test directory or other
-options using ``--addopts``.
-
-You can also specify other pytest-ini options in your ``setup.cfg`` file
-by putting them into a ``[tool:pytest]`` section:
-
-.. code-block:: ini
-
-    [tool:pytest]
-    addopts = --verbose
-    python_files = testing/*/*.py
-
-
-Manual Integration
-^^^^^^^^^^^^^^^^^^
-
-If for some reason you don't want/can't use ``pytest-runner``, you can write
-your own setuptools Test command for invoking pytest.
-
-.. code-block:: python
-
-    import sys
-
-    from setuptools.command.test import test as TestCommand
-
-
-    class PyTest(TestCommand):
-        user_options = [("pytest-args=", "a", "Arguments to pass to pytest")]
-
-        def initialize_options(self):
-            TestCommand.initialize_options(self)
-            self.pytest_args = ""
-
-        def run_tests(self):
-            import shlex
-
-            # import here, cause outside the eggs aren't loaded
-            import pytest
-
-            errno = pytest.main(shlex.split(self.pytest_args))
-            sys.exit(errno)
-
-
-    setup(
-        # ...,
-        tests_require=["pytest"],
-        cmdclass={"pytest": PyTest},
-    )
-
-Now if you run::
-
-    python setup.py test
-
-this will download ``pytest`` if needed and then run your tests
-as you would expect it to. You can pass a single string of arguments
-using the ``--pytest-args`` or ``-a`` command-line option. For example::
-
-    python setup.py test -a "--durations=5"
-
-is equivalent to running ``pytest --durations=5``.
-
-
 .. include:: links.inc

@@ -28,7 +28,7 @@ To execute it:

 $ pytest
 =========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
 cachedir: $PYTHON_PREFIX/.pytest_cache
 rootdir: $REGENDOC_TMPDIR
 collected 1 item
@@ -44,7 +44,7 @@ To execute it:
 E        +  where 4 = inc(3)

 test_sample.py:6: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.12s =============================

 Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used.
 See :ref:`Getting Started <getstarted>` for more examples.
@@ -61,7 +61,7 @@ Features

 - Can run :ref:`unittest <unittest>` (including trial) and :ref:`nose <noseintegration>` test suites out of the box;

-- Python Python 3.5+ and PyPy 3;
+- Python 3.5+ and PyPy 3;

 - Rich plugin architecture, with over 315+ `external plugins <http://plugincompat.herokuapp.com>`_ and thriving community;

@@ -83,6 +83,39 @@ Changelog

 Consult the :ref:`Changelog <changelog>` page for fixes and enhancements of each version.

+Support pytest
+--------------
+
+`Open Collective`_ is an online funding platform for open and transparent communities.
+It provide tools to raise money and share your finances in full transparency.
+
+It is the platform of choice for individuals and companies that want to make one-time or
+monthly donations directly to the project.
+
+See more datails in the `pytest collective`_.
+
+.. _Open Collective: https://opencollective.com
+.. _pytest collective: https://opencollective.com/pytest
+
+
+pytest for enterprise
+---------------------
+
+Available as part of the Tidelift Subscription.
+
+The maintainers of pytest and thousands of other packages are working with Tidelift to deliver commercial support and
+maintenance for the open source dependencies you use to build your applications.
+Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use.
+
+`Learn more. <https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=enterprise&utm_term=repo>`_
+
+Security
+^^^^^^^^
+
+pytest has never been associated with a security vunerability, but in any case, to report a
+security vulnerability please use the `Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.


 License
 -------

@@ -9,7 +9,7 @@ Distributed under the terms of the `MIT`_ license, pytest is free and open source software.

 The MIT License (MIT)

-Copyright (c) 2004-2017 Holger Krekel and others
+Copyright (c) 2004-2019 Holger Krekel and others

 Permission is hereby granted, free of charge, to any person obtaining a copy of
 this software and associated documentation files (the "Software"), to deal in

@@ -14,7 +14,7 @@
 .. _`distribute docs`:
 .. _`distribute`: https://pypi.org/project/distribute/
 .. _`pip`: https://pypi.org/project/pip/
-.. _`venv`: https://docs.python.org/3/library/venv.html/
+.. _`venv`: https://docs.python.org/3/library/venv.html
 .. _`virtualenv`: https://pypi.org/project/virtualenv/
 .. _hudson: http://hudson-ci.org/
 .. _jenkins: http://jenkins-ci.org/

@@ -70,7 +70,9 @@ caplog fixture
 ^^^^^^^^^^^^^^

 Inside tests it is possible to change the log level for the captured log
-messages. This is supported by the ``caplog`` fixture::
+messages. This is supported by the ``caplog`` fixture:
+
+.. code-block:: python

     def test_foo(caplog):
         caplog.set_level(logging.INFO)
@@ -78,59 +80,69 @@ messages. This is supported by the ``caplog`` fixture::
|
||||
|
||||
By default the level is set on the root logger,
|
||||
however as a convenience it is also possible to set the log level of any
|
||||
logger::
|
||||
logger:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_foo(caplog):
|
||||
caplog.set_level(logging.CRITICAL, logger='root.baz')
|
||||
caplog.set_level(logging.CRITICAL, logger="root.baz")
|
||||
pass
|
||||
|
||||
The log levels set are restored automatically at the end of the test.
|
||||
|
||||
It is also possible to use a context manager to temporarily change the log
|
||||
level inside a ``with`` block::
|
||||
level inside a ``with`` block:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_bar(caplog):
|
||||
with caplog.at_level(logging.INFO):
|
||||
pass
|
||||
|
||||
Again, by default the level of the root logger is affected but the level of any
|
||||
logger can be changed instead with::
|
||||
logger can be changed instead with:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_bar(caplog):
|
||||
with caplog.at_level(logging.CRITICAL, logger='root.baz'):
|
||||
with caplog.at_level(logging.CRITICAL, logger="root.baz"):
|
||||
pass
|
||||
|
||||
Lastly all the logs sent to the logger during the test run are made available on
|
||||
the fixture in the form of both the ``logging.LogRecord`` instances and the final log text.
|
||||
This is useful for when you want to assert on the contents of a message::
|
||||
This is useful for when you want to assert on the contents of a message:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_baz(caplog):
|
||||
func_under_test()
|
||||
for record in caplog.records:
|
||||
assert record.levelname != 'CRITICAL'
|
||||
assert 'wally' not in caplog.text
|
||||
assert record.levelname != "CRITICAL"
|
||||
assert "wally" not in caplog.text
|
||||
|
||||
For all the available attributes of the log records see the
|
||||
``logging.LogRecord`` class.
|
||||
|
||||
You can also resort to ``record_tuples`` if all you want to do is to ensure,
|
||||
that certain messages have been logged under a given logger name with a given
|
||||
severity and message::
|
||||
severity and message:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_foo(caplog):
|
||||
logging.getLogger().info('boo %s', 'arg')
|
||||
logging.getLogger().info("boo %s", "arg")
|
||||
|
||||
assert caplog.record_tuples == [
|
||||
('root', logging.INFO, 'boo arg'),
|
||||
]
|
||||
assert caplog.record_tuples == [("root", logging.INFO, "boo arg")]
|
||||
|
||||
You can call ``caplog.clear()`` to reset the captured log records in a test::
|
||||
You can call ``caplog.clear()`` to reset the captured log records in a test:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_something_with_clearing_records(caplog):
|
||||
some_method_that_creates_log_records()
|
||||
caplog.clear()
|
||||
your_test_method()
|
||||
assert ['Foo'] == [rec.message for rec in caplog.records]
|
||||
assert ["Foo"] == [rec.message for rec in caplog.records]
|
||||
|
||||
|
||||
The ``caplog.records`` attribute contains records from the current stage only, so
|
||||
@@ -149,7 +161,7 @@ the records for the ``setup`` and ``call`` stages during teardown like so:
|
||||
yield window
|
||||
for when in ("setup", "call"):
|
||||
messages = [
|
||||
x.message for x in caplog.get_records(when) if x.level == logging.WARNING
|
||||
x.message for x in caplog.get_records(when) if x.levelno == logging.WARNING
|
||||
]
|
||||
if messages:
|
||||
pytest.fail(
|
||||
|
||||
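A minimal self-contained sketch of this teardown pattern, assuming a hypothetical ``window`` resource and an illustrative failure message:

.. code-block:: python

    import logging

    import pytest


    @pytest.fixture
    def window(caplog):
        window = object()  # stand-in for whatever resource the real fixture yields
        yield window
        # during teardown, inspect records captured in the setup and call stages
        for when in ("setup", "call"):
            messages = [
                x.message for x in caplog.get_records(when) if x.levelno == logging.WARNING
            ]
            if messages:
                pytest.fail(
                    "warning messages encountered during testing: {}".format(messages)
                )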
@@ -40,7 +40,7 @@ You can register custom marks in your ``pytest.ini`` file like this:

Note that everything after the ``:`` is an optional description.

Alternatively, you can register new markers programatically in a
Alternatively, you can register new markers programmatically in a
:ref:`pytest_configure <initialization-hooks>` hook:

.. code-block:: python
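A minimal sketch of such a ``pytest_configure`` hook, with an illustrative ``env`` marker name:

.. code-block:: python

    # content of conftest.py
    def pytest_configure(config):
        # register an additional marker; the "env" marker here is illustrative
        config.addinivalue_line(
            "markers", "env(name): mark test to run only on named environment"
        )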
@@ -46,10 +46,13 @@ environment variable is missing, or to set multiple values to a known variable.
:py:meth:`monkeypatch.setenv` and :py:meth:`monkeypatch.delenv` can be used for
these patches.

4. Use :py:meth:`monkeypatch.syspath_prepend` to modify the system ``$PATH`` safely, and
4. Use ``monkeypatch.setenv("PATH", value, prepend=os.pathsep)`` to modify ``$PATH``, and
   :py:meth:`monkeypatch.chdir` to change the context of the current working directory
   during a test.

5. Use :py:meth:`monkeypatch.syspath_prepend` to modify ``sys.path`` which will also
   call :py:meth:`pkg_resources.fixup_namespace_packages` and :py:meth:`importlib.invalidate_caches`.

See the `monkeypatch blog post`_ for some introduction material
and a discussion of its motivation.
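A minimal sketch exercising the ``prepend`` form from item 4; the tools directory is illustrative:

.. code-block:: python

    import os


    def test_path_prepend(monkeypatch):
        # prepend an entry to $PATH, joined with the platform path separator;
        # monkeypatch restores the original value automatically after the test
        monkeypatch.setenv("PATH", "/opt/mytools/bin", prepend=os.pathsep)
        assert os.environ["PATH"].startswith("/opt/mytools/bin")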
@@ -269,7 +272,7 @@ to do this using the ``setenv`` and ``delenv`` method. Our example code to test:
    username = os.getenv("USER")

    if username is None:
        raise EnvironmentError("USER environment is not set.")
        raise OSError("USER environment is not set.")

    return username.lower()

@@ -293,7 +296,7 @@ both paths can be safely tested without impacting the running environment:
    """Remove the USER env var and assert EnvironmentError is raised."""
    monkeypatch.delenv("USER", raising=False)

    with pytest.raises(EnvironmentError):
    with pytest.raises(OSError):
        _ = get_os_user_lower()

This behavior can be moved into ``fixture`` structures and shared across tests:

@@ -320,7 +323,7 @@ This behavior can be moved into ``fixture`` structures and shared across tests:

def test_raise_exception(mock_env_missing):
    with pytest.raises(EnvironmentError):
    with pytest.raises(OSError):
        _ = get_os_user_lower()
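A minimal sketch of a ``mock_env_missing`` fixture consistent with the example above:

.. code-block:: python

    import pytest


    @pytest.fixture
    def mock_env_missing(monkeypatch):
        # remove USER if present; raising=False tolerates an unset variable
        monkeypatch.delenv("USER", raising=False)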
@@ -56,7 +56,7 @@ them in turn:

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 3 items

@@ -75,7 +75,7 @@ them in turn:
    E + where 54 = eval('6*9')

    test_expectation.py:6: AssertionError
    ==================== 1 failed, 2 passed in 0.12 seconds ====================
    ======================= 1 failed, 2 passed in 0.12s ========================

.. note::

@@ -121,14 +121,14 @@ Let's run this:

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 3 items

    test_expectation.py ..x [100%]

    =================== 2 passed, 1 xfailed in 0.12 seconds ====================
    ======================= 2 passed, 1 xfailed in 0.12s =======================

The one parameter set which caused a failure previously now
shows up as an "xfailed (expected to fail)" test.

@@ -205,7 +205,7 @@ If we now pass two stringinput values, our test will run twice:

    $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py
    .. [100%]
    2 passed in 0.12 seconds
    2 passed in 0.12s

Let's also run with a stringinput that will lead to a failing test:

@@ -225,7 +225,7 @@ Let's also run with a stringinput that will lead to a failing test:
    E + where <built-in method isalpha of str object at 0xdeadbeef> = '!'.isalpha

    test_strings.py:4: AssertionError
    1 failed in 0.12 seconds
    1 failed in 0.12s

As expected our test function fails.

@@ -239,7 +239,7 @@ list:
    s [100%]
    ========================= short test summary info ==========================
    SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:2
    1 skipped in 0.12 seconds
    1 skipped in 0.12s

Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across
those sets cannot be duplicated, otherwise an error will be raised.
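A minimal sketch of the ``conftest.py`` hooks this example relies on, consistent with the ``--stringinput`` option shown above:

.. code-block:: python

    # content of conftest.py
    def pytest_addoption(parser):
        parser.addoption(
            "--stringinput",
            action="append",
            default=[],
            help="list of stringinputs to pass to test functions",
        )


    def pytest_generate_tests(metafunc):
        # parametrize the "stringinput" fixture from the collected option values
        if "stringinput" in metafunc.fixturenames:
            metafunc.parametrize("stringinput", metafunc.config.getoption("stringinput"))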
@@ -8,7 +8,9 @@ Installing and Using plugins
This section talks about installing and using third party plugins.
For writing your own plugins, please refer to :ref:`writing-plugins`.

Installing a third party plugin can be easily done with ``pip``::
Installing a third party plugin can be easily done with ``pip``:

.. code-block:: bash

    pip install pytest-NAME
    pip uninstall pytest-NAME

@@ -95,7 +97,9 @@ Finding out which plugins are active
------------------------------------

If you want to find out which plugins are active in your
environment you can type::
environment you can type:

.. code-block:: bash

    pytest --trace-config

@@ -108,7 +112,9 @@ and their names. It will also print local plugins aka
Deactivating / unregistering a plugin by name
---------------------------------------------

You can prevent plugins from loading or unregister them::
You can prevent plugins from loading or unregister them:

.. code-block:: bash

    pytest -p no:NAME

@@ -28,7 +28,6 @@ Here are some examples of projects using ``pytest`` (please send notes via :ref:
* `sentry <https://getsentry.com/welcome/>`_, realtime app-maintenance and exception tracking
* `Astropy <http://www.astropy.org/>`_ and `affiliated packages <http://www.astropy.org/affiliated/index.html>`_
* `tox <http://testrun.org/tox>`_, virtualenv/Hudson integration tool
* `PIDA <http://pida.co.uk>`_ framework for integrated development
* `PyPM <http://code.activestate.com/pypm/>`_ ActiveState's package manager
* `Fom <http://packages.python.org/Fom/>`_ a fluid object mapper for FluidDB
* `applib <https://github.com/ActiveState/applib>`_ cross-platform utilities

@@ -37,10 +36,10 @@ Here are some examples of projects using ``pytest`` (please send notes via :ref:
* `mwlib <https://pypi.org/project/mwlib/>`_ mediawiki parser and utility library
* `The Translate Toolkit <http://translate.sourceforge.net/wiki/toolkit/index>`_ for localization and conversion
* `execnet <http://codespeak.net/execnet>`_ rapid multi-Python deployment
* `pylib <https://py.readthedocs.io>`_ cross-platform path, IO, dynamic code library
* `Pacha <http://pacha.cafepais.com/>`_ configuration management in five minutes
* `pylib <https://pylib.readthedocs.io/en/stable/>`_ cross-platform path, IO, dynamic code library
* `bbfreeze <https://pypi.org/project/bbfreeze/>`_ create standalone executables from Python scripts
* `pdb++ <http://bitbucket.org/antocuni/pdb>`_ a fancier version of PDB
* `pdb++ <https://github.com/pdbpp/pdbpp>`_ a fancier version of PDB
* `pudb <https://github.com/inducer/pudb>`_ full-screen console debugger for python
* `py-s3fuse <http://code.google.com/p/py-s3fuse/>`_ Amazon S3 FUSE based filesystem
* `waskr <http://code.google.com/p/waskr/>`_ WSGI Stats Middleware
* `guachi <http://code.google.com/p/guachi/>`_ global persistent configs for Python modules

@@ -77,7 +76,7 @@ Some organisations using pytest
* `Tandberg <http://www.tandberg.com/>`_
* `Shootq <http://web.shootq.com/>`_
* `Stups department of Heinrich Heine University Duesseldorf <http://www.stups.uni-duesseldorf.de/projects.php>`_
* `cellzome <http://www.cellzome.com/>`_
* cellzome
* `Open End, Gothenborg <http://www.openend.se>`_
* `Laboratory of Bioinformatics, Warsaw <http://genesilico.pl/>`_
* `merlinux, Germany <http://merlinux.eu>`_

@@ -7,8 +7,8 @@ Python 3.4's last release is scheduled for
`March 2019 <https://www.python.org/dev/peps/pep-0429/#release-schedule>`__. pytest is one of
the participating projects of the https://python3statement.org.

The **pytest 4.6** series will be the last to support Python 2.7 and 3.4, and is scheduled
to be released by **mid-2019**. **pytest 5.0** and onwards will support only Python 3.5+.
The **pytest 4.6** series is the last to support Python 2.7 and 3.4, and was released in
**June 2019**. **pytest 5.0** and onwards will support only Python 3.5+.

Thanks to the `python_requires`_ ``setuptools`` option,
Python 2.7 and Python 3.4 users using a modern ``pip`` version

@@ -24,3 +24,8 @@ branch will continue to exist so the community itself can contribute patches. Th
be happy to accept those patches and make new ``4.6`` releases **until mid-2020**.

.. _`python_requires`: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires

Technical Aspects
-----------------

The technical aspects of the Python 2.7 and 3.4 support plan (such as when releases will occur, how to backport fixes, etc.) are described in issue `#5275 <https://github.com/pytest-dev/pytest/issues/5275>`__.

@@ -22,7 +22,9 @@ Consider this file and directory layout::
    |- test_foo.py

When executing::
When executing:

.. code-block:: bash

    pytest root/

@@ -54,7 +56,9 @@ Consider this file and directory layout::
    |- test_foo.py

When executing::
When executing:

.. code-block:: bash

    pytest root/

@@ -68,6 +72,8 @@ imported in the global import namespace.

This is also discussed in details in :ref:`test discovery`.

.. _`pytest vs python -m pytest`:

Invoking ``pytest`` versus ``python -m pytest``
-----------------------------------------------

@@ -1,5 +1,5 @@
Reference
=========
API Reference
=============

This page contains the full reference to pytest's API.

@@ -27,6 +27,8 @@ pytest.skip

.. autofunction:: _pytest.outcomes.skip(msg, [allow_module_level=False])

.. _`pytest.importorskip ref`:

pytest.importorskip
~~~~~~~~~~~~~~~~~~~

@@ -57,7 +59,7 @@ pytest.raises

**Tutorial**: :ref:`assertraises`.

.. autofunction:: pytest.raises(expected_exception: Exception, [match], [message])
.. autofunction:: pytest.raises(expected_exception: Exception, [match])
    :with: excinfo

pytest.deprecated_call

@@ -469,9 +471,11 @@ testdir
This fixture provides a :class:`Testdir` instance useful for black-box testing of test files, making it ideal to
test plugins.

To use it, include in your top-most ``conftest.py`` file::
To use it, include in your top-most ``conftest.py`` file:

    pytest_plugins = 'pytester'

.. code-block:: python

    pytest_plugins = "pytester"

@@ -999,7 +1003,9 @@ passed multiple times. The expected format is ``name=value``. For example::

    [pytest]
    addopts = --maxfail=2 -rf # exit after 2 failures, report fail info

issuing ``pytest test_hello.py`` actually means::
issuing ``pytest test_hello.py`` actually means:

.. code-block:: bash

    pytest --maxfail=2 -rf test_hello.py

@@ -145,7 +145,7 @@ You can use the ``skipif`` marker (as any other marker) on classes:

.. code-block:: python

    @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
    class TestPosixCalls(object):
    class TestPosixCalls:
        def test_function(self):
            "will not be setup or run under 'win32' platform"

@@ -179,14 +179,17 @@ information.
Skipping on a missing import dependency
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can use the following helper at module level
or within a test or test setup function::
You can skip tests on a missing import by using :ref:`pytest.importorskip ref`
at module level, within a test, or test setup function.

.. code-block:: python

    docutils = pytest.importorskip("docutils")

If ``docutils`` cannot be imported here, this will lead to a
skip outcome of the test. You can also skip based on the
version number of a library::
If ``docutils`` cannot be imported here, this will lead to a skip outcome of
the test. You can also skip based on the version number of a library:

.. code-block:: python

    docutils = pytest.importorskip("docutils", minversion="0.3")
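Since ``pytest.importorskip`` returns the imported module, a short illustrative usage sketch:

.. code-block:: python

    import pytest

    # skip all tests in this module unless docutils >= 0.3 is importable
    docutils = pytest.importorskip("docutils", minversion="0.3")


    def test_docutils_available():
        # the returned module object is usable directly
        assert docutils.__version__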
@@ -223,7 +226,9 @@ XFail: mark test functions as expected to fail
----------------------------------------------

You can use the ``xfail`` marker to indicate that you
expect a test to fail::
expect a test to fail:

.. code-block:: python

    @pytest.mark.xfail
    def test_function():
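A complete minimal version of this marker in use, with an illustrative reason:

.. code-block:: python

    import pytest


    @pytest.mark.xfail(reason="known issue, tracked upstream")
    def test_function():
        # this failure is reported as "xfailed" rather than "failed"
        assert False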
@@ -346,7 +351,7 @@ Running it with the report-on-xfail option gives this output:

    example $ pytest -rx xfail_demo.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR/example
    collected 7 items

@@ -366,7 +371,7 @@ Running it with the report-on-xfail option gives this output:
    XFAIL xfail_demo.py::test_hello6
    reason: reason
    XFAIL xfail_demo.py::test_hello7
    ======================== 7 xfailed in 0.12 seconds =========================
    ============================ 7 xfailed in 0.12s ============================

.. _`skip/xfail with parametrize`:

doc/en/sponsor.rst (new file, 26 lines)

@@ -0,0 +1,26 @@
Sponsor
=======

pytest is maintained by a team of volunteers from all around the world in their free time. While
we work on pytest because we love the project and use it daily in our day jobs, monetary
compensation when possible is welcome to justify time away from friends, family and personal time.

Money is also used to fund local sprints, merchandising (stickers to distribute in conferences for example)
and every few years a large sprint involving all members.

OpenCollective
--------------

`Open Collective`_ is an online funding platform for open and transparent communities.
It provides tools to raise money and share your finances in full transparency.

It is the platform of choice for individuals and companies that want to make one-time or
monthly donations directly to the project.

See more details in the `pytest collective`_.

.. _Tidelift: https://tidelift.com
.. _Tidelift subscription: https://tidelift.com/subscription/pkg/pypi-pytest
.. _Open Collective: https://opencollective.com
.. _pytest collective: https://opencollective.com/pytest

@@ -2,13 +2,6 @@
Talks and Tutorials
==========================

..
    .. sidebar:: Next Open Trainings

        `Professional Testing with Python
        <http://www.python-academy.com/courses/specialtopics/python_course_testing.html>`_,
        26-28 April 2017, Leipzig, Germany.

.. _`funcargs`: funcargs.html

Books

@@ -23,6 +16,8 @@ Books
Talks and blog postings
---------------------------------------------

- `pytest: recommendations, basic packages for testing in Python and Django, Andreu Vallbona, PyBCN June 2019 <https://www.slideshare.net/AndreuVallbonaPlazas/pybcn-pytest-recomendaciones-paquetes-bsicos-para-testing-en-python-y-django>`_.

- pytest: recommendations, basic packages for testing in Python and Django, Andreu Vallbona, PyconES 2017 (`slides in english <http://talks.apsl.io/testing-pycones-2017/>`_, `video in spanish <https://www.youtube.com/watch?v=K20GeR-lXDk>`_)

- `pytest advanced, Andrew Svetlov (Russian, PyCon Russia, 2016)

@@ -1,4 +1,45 @@
pytest for enterprise
=====================

`Tidelift`_ is working with the maintainers of pytest and thousands of other
open source projects to deliver commercial support and maintenance for the open source dependencies you use
to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the
exact dependencies you use.

`Get more details <https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=enterprise>`_

.. include:: ../../TIDELIFT.rst
The Tidelift Subscription is a managed open source subscription for application dependencies covering millions of open source projects across JavaScript, Python, Java, PHP, Ruby, .NET, and more.

Your subscription includes:

* **Security updates**

  - Tidelift's security response team coordinates patches for new breaking security vulnerabilities and alerts immediately through a private channel, so your software supply chain is always secure.

* **Licensing verification and indemnification**

  - Tidelift verifies license information to enable easy policy enforcement and adds intellectual property indemnification to cover creators and users in case something goes wrong. You always have a 100% up-to-date bill of materials for your dependencies to share with your legal team, customers, or partners.

* **Maintenance and code improvement**

  - Tidelift ensures the software you rely on keeps working as long as you need it to work. Your managed dependencies are actively maintained and we recruit additional maintainers where required.

* **Package selection and version guidance**

  - Tidelift helps you choose the best open source packages from the start—and then guide you through updates to stay on the best releases as new issues arise.

* **Roadmap input**

  - Take a seat at the table with the creators behind the software you use. Tidelift's participating maintainers earn more income as their software is used by more subscribers, so they're interested in knowing what you need.

* **Tooling and cloud integration**

  - Tidelift works with GitHub, GitLab, BitBucket, and every cloud platform (and other deployment targets, too).

The end result? All of the capabilities you expect from commercial-grade software, for the full breadth of open
source you use. That means less time grappling with esoteric open source trivia, and more time building your own
applications—and your business.

`Request a demo <https://tidelift.com/subscription/request-a-demo?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=enterprise>`_

.. _Tidelift: https://tidelift.com

@@ -41,7 +41,7 @@ Running this would result in a passed test except for the last

    $ pytest test_tmp_path.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 1 item

@@ -64,7 +64,7 @@ Running this would result in a passed test except for the last
    E assert 0

    test_tmp_path.py:13: AssertionError
    ========================= 1 failed in 0.12 seconds =========================
    ============================ 1 failed in 0.12s =============================

.. _`tmp_path_factory example`:

@@ -90,10 +90,14 @@ provide a temporary directory unique to the test invocation,
created in the `base temporary directory`_.

``tmpdir`` is a `py.path.local`_ object which offers ``os.path`` methods
and more. Here is an example test usage::
and more. Here is an example test usage:

.. code-block:: python

    # content of test_tmpdir.py
    import os


    def test_create_file(tmpdir):
        p = tmpdir.mkdir("sub").join("hello.txt")
        p.write("content")

@@ -108,7 +112,7 @@ Running this would result in a passed test except for the last

    $ pytest test_tmpdir.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 1 item

@@ -128,8 +132,8 @@ Running this would result in a passed test except for the last
    > assert 0
    E assert 0

    test_tmpdir.py:7: AssertionError
    ========================= 1 failed in 0.12 seconds =========================
    test_tmpdir.py:9: AssertionError
    ============================ 1 failed in 0.12s =============================

.. _`tmpdir factory example`:

@@ -10,7 +10,9 @@ It's meant for leveraging existing ``unittest``-based test suites
to use pytest as a test runner and also allow to incrementally adapt
the test suite to take full advantage of pytest's features.

To run an existing ``unittest``-style test suite using ``pytest``, type::
To run an existing ``unittest``-style test suite using ``pytest``, type:

.. code-block:: bash

    pytest tests

@@ -78,7 +80,9 @@ Running your unittest with ``pytest`` allows you to use its
tests. Assuming you have at least skimmed the pytest fixture features,
let's jump-start into an example that integrates a pytest ``db_class``
fixture, setting up a class-cached database object, and then reference
it from a unittest-style test::
it from a unittest-style test:

.. code-block:: python

    # content of conftest.py

@@ -87,10 +91,12 @@ it from a unittest-style test::

    import pytest


    @pytest.fixture(scope="class")
    def db_class(request):
        class DummyDB(object):
        class DummyDB:
            pass

        # set a class attribute on the invoking test context
        request.cls.db = DummyDB()

@@ -103,21 +109,24 @@ as the ``cls`` attribute, denoting the class from which the fixture
is used. This architecture de-couples fixture writing from actual test
code and allows re-use of the fixture by a minimal reference, the fixture
name. So let's write an actual ``unittest.TestCase`` class using our
fixture definition::
fixture definition:

.. code-block:: python

    # content of test_unittest_db.py

    import unittest
    import pytest


    @pytest.mark.usefixtures("db_class")
    class MyTest(unittest.TestCase):
        def test_method1(self):
            assert hasattr(self, "db")
            assert 0, self.db # fail for demo purposes
            assert 0, self.db  # fail for demo purposes

        def test_method2(self):
            assert 0, self.db # fail for demo purposes
            assert 0, self.db  # fail for demo purposes

The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that
the pytest fixture function ``db_class`` is called once per class.

@@ -128,7 +137,7 @@ the ``self.db`` values in the traceback:

    $ pytest test_unittest_db.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 2 items

@@ -142,22 +151,22 @@ the ``self.db`` values in the traceback:

    def test_method1(self):
        assert hasattr(self, "db")
    > assert 0, self.db # fail for demo purposes
    > assert 0, self.db  # fail for demo purposes
    E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
    E assert 0

    test_unittest_db.py:9: AssertionError
    test_unittest_db.py:10: AssertionError
    ___________________________ MyTest.test_method2 ____________________________

    self = <test_unittest_db.MyTest testMethod=test_method2>

    def test_method2(self):
    > assert 0, self.db # fail for demo purposes
    > assert 0, self.db  # fail for demo purposes
    E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
    E assert 0

    test_unittest_db.py:12: AssertionError
    ========================= 2 failed in 0.12 seconds =========================
    test_unittest_db.py:13: AssertionError
    ============================ 2 failed in 0.12s =============================

This default pytest traceback shows that the two test methods
share the same ``self.db`` instance which was our intention

@@ -179,17 +188,19 @@ Let's look at an ``initdir`` fixture which makes all test methods of a
``TestCase`` class execute in a temporary directory with a
pre-initialized ``samplefile.ini``. Our ``initdir`` fixture itself uses
the pytest builtin :ref:`tmpdir <tmpdir>` fixture to delegate the
creation of a per-test temporary directory::
creation of a per-test temporary directory:

.. code-block:: python

    # content of test_unittest_cleandir.py
    import pytest
    import unittest

    class MyTest(unittest.TestCase):


    class MyTest(unittest.TestCase):
        @pytest.fixture(autouse=True)
        def initdir(self, tmpdir):
            tmpdir.chdir() # change to pytest-provided temporary directory
            tmpdir.chdir()  # change to pytest-provided temporary directory
            tmpdir.join("samplefile.ini").write("# testdata")

        def test_method(self):

@@ -208,7 +219,7 @@ Running this test module ...:

    $ pytest -q test_unittest_cleandir.py
    . [100%]
    1 passed in 0.12 seconds
    1 passed in 0.12s

... gives us one passed test because the ``initdir`` fixture function
was executed ahead of the ``test_method``.

@@ -33,7 +33,19 @@ Running ``pytest`` can result in six different exit codes:
:Exit code 4: pytest command line usage error
:Exit code 5: No tests were collected

They are represented by the :class:`_pytest.main.ExitCode` enum.
They are represented by the :class:`_pytest.main.ExitCode` enum. The exit codes being a part of the public API can be imported and accessed directly using:

.. code-block:: python

    from pytest import ExitCode
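For instance, a programmatic run can compare the value returned by ``pytest.main`` against the enum members; the target path here is illustrative:

.. code-block:: python

    import pytest
    from pytest import ExitCode

    # pytest.main returns the same code the command line would exit with
    code = pytest.main(["--collect-only", "tests/"])
    assert code in (ExitCode.OK, ExitCode.NO_TESTS_COLLECTED)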
.. note::

    If you would like to customize the exit code in some scenarios, specially when
    no tests are collected, consider using the
    `pytest-custom_exit_code <https://github.com/yashtodi94/pytest-custom_exit_code>`__
    plugin.

Getting help on version, option names, environment variables
--------------------------------------------------------------

@@ -204,7 +216,7 @@ Example:

    $ pytest -ra
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 6 items

@@ -235,7 +247,7 @@ Example:
    XPASS test_example.py::test_xpass always xfail
    ERROR test_example.py::test_error - assert 0
    FAILED test_example.py::test_fail - assert 0
    = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===

The ``-r`` options accepts a number of characters after it, with ``a`` used
above meaning "all except passes".

@@ -258,7 +270,7 @@ More than one character can be used, so for example to only see failed and skipp

    $ pytest -rfs
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 6 items

@@ -285,7 +297,7 @@ More than one character can be used, so for example to only see failed and skipp
    ========================= short test summary info ==========================
    FAILED test_example.py::test_fail - assert 0
    SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test
    = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===

Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had
captured output:

@@ -294,7 +306,7 @@ captured output:

    $ pytest -rpP
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 6 items

@@ -324,7 +336,7 @@ captured output:
    ok
    ========================= short test summary info ==========================
    PASSED test_example.py::test_ok
    = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
    == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===

.. _pdb-option:

@@ -640,7 +652,7 @@ to all tests.
    record_testsuite_property("STORAGE_TYPE", "CEPH")


class TestMe(object):
class TestMe:
    def test_foo(self):
        assert True

@@ -706,6 +718,11 @@ for example ``-x`` if you only want to send one particular failure.

Currently only pasting to the http://bpaste.net service is implemented.

.. versionchanged:: 5.2

    If creating the URL fails for any reason, a warning is generated instead of failing the
    entire test suite.

Early loading plugins
---------------------

@@ -742,24 +759,33 @@ Calling pytest from Python code

You can invoke ``pytest`` from Python code directly::
You can invoke ``pytest`` from Python code directly:

.. code-block:: python

    pytest.main()

this acts as if you would call "pytest" from the command line.
It will not raise ``SystemExit`` but return the exitcode instead.
You can pass in options and arguments::
You can pass in options and arguments:

    pytest.main(['-x', 'mytestdir'])
.. code-block:: python

You can specify additional plugins to ``pytest.main``::
    pytest.main(["-x", "mytestdir"])

You can specify additional plugins to ``pytest.main``:

.. code-block:: python

    # content of myinvoke.py
    import pytest
    class MyPlugin(object):


    class MyPlugin:
        def pytest_sessionfinish(self):
            print("*** test run reporting finishing")


    pytest.main(["-qq"], plugins=[MyPlugin()])

Running it will show that ``MyPlugin`` was added and its
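Because ``pytest.main`` returns the exit code rather than raising ``SystemExit``, the invocation in ``myinvoke.py`` can capture it; a short sketch extending the example above:

.. code-block:: python

    # sketch: capture the exit code of the in-process run shown in myinvoke.py
    exit_code = pytest.main(["-qq"], plugins=[MyPlugin()])
    print("pytest finished with exit code", exit_code)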
@@ -28,7 +28,7 @@ Running pytest now produces this output:

    $ pytest test_show_warnings.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 1 item

@@ -41,7 +41,7 @@ Running pytest now produces this output:
    warnings.warn(UserWarning("api v1, should use functions from v2"))

    -- Docs: https://docs.pytest.org/en/latest/warnings.html
    =================== 1 passed, 1 warnings in 0.12 seconds ===================
    ====================== 1 passed, 1 warnings in 0.12s =======================

The ``-W`` flag can be passed to control which warnings will be displayed or even turn
them into errors:

@@ -64,7 +64,7 @@ them into errors:
    E UserWarning: api v1, should use functions from v2

    test_show_warnings.py:5: UserWarning
    1 failed in 0.12 seconds
    1 failed in 0.12s

The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option.
For example, the configuration below will ignore all user warnings, but will transform

@@ -127,7 +127,7 @@ decorator or to all tests in a module by setting the ``pytestmark`` variable:
*Credits go to Florian Schulze for the reference implementation in the* `pytest-warnings`_
*plugin.*

.. _`-W option`: https://docs.python.org/3/using/cmdline.html?highlight=#cmdoption-W
.. _`-W option`: https://docs.python.org/3/using/cmdline.html#cmdoption-w
.. _warnings.simplefilter: https://docs.python.org/3/library/warnings.html#warnings.simplefilter
.. _`pytest-warnings`: https://github.com/fschulze/pytest-warnings

@@ -180,6 +180,7 @@ This will ignore all warnings of type ``DeprecationWarning`` where the start of
the regular expression ``".*U.*mode is deprecated"``.

.. note::

    If warnings are configured at the interpreter level, using
    the `PYTHONWARNINGS <https://docs.python.org/3/using/cmdline.html#envvar-PYTHONWARNINGS>`_ environment variable or the
    ``-W`` command-line option, pytest will not configure any filters by default.

@@ -277,7 +278,9 @@ argument ``match`` to assert that the exception matches a text or regex::
    ...
    Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted...

You can also call ``pytest.warns`` on a function or code string::
You can also call ``pytest.warns`` on a function or code string:

.. code-block:: python

    pytest.warns(expected_warning, func, *args, **kwargs)
    pytest.warns(expected_warning, "func(*args, **kwargs)")
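A short sketch of the context-manager form with ``match``; the warning text is illustrative:

.. code-block:: python

    import warnings

    import pytest


    def test_warning_matches():
        # assert that the block emits a UserWarning whose message matches the regex
        with pytest.warns(UserWarning, match="must be 0 or None"):
            warnings.warn("value must be 0 or None", UserWarning)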
@@ -404,14 +407,14 @@ defines an ``__init__`` constructor, as this prevents the class from being insta
|
||||
class Test:
|
||||
|
||||
-- Docs: https://docs.pytest.org/en/latest/warnings.html
|
||||
1 warnings in 0.12 seconds
|
||||
1 warnings in 0.12s
|
||||
|
||||
These warnings might be filtered using the same builtin mechanisms used to filter other types of warnings.
|
||||
|
||||
Please read our :ref:`backwards-compatibility` to learn how we proceed about deprecating and eventually removing
|
||||
features.
|
||||
|
||||
The following warning types ares used by pytest and are part of the public API:
|
||||
The following warning types are used by pytest and are part of the public API:
|
||||
|
||||
.. autoclass:: pytest.PytestWarning
|
||||
|
||||
@@ -430,5 +433,3 @@ The following warning types ares used by pytest and are part of the public API:
|
||||
.. autoclass:: pytest.PytestUnhandledCoroutineWarning
|
||||
|
||||
.. autoclass:: pytest.PytestUnknownMarkWarning
|
||||
|
||||
.. autoclass:: pytest.RemovedInPytest4Warning
|
||||
|
||||
@@ -164,7 +164,7 @@ If a package is installed this way, ``pytest`` will load
|
||||
.. note::
|
||||
|
||||
Make sure to include ``Framework :: Pytest`` in your list of
|
||||
`PyPI classifiers <https://python-packaging-user-guide.readthedocs.io/distributing/#classifiers>`_
|
||||
`PyPI classifiers <https://pypi.org/classifiers/>`_
|
||||
to make it easy for users to find your plugin.
|
||||
|
||||
|
||||
@@ -429,7 +429,7 @@ additionally it is possible to copy examples for an example folder before runnin
|
||||
|
||||
$ pytest
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
collected 2 items
|
||||
@@ -442,7 +442,7 @@ additionally it is possible to copy examples for an example folder before runnin
|
||||
testdir.copy_example("test_example.py")
|
||||
|
||||
-- Docs: https://docs.pytest.org/en/latest/warnings.html
|
||||
=================== 2 passed, 1 warnings in 0.12 seconds ===================
|
||||
====================== 2 passed, 1 warnings in 0.12s =======================
|
||||
|
||||
For more information about the result object that ``runpytest()`` returns, and
|
||||
the methods that it provides please check out the :py:class:`RunResult
|
||||
@@ -621,12 +621,61 @@ the new plugin:
|
||||
|
||||
Hooks are usually declared as do-nothing functions that contain only
|
||||
documentation describing when the hook will be called and what return values
|
||||
are expected.
|
||||
are expected. The names of the functions must start with `pytest_` otherwise pytest won't recognize them.
|
||||
|
||||
For an example, see `newhooks.py`_ from `xdist <https://github.com/pytest-dev/pytest-xdist>`_.
|
||||
Here's an example. Let's assume this code is in the ``hooks.py`` module.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def pytest_my_hook(config):
|
||||
"""
|
||||
Receives the pytest config and does things with it
|
||||
"""
|
||||
|
||||
To register the hooks with pytest they need to be structured in their own module or class. This
|
||||
class or module can then be passed to the ``pluginmanager`` using the ``pytest_addhooks`` function
|
||||
(which itself is a hook exposed by pytest).
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def pytest_addhooks(pluginmanager):
|
||||
""" This example assumes the hooks are grouped in the 'hooks' module. """
|
||||
from my_app.tests import hooks
|
||||
|
||||
pluginmanager.add_hookspecs(hooks)
|
||||
|
||||
For a real world example, see `newhooks.py`_ from `xdist <https://github.com/pytest-dev/pytest-xdist>`_.
|
||||
|
||||
.. _`newhooks.py`: https://github.com/pytest-dev/pytest-xdist/blob/974bd566c599dc6a9ea291838c6f226197208b46/xdist/newhooks.py
|
||||
|
||||
Hooks may be called both from fixtures or from other hooks. In both cases, hooks are called
|
||||
through the ``hook`` object, available in the ``config`` object. Most hooks receive a
|
||||
``config`` object directly, while fixtures may use the ``pytestconfig`` fixture which provides the same object.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@pytest.fixture()
|
||||
def my_fixture(pytestconfig):
|
||||
# call the hook called "pytest_my_hook"
|
||||
# 'result' will be a list of return values from all registered functions.
|
||||
result = pytestconfig.hook.pytest_my_hook(config=pytestconfig)
|
||||
|
||||
.. note::
|
||||
Hooks receive parameters using only keyword arguments.
|
||||
|
||||
Now your hook is ready to be used. To register a function at the hook, other plugins or users must
|
||||
now simply define the function ``pytest_my_hook`` with the correct signature in their ``conftest.py``.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def pytest_my_hook(config):
|
||||
"""
|
||||
Print all active hooks to the screen.
|
||||
"""
|
||||
print(config.hook)
|
||||
|
||||
|
||||
Optionally using hooks from 3rd party plugins
|
||||
---------------------------------------------
|
||||
@@ -644,7 +693,7 @@ declaring the hook functions directly in your plugin module, for example:
|
||||
# contents of myplugin.py
|
||||
|
||||
|
||||
class DeferPlugin(object):
|
||||
class DeferPlugin:
|
||||
"""Simple plugin to defer pytest-xdist hook functions."""
|
||||
|
||||
def pytest_testnodedown(self, node, error):
|
||||
|
||||
@@ -27,11 +27,14 @@ Module level setup/teardown
|
||||
|
||||
If you have multiple test functions and test classes in a single
|
||||
module you can optionally implement the following fixture methods
|
||||
which will usually be called once for all the functions::
|
||||
which will usually be called once for all the functions:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def setup_module(module):
|
||||
""" setup any state specific to the execution of the given module."""
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
""" teardown any state that was previously setup with a setup_module
|
||||
method.
|
||||
@@ -43,7 +46,9 @@ Class level setup/teardown
|
||||
----------------------------------
|
||||
|
||||
Similarly, the following methods are called at class level before
|
||||
and after all test methods of the class are called::
|
||||
and after all test methods of the class are called:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
@@ -51,6 +56,7 @@ and after all test methods of the class are called::
|
||||
usually contains tests).
|
||||
"""
|
||||
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
""" teardown any state that was previously setup with a call to
|
||||
@@ -60,13 +66,16 @@ and after all test methods of the class are called::
|
||||
Method and function level setup/teardown
|
||||
-----------------------------------------------
|
||||
|
||||
Similarly, the following methods are called around each method invocation::
|
||||
Similarly, the following methods are called around each method invocation:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def setup_method(self, method):
|
||||
""" setup any state tied to the execution of the given method in a
|
||||
class. setup_method is invoked for every test method of a class.
|
||||
"""
|
||||
|
||||
|
||||
def teardown_method(self, method):
|
||||
""" teardown any state that was previously setup with a setup_method
|
||||
call.
|
||||
@@ -75,13 +84,16 @@ Similarly, the following methods are called around each method invocation::
|
||||
As of pytest-3.0, the ``method`` parameter is optional.
|
||||
|
||||
If you would rather define test functions directly at module level
|
||||
you can also use the following functions to implement fixtures::
|
||||
you can also use the following functions to implement fixtures:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def setup_function(function):
|
||||
""" setup any state tied to the execution of the given function.
|
||||
Invoked for every test function in the module.
|
||||
"""
|
||||
|
||||
|
||||
def teardown_function(function):
|
||||
""" teardown any state that was previously setup with a setup_function
|
||||
call.
|
||||
|
||||
@@ -30,6 +30,11 @@ template = "changelog/_template.rst"
|
||||
name = "Features"
|
||||
showcontent = true
|
||||
|
||||
[[tool.towncrier.type]]
|
||||
directory = "improvement"
|
||||
name = "Improvements"
|
||||
showcontent = true
|
||||
|
||||
[[tool.towncrier.type]]
|
||||
directory = "bugfix"
|
||||
name = "Bug Fixes"
|
||||
|
||||
95
scripts/publish_gh_release_notes.py
Normal file
95
scripts/publish_gh_release_notes.py
Normal file
@@ -0,0 +1,95 @@
|
||||
"""
|
||||
Script used to publish GitHub release notes extracted from CHANGELOG.rst.
|
||||
|
||||
This script is meant to be executed after a successful deployment in Travis.
|
||||
|
||||
Uses the following environment variables:
|
||||
|
||||
* GIT_TAG: the name of the tag of the current commit.
|
||||
* GH_RELEASE_NOTES_TOKEN: a personal access token with 'repo' permissions. It should be encrypted using:
|
||||
|
||||
$travis encrypt GH_RELEASE_NOTES_TOKEN=<token> -r pytest-dev/pytest
|
||||
|
||||
And the contents pasted in the ``deploy.env.secure`` section in the ``travis.yml`` file.
|
||||
|
||||
The script also requires ``pandoc`` to be previously installed in the system.
|
||||
|
||||
Requires Python3.6+.
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import github3
|
||||
import pypandoc
|
||||
|
||||
|
||||
def publish_github_release(slug, token, tag_name, body):
|
||||
github = github3.login(token=token)
|
||||
owner, repo = slug.split("/")
|
||||
repo = github.repository(owner, repo)
|
||||
return repo.create_release(tag_name=tag_name, body=body)
|
||||
|
||||
|
||||
def parse_changelog(tag_name):
|
||||
p = Path(__file__).parent.parent / "CHANGELOG.rst"
|
||||
changelog_lines = p.read_text(encoding="UTF-8").splitlines()
|
||||
|
||||
title_regex = re.compile(r"pytest (\d\.\d+\.\d+) \(\d{4}-\d{2}-\d{2}\)")
|
||||
consuming_version = False
|
||||
version_lines = []
|
||||
for line in changelog_lines:
|
||||
m = title_regex.match(line)
|
||||
if m:
|
||||
# found the version we want: start to consume lines until we find the next version title
|
||||
if m.group(1) == tag_name:
|
||||
consuming_version = True
|
||||
# found a new version title while parsing the version we want: break out
|
||||
elif consuming_version:
|
||||
break
|
||||
if consuming_version:
|
||||
version_lines.append(line)
|
||||
|
||||
return "\n".join(version_lines)
|
||||
|
||||
|
||||
def convert_rst_to_md(text):
|
||||
return pypandoc.convert_text(text, "md", format="rst")
|
||||
|
||||
|
||||
def main(argv):
|
||||
if len(argv) > 1:
|
||||
tag_name = argv[1]
|
||||
else:
|
||||
tag_name = os.environ.get("TRAVIS_TAG")
|
||||
if not tag_name:
|
||||
print("tag_name not given and $TRAVIS_TAG not set", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
token = os.environ.get("GH_RELEASE_NOTES_TOKEN")
|
||||
if not token:
|
||||
print("GH_RELEASE_NOTES_TOKEN not set", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
slug = os.environ.get("TRAVIS_REPO_SLUG")
|
||||
if not slug:
|
||||
print("TRAVIS_REPO_SLUG not set", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
rst_body = parse_changelog(tag_name)
|
||||
md_body = convert_rst_to_md(rst_body)
|
||||
if not publish_github_release(slug, token, tag_name, md_body):
|
||||
print("Could not publish release notes:", file=sys.stderr)
|
||||
print(md_body, file=sys.stderr)
|
||||
return 5
|
||||
|
||||
print()
|
||||
print(f"Release notes for {tag_name} published successfully:")
|
||||
print(f"https://github.com/{slug}/releases/tag/{tag_name}")
|
||||
print()
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main(sys.argv))
|
||||
@@ -13,4 +13,6 @@ fi
|
||||
python -m coverage combine
|
||||
python -m coverage xml
|
||||
python -m coverage report -m
|
||||
bash <(curl -s https://codecov.io/bash) -Z -X gcov -X coveragepy -X search -X xcode -X gcovout -X fix -f coverage.xml
|
||||
# Set --connect-timeout to work around https://github.com/curl/curl/issues/4461
|
||||
curl -S -L --connect-timeout 5 --retry 6 -s https://codecov.io/bash -o codecov-upload.sh
|
||||
bash codecov-upload.sh -Z -X fix -f coverage.xml
|
||||
|
||||
@@ -61,3 +61,11 @@ ignore =
|
||||
|
||||
[devpi:upload]
|
||||
formats = sdist.tgz,bdist_wheel
|
||||
|
||||
[mypy]
|
||||
ignore_missing_imports = True
|
||||
no_implicit_optional = True
|
||||
strict_equality = True
|
||||
warn_redundant_casts = True
|
||||
warn_return_any = True
|
||||
warn_unused_configs = True
|
||||
|
||||
9
setup.py
9
setup.py
@@ -5,13 +5,13 @@ from setuptools import setup
|
||||
INSTALL_REQUIRES = [
|
||||
"py>=1.5.0",
|
||||
"packaging",
|
||||
"attrs>=17.4.0",
|
||||
"attrs>=17.4.0", # should match oldattrs tox env.
|
||||
"more-itertools>=4.0.0",
|
||||
"atomicwrites>=1.0",
|
||||
'pathlib2>=2.2.0;python_version<"3.6"',
|
||||
'colorama;sys_platform=="win32"',
|
||||
"pluggy>=0.12,<1.0",
|
||||
"importlib-metadata>=0.12",
|
||||
'importlib-metadata>=0.12;python_version<"3.8"',
|
||||
"wcwidth",
|
||||
]
|
||||
|
||||
@@ -21,7 +21,6 @@ def main():
|
||||
use_scm_version={"write_to": "src/_pytest/_version.py"},
|
||||
setup_requires=["setuptools-scm", "setuptools>=40.0"],
|
||||
package_dir={"": "src"},
|
||||
# fmt: off
|
||||
extras_require={
|
||||
"testing": [
|
||||
"argcomplete",
|
||||
@@ -29,9 +28,9 @@ def main():
|
||||
"mock",
|
||||
"nose",
|
||||
"requests",
|
||||
],
|
||||
"xmlschema",
|
||||
]
|
||||
},
|
||||
# fmt: on
|
||||
install_requires=INSTALL_REQUIRES,
|
||||
)
|
||||
|
||||
|
||||
@@ -56,6 +56,7 @@ If things do not work right away:
|
||||
import os
|
||||
import sys
|
||||
from glob import glob
|
||||
from typing import Optional
|
||||
|
||||
|
||||
class FastFilesCompleter:
|
||||
@@ -91,7 +92,7 @@ if os.environ.get("_ARGCOMPLETE"):
|
||||
import argcomplete.completers
|
||||
except ImportError:
|
||||
sys.exit(-1)
|
||||
filescompleter = FastFilesCompleter()
|
||||
filescompleter = FastFilesCompleter() # type: Optional[FastFilesCompleter]
|
||||
|
||||
def try_argcomplete(parser):
|
||||
argcomplete.autocomplete(parser, always_complete_options=False)
|
||||
|
||||
@@ -5,6 +5,18 @@ import traceback
|
||||
from inspect import CO_VARARGS
|
||||
from inspect import CO_VARKEYWORDS
|
||||
from traceback import format_exception_only
|
||||
from types import CodeType
|
||||
from types import TracebackType
|
||||
from typing import Any
|
||||
from typing import Dict
|
||||
from typing import Generic
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Pattern
|
||||
from typing import Set
|
||||
from typing import Tuple
|
||||
from typing import TypeVar
|
||||
from typing import Union
|
||||
from weakref import ref
|
||||
|
||||
import attr
|
||||
@@ -15,11 +27,14 @@ import _pytest
|
||||
from _pytest._io.saferepr import safeformat
|
||||
from _pytest._io.saferepr import saferepr
|
||||
|
||||
if False: # TYPE_CHECKING
|
||||
from typing import Type
|
||||
|
||||
|
||||
class Code:
|
||||
""" wrapper around Python code objects """
|
||||
|
||||
def __init__(self, rawcode):
|
||||
def __init__(self, rawcode) -> None:
|
||||
if not hasattr(rawcode, "co_filename"):
|
||||
rawcode = getrawcode(rawcode)
|
||||
try:
|
||||
@@ -28,12 +43,13 @@ class Code:
|
||||
self.name = rawcode.co_name
|
||||
except AttributeError:
|
||||
raise TypeError("not a code object: {!r}".format(rawcode))
|
||||
self.raw = rawcode
|
||||
self.raw = rawcode # type: CodeType
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.raw == other.raw
|
||||
|
||||
__hash__ = None
|
||||
# Ignore type because of https://github.com/python/mypy/issues/4266.
|
||||
__hash__ = None # type: ignore
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
@@ -188,11 +204,11 @@ class TracebackEntry:
         """ path to the source code """
         return self.frame.code.path

-    def getlocals(self):
+    @property
+    def locals(self):
         """ locals of underlaying frame """
         return self.frame.f_locals

-    locals = property(getlocals, None, None, "locals of underlaying frame")
-
     def getfirstlinesource(self):
         return self.frame.code.firstlineno

@@ -255,11 +271,11 @@ class TracebackEntry:
             line = "???"
         return "  File %r:%d in %s\n  %s\n" % (fn, self.lineno + 1, name, line)

+    @property
     def name(self):
         """ co_name of underlaying code """
         return self.frame.code.raw.co_name

-    name = property(name, None, None, "co_name of underlaying code")
-

 class Traceback(list):
     """ Traceback objects encapsulate and offer higher level
@@ -340,7 +356,7 @@ class Traceback(list):
         """ return the index of the frame/TracebackEntry where recursion
             originates if appropriate, None if no recursion occurred
         """
-        cache = {}
+        cache = {}  # type: Dict[Tuple[Any, int, int], List[Dict[str, Any]]]
         for i, entry in enumerate(self):
             # id for the code.raw is needed to work around
             # the strange metaprogramming in the decorator lib from pypi
@@ -370,20 +386,52 @@ co_equal = compile(
 )


+_E = TypeVar("_E", bound=BaseException)
+
+
 @attr.s(repr=False)
-class ExceptionInfo:
+class ExceptionInfo(Generic[_E]):
     """ wraps sys.exc_info() objects and offers
     help for navigating the traceback.
     """

     _assert_start_repr = "AssertionError('assert "

-    _excinfo = attr.ib()
-    _striptext = attr.ib(default="")
-    _traceback = attr.ib(default=None)
+    _excinfo = attr.ib(type=Optional[Tuple["Type[_E]", "_E", TracebackType]])
+    _striptext = attr.ib(type=str, default="")
+    _traceback = attr.ib(type=Optional[Traceback], default=None)

     @classmethod
-    def from_current(cls, exprinfo=None):
+    def from_exc_info(
+        cls,
+        exc_info: Tuple["Type[_E]", "_E", TracebackType],
+        exprinfo: Optional[str] = None,
+    ) -> "ExceptionInfo[_E]":
+        """returns an ExceptionInfo for an existing exc_info tuple.
+
+        .. warning::
+
+            Experimental API
+
+        :param exprinfo: a text string helping to determine if we should
+                         strip ``AssertionError`` from the output, defaults
+                         to the exception message/``__str__()``
+        """
+        _striptext = ""
+        if exprinfo is None and isinstance(exc_info[1], AssertionError):
+            exprinfo = getattr(exc_info[1], "msg", None)
+            if exprinfo is None:
+                exprinfo = saferepr(exc_info[1])
+        if exprinfo and exprinfo.startswith(cls._assert_start_repr):
+            _striptext = "AssertionError: "
+
+        return cls(exc_info, _striptext)
+
+    @classmethod
+    def from_current(
+        cls, exprinfo: Optional[str] = None
+    ) -> "ExceptionInfo[BaseException]":
         """returns an ExceptionInfo matching the current traceback

         .. warning::
@@ -397,59 +445,71 @@ class ExceptionInfo:
         """
         tup = sys.exc_info()
         assert tup[0] is not None, "no current exception"
-        _striptext = ""
-        if exprinfo is None and isinstance(tup[1], AssertionError):
-            exprinfo = getattr(tup[1], "msg", None)
-            if exprinfo is None:
-                exprinfo = saferepr(tup[1])
-        if exprinfo and exprinfo.startswith(cls._assert_start_repr):
-            _striptext = "AssertionError: "
-
-        return cls(tup, _striptext)
+        assert tup[1] is not None, "no current exception"
+        assert tup[2] is not None, "no current exception"
+        exc_info = (tup[0], tup[1], tup[2])
+        return cls.from_exc_info(exc_info)

     @classmethod
-    def for_later(cls):
+    def for_later(cls) -> "ExceptionInfo[_E]":
         """return an unfilled ExceptionInfo
         """
         return cls(None)

+    def fill_unfilled(self, exc_info: Tuple["Type[_E]", _E, TracebackType]) -> None:
+        """fill an unfilled ExceptionInfo created with for_later()"""
+        assert self._excinfo is None, "ExceptionInfo was already filled"
+        self._excinfo = exc_info
+
     @property
-    def type(self):
+    def type(self) -> "Type[_E]":
         """the exception class"""
         assert (
             self._excinfo is not None
         ), ".type can only be used after the context manager exits"
         return self._excinfo[0]

     @property
-    def value(self):
+    def value(self) -> _E:
         """the exception value"""
         assert (
             self._excinfo is not None
         ), ".value can only be used after the context manager exits"
         return self._excinfo[1]

     @property
-    def tb(self):
+    def tb(self) -> TracebackType:
         """the exception raw traceback"""
         assert (
             self._excinfo is not None
         ), ".tb can only be used after the context manager exits"
         return self._excinfo[2]

     @property
-    def typename(self):
+    def typename(self) -> str:
         """the type name of the exception"""
         assert (
             self._excinfo is not None
         ), ".typename can only be used after the context manager exits"
         return self.type.__name__

     @property
-    def traceback(self):
+    def traceback(self) -> Traceback:
         """the traceback"""
         if self._traceback is None:
             self._traceback = Traceback(self.tb, excinfo=ref(self))
         return self._traceback

     @traceback.setter
-    def traceback(self, value):
+    def traceback(self, value: Traceback) -> None:
         self._traceback = value

-    def __repr__(self):
+    def __repr__(self) -> str:
         if self._excinfo is None:
             return "<ExceptionInfo for raises contextmanager>"
         return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))

-    def exconly(self, tryshort=False):
+    def exconly(self, tryshort: bool = False) -> str:
         """ return the exception as a string

         when 'tryshort' resolves to True, and the exception is a
@@ -465,11 +525,13 @@ class ExceptionInfo:
             text = text[len(self._striptext) :]
         return text

-    def errisinstance(self, exc):
+    def errisinstance(
+        self, exc: Union["Type[BaseException]", Tuple["Type[BaseException]", ...]]
+    ) -> bool:
         """ return True if the exception is an instance of exc """
         return isinstance(self.value, exc)

-    def _getreprcrash(self):
+    def _getreprcrash(self) -> "ReprFileLocation":
         exconly = self.exconly(tryshort=True)
         entry = self.traceback.getcrashentry()
         path, lineno = entry.frame.code.raw.co_filename, entry.lineno
@@ -477,13 +539,13 @@ class ExceptionInfo:

     def getrepr(
         self,
-        showlocals=False,
-        style="long",
-        abspath=False,
-        tbfilter=True,
-        funcargs=False,
-        truncate_locals=True,
-        chain=True,
+        showlocals: bool = False,
+        style: str = "long",
+        abspath: bool = False,
+        tbfilter: bool = True,
+        funcargs: bool = False,
+        truncate_locals: bool = True,
+        chain: bool = True,
     ):
         """
         Return str()able representation of this exception info.
@@ -534,7 +596,7 @@ class ExceptionInfo:
         )
         return fmt.repr_excinfo(self)

-    def match(self, regexp):
+    def match(self, regexp: "Union[str, Pattern]") -> bool:
         """
         Check whether the regular expression 'regexp' is found in the string
         representation of the exception using ``re.search``. If it matches
@@ -544,7 +606,7 @@ class ExceptionInfo:
         """
         __tracebackhide__ = True
         if not re.search(regexp, str(self.value)):
-            assert 0, "Pattern '{!s}' not found in '{!s}'".format(regexp, self.value)
+            assert 0, "Pattern {!r} not found in {!r}".format(regexp, str(self.value))
         return True
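With `ExceptionInfo` now generic in the exception type, `.value` is typed as the concrete exception, and `match()` gives an assert-backed `re.search` check. A sketch of how this API is usually reached through `pytest.raises` (function and message are made up):

```python
import pytest

def parse_age(s: str) -> int:
    n = int(s)
    if n < 0:
        raise ValueError("age must be non-negative, got {}".format(n))
    return n

def test_negative_age() -> None:
    with pytest.raises(ValueError) as excinfo:
        parse_age("-3")
    # .value is typed as ValueError, so attribute access type-checks:
    assert "non-negative" in str(excinfo.value)
    excinfo.match(r"got -3")  # asserts the pattern is found via re.search
```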
@@ -593,7 +655,7 @@ class FormattedExcinfo:
             args.append((argname, saferepr(argvalue)))
         return ReprFuncArgs(args)

-    def get_source(self, source, line_index=-1, excinfo=None, short=False):
+    def get_source(self, source, line_index=-1, excinfo=None, short=False) -> List[str]:
         """ return formatted and marked up source lines. """
         import _pytest._code
@@ -665,7 +727,7 @@ class FormattedExcinfo:
         else:
             line_index = entry.lineno - entry.getfirstlinesource()

-        lines = []
+        lines = []  # type: List[str]
         style = entry._repr_style
         if style is None:
             style = self.style
@@ -742,7 +804,7 @@ class FormattedExcinfo:
                     exc_msg=str(e),
                     max_frames=max_frames,
                     total=len(traceback),
-                )
+                )  # type: Optional[str]
                 traceback = traceback[:max_frames] + traceback[-max_frames:]
             else:
                 if recursionindex is not None:
@@ -755,10 +817,12 @@ class FormattedExcinfo:

     def repr_excinfo(self, excinfo):

-        repr_chain = []
+        repr_chain = (
+            []
+        )  # type: List[Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]]]
         e = excinfo.value
         descr = None
-        seen = set()
+        seen = set()  # type: Set[int]
         while e is not None and id(e) not in seen:
             seen.add(id(e))
             if excinfo:
@@ -811,8 +875,8 @@ class TerminalRepr:


 class ExceptionRepr(TerminalRepr):
-    def __init__(self):
-        self.sections = []
+    def __init__(self) -> None:
+        self.sections = []  # type: List[Tuple[str, str, str]]

     def addsection(self, name, content, sep="-"):
         self.sections.append((name, content, sep))
@@ -7,6 +7,7 @@ import tokenize
 import warnings
 from ast import PyCF_ONLY_AST as _AST_FLAG
 from bisect import bisect_right
+from typing import List

 import py

@@ -19,11 +20,11 @@ class Source:
     _compilecounter = 0

     def __init__(self, *parts, **kwargs):
-        self.lines = lines = []
+        self.lines = lines = []  # type: List[str]
         de = kwargs.get("deindent", True)
         for part in parts:
             if not part:
-                partlines = []
+                partlines = []  # type: List[str]
             elif isinstance(part, Source):
                 partlines = part.lines
             elif isinstance(part, (tuple, list)):
@@ -44,7 +45,8 @@ class Source:
                 return str(self) == other
         return False

-    __hash__ = None
+    # Ignore type because of https://github.com/python/mypy/issues/4266.
+    __hash__ = None  # type: ignore

     def __getitem__(self, key):
         if isinstance(key, int):
@@ -156,8 +158,7 @@ class Source:
         source = "\n".join(self.lines) + "\n"
         try:
             co = compile(source, filename, mode, flag)
-        except SyntaxError:
-            ex = sys.exc_info()[1]
+        except SyntaxError as ex:
             # re-represent syntax errors from parsing python strings
             msglines = self.lines[: ex.lineno]
             if ex.offset:
@@ -172,7 +173,8 @@ class Source:
         if flag & _AST_FLAG:
             return co
         lines = [(x + "\n") for x in self.lines]
-        linecache.cache[filename] = (1, None, lines, filename)
+        # Type ignored because linecache.cache is private.
+        linecache.cache[filename] = (1, None, lines, filename)  # type: ignore
         return co


@@ -281,7 +283,7 @@ def get_statement_startend2(lineno, node):
     return start, end


-def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
+def getstatementrange_ast(lineno, source: Source, assertion=False, astnode=None):
     if astnode is None:
         content = str(source)
         # See #4260:
@@ -2,19 +2,23 @@ import pprint
 import reprlib


-def _call_and_format_exception(call, x, *args):
+def _format_repr_exception(exc, obj):
+    exc_name = type(exc).__name__
     try:
-        # Try the vanilla repr and make sure that the result is a string
-        return call(x, *args)
-    except Exception as exc:
-        exc_name = type(exc).__name__
-        try:
-            exc_info = str(exc)
-        except Exception:
-            exc_info = "unknown"
-        return '<[{}("{}") raised in repr()] {} object at 0x{:x}>'.format(
-            exc_name, exc_info, x.__class__.__name__, id(x)
-        )
+        exc_info = str(exc)
+    except Exception:
+        exc_info = "unknown"
+    return '<[{}("{}") raised in repr()] {} object at 0x{:x}>'.format(
+        exc_name, exc_info, obj.__class__.__name__, id(obj)
+    )
+
+
+def _ellipsize(s, maxsize):
+    if len(s) > maxsize:
+        i = max(0, (maxsize - 3) // 2)
+        j = max(0, maxsize - 3 - i)
+        return s[:i] + "..." + s[len(s) - j :]
+    return s


 class SafeRepr(reprlib.Repr):
@@ -22,37 +26,24 @@ class SafeRepr(reprlib.Repr):
     and includes information on exceptions raised during the call.
     """

+    def __init__(self, maxsize):
+        super().__init__()
+        self.maxstring = maxsize
+        self.maxsize = maxsize
+
     def repr(self, x):
-        return self._callhelper(reprlib.Repr.repr, self, x)
-
-    def repr_unicode(self, x, level):
-        # Strictly speaking wrong on narrow builds
-        def repr(u):
-            if "'" not in u:
-                return "'%s'" % u
-            elif '"' not in u:
-                return '"%s"' % u
-            else:
-                return "'%s'" % u.replace("'", r"\'")
-
-        s = repr(x[: self.maxstring])
-        if len(s) > self.maxstring:
-            i = max(0, (self.maxstring - 3) // 2)
-            j = max(0, self.maxstring - 3 - i)
-            s = repr(x[:i] + x[len(x) - j :])
-            s = s[:i] + "..." + s[len(s) - j :]
-        return s
+        try:
+            s = super().repr(x)
+        except Exception as exc:
+            s = _format_repr_exception(exc, x)
+        return _ellipsize(s, self.maxsize)

     def repr_instance(self, x, level):
-        return self._callhelper(repr, x)
-
-    def _callhelper(self, call, x, *args):
-        s = _call_and_format_exception(call, x, *args)
-        if len(s) > self.maxsize:
-            i = max(0, (self.maxsize - 3) // 2)
-            j = max(0, self.maxsize - 3 - i)
-            s = s[:i] + "..." + s[len(s) - j :]
-        return s
+        try:
+            s = repr(x)
+        except Exception as exc:
+            s = _format_repr_exception(exc, x)
+        return _ellipsize(s, self.maxsize)


 def safeformat(obj):
@@ -60,7 +51,10 @@ def safeformat(obj):
     Failing __repr__ functions of user instances will be represented
     with a short exception info.
     """
-    return _call_and_format_exception(pprint.pformat, obj)
+    try:
+        return pprint.pformat(obj)
+    except Exception as exc:
+        return _format_repr_exception(exc, obj)


 def saferepr(obj, maxsize=240):
@@ -70,9 +64,4 @@ def saferepr(obj, maxsize=240):
     care to never raise exceptions itself. This function is a wrapper
     around the Repr/reprlib functionality of the standard 2.6 lib.
     """
-    # review exception handling
-    srepr = SafeRepr()
-    srepr.maxstring = maxsize
-    srepr.maxsize = maxsize
-    srepr.maxother = 160
-    return srepr.repr(obj)
+    return SafeRepr(maxsize).repr(obj)
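The refactor splits the old `_call_and_format_exception`/`_callhelper` pair into two orthogonal helpers: `_format_repr_exception` renders a failing `__repr__`, and `_ellipsize` truncates around a central `...`. The observable contract of `saferepr` stays the same; a sketch:

```python
from _pytest._io.saferepr import saferepr

class Broken:
    def __repr__(self):
        raise RuntimeError("boom")

# Never raises; the exception is folded into the output instead:
print(saferepr(Broken()))
# -> <[RuntimeError("boom") raised in repr()] Broken object at 0x...>

# Long reprs are shortened around "..." (maxsize defaults to 240):
print(saferepr("x" * 1000, maxsize=20))
```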
@@ -2,6 +2,7 @@
 support for presenting detailed information in failing assertions.
 """
 import sys
+from typing import Optional

 from _pytest.assertion import rewrite
 from _pytest.assertion import truncate
@@ -52,7 +53,9 @@ def register_assert_rewrite(*names):
             importhook = hook
             break
     else:
-        importhook = DummyRewriteHook()
+        # TODO(typing): Add a protocol for mark_rewrite() and use it
+        # for importhook and for PytestPluginManager.rewrite_hook.
+        importhook = DummyRewriteHook()  # type: ignore
     importhook.mark_rewrite(*names)
|
||||
@@ -69,7 +72,7 @@ class AssertionState:
|
||||
def __init__(self, config, mode):
|
||||
self.mode = mode
|
||||
self.trace = config.trace.root.get("assertion")
|
||||
self.hook = None
|
||||
self.hook = None # type: Optional[rewrite.AssertionRewritingHook]
|
||||
|
||||
|
||||
def install_importhook(config):
|
||||
@@ -108,6 +111,7 @@ def pytest_runtest_setup(item):
|
||||
"""
|
||||
|
||||
def callbinrepr(op, left, right):
|
||||
# type: (str, object, object) -> Optional[str]
|
||||
"""Call the pytest_assertrepr_compare hook and prepare the result
|
||||
|
||||
This uses the first result from the hook and then ensures the
|
||||
@@ -133,12 +137,13 @@ def pytest_runtest_setup(item):
|
||||
if item.config.getvalue("assertmode") == "rewrite":
|
||||
res = res.replace("%", "%%")
|
||||
return res
|
||||
return None
|
||||
|
||||
util._reprcompare = callbinrepr
|
||||
|
||||
if item.ihook.pytest_assertion_pass.get_hookimpls():
|
||||
|
||||
def call_assertion_pass_hook(lineno, expl, orig):
|
||||
def call_assertion_pass_hook(lineno, orig, expl):
|
||||
item.ihook.pytest_assertion_pass(
|
||||
item=item, lineno=lineno, orig=orig, expl=expl
|
||||
)
|
||||
|
||||
@@ -2,6 +2,7 @@
 import ast
 import errno
 import functools
+import importlib.abc
 import importlib.machinery
 import importlib.util
 import io
@@ -12,6 +13,11 @@ import struct
 import sys
 import tokenize
 import types
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Set
+from typing import Tuple

 import atomicwrites
@@ -30,7 +36,7 @@ PYC_EXT = ".py" + (__debug__ and "c" or "o")
 PYC_TAIL = "." + PYTEST_TAG + PYC_EXT


-class AssertionRewritingHook:
+class AssertionRewritingHook(importlib.abc.MetaPathFinder):
     """PEP302/PEP451 import hook which rewrites asserts."""

     def __init__(self, config):
@@ -40,13 +46,13 @@ class AssertionRewritingHook:
         except ValueError:
             self.fnpats = ["test_*.py", "*_test.py"]
         self.session = None
-        self._rewritten_names = set()
-        self._must_rewrite = set()
+        self._rewritten_names = set()  # type: Set[str]
+        self._must_rewrite = set()  # type: Set[str]
         # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
         # which might result in infinite recursion (#3506)
         self._writing_pyc = False
         self._basenames_to_check_rewrite = {"conftest"}
-        self._marked_for_rewrite_cache = {}
+        self._marked_for_rewrite_cache = {}  # type: Dict[str, bool]
         self._session_paths_checked = False

     def set_session(self, session):
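Deriving from `importlib.abc.MetaPathFinder` makes the hook's role explicit: it is an object on `sys.meta_path` whose `find_spec()` is consulted for every import. A minimal sketch of that protocol (a toy finder, not pytest's):

```python
import importlib.abc
import sys

class NoisyFinder(importlib.abc.MetaPathFinder):
    def find_spec(self, fullname, path, target=None):
        print("import requested:", fullname)
        return None  # decline, letting the remaining finders handle it

sys.meta_path.insert(0, NoisyFinder())
```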
@@ -112,24 +118,11 @@ class AssertionRewritingHook:
         write = not sys.dont_write_bytecode
         cache_dir = os.path.join(os.path.dirname(fn), "__pycache__")
         if write:
-            try:
-                os.mkdir(cache_dir)
-            except OSError:
-                e = sys.exc_info()[1].errno
-                if e == errno.EEXIST:
-                    # Either the __pycache__ directory already exists (the
-                    # common case) or it's blocked by a non-dir node. In the
-                    # latter case, we'll ignore it in _write_pyc.
-                    pass
-                elif e in {errno.ENOENT, errno.ENOTDIR}:
-                    # One of the path components was not a directory, likely
-                    # because we're in a zip file.
-                    write = False
-                elif e in {errno.EACCES, errno.EROFS, errno.EPERM}:
-                    state.trace("read only directory: %r" % os.path.dirname(fn))
-                    write = False
-                else:
-                    raise
+            ok = try_mkdir(cache_dir)
+            if not ok:
+                write = False
+                state.trace("read only directory: {}".format(os.path.dirname(fn)))

         cache_name = os.path.basename(fn)[:-3] + PYC_TAIL
         pyc = os.path.join(cache_dir, cache_name)
         # Notice that even if we're in a read-only directory, I'm going
@@ -208,7 +201,7 @@ class AssertionRewritingHook:

         return self._is_marked_for_rewrite(name, state)

-    def _is_marked_for_rewrite(self, name, state):
+    def _is_marked_for_rewrite(self, name: str, state):
         try:
             return self._marked_for_rewrite_cache[name]
         except KeyError:
@@ -223,7 +216,7 @@ class AssertionRewritingHook:
             self._marked_for_rewrite_cache[name] = False
             return False

-    def mark_rewrite(self, *names):
+    def mark_rewrite(self, *names: str) -> None:
         """Mark import names as needing to be rewritten.

         The named module or package as well as any nested modules will
@@ -390,6 +383,7 @@ def _format_boolop(explanations, is_or):


 def _call_reprcompare(ops, results, expls, each_obj):
+    # type: (Tuple[str, ...], Tuple[bool, ...], Tuple[str, ...], Tuple[object, ...]) -> str
     for i, res, expl in zip(range(len(ops)), results, expls):
         try:
             done = not res
@@ -405,11 +399,13 @@ def _call_reprcompare(ops, results, expls, each_obj):


 def _call_assertion_pass(lineno, orig, expl):
+    # type: (int, str, str) -> None
     if util._assertion_pass is not None:
-        util._assertion_pass(lineno=lineno, orig=orig, expl=expl)
+        util._assertion_pass(lineno, orig, expl)


 def _check_if_assertion_pass_impl():
+    # type: () -> bool
     """Checks if any plugins implement the pytest_assertion_pass hook
     in order not to generate explanation unecessarily (might be expensive)"""
     return True if util._assertion_pass else False
@@ -459,17 +455,18 @@ def set_location(node, lineno, col_offset):
     return node


-def _get_assertion_exprs(src: bytes):  # -> Dict[int, str]
+def _get_assertion_exprs(src: bytes) -> Dict[int, str]:
     """Returns a mapping from {lineno: "assertion test expression"}"""
-    ret = {}
+    ret = {}  # type: Dict[int, str]

     depth = 0
-    lines = []
-    assert_lineno = None
-    seen_lines = set()
+    lines = []  # type: List[str]
+    assert_lineno = None  # type: Optional[int]
+    seen_lines = set()  # type: Set[int]

     def _write_and_reset() -> None:
         nonlocal depth, lines, assert_lineno, seen_lines
         assert assert_lineno is not None
         ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\")
         depth = 0
         lines = []
@@ -477,21 +474,21 @@ def _get_assertion_exprs(src: bytes):  # -> Dict[int, str]
         seen_lines = set()

     tokens = tokenize.tokenize(io.BytesIO(src).readline)
-    for tp, src, (lineno, offset), _, line in tokens:
-        if tp == tokenize.NAME and src == "assert":
+    for tp, source, (lineno, offset), _, line in tokens:
+        if tp == tokenize.NAME and source == "assert":
             assert_lineno = lineno
         elif assert_lineno is not None:
             # keep track of depth for the assert-message `,` lookup
-            if tp == tokenize.OP and src in "([{":
+            if tp == tokenize.OP and source in "([{":
                 depth += 1
-            elif tp == tokenize.OP and src in ")]}":
+            elif tp == tokenize.OP and source in ")]}":
                 depth -= 1

             if not lines:
                 lines.append(line[offset:])
                 seen_lines.add(lineno)
             # a non-nested comma separates the expression from the message
-            elif depth == 0 and tp == tokenize.OP and src == ",":
+            elif depth == 0 and tp == tokenize.OP and source == ",":
                 # one line assert with message
                 if lineno in seen_lines and len(lines) == 1:
                     offset_in_trimmed = offset + len(lines[-1]) - len(line)
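Renaming the loop variable from `src` to `source` removes the shadowing of the function's `src: bytes` parameter by the token string. The scan itself works on the raw token stream; a reduced sketch of the same idea:

```python
import io
import tokenize

src = b"x = 1\nassert x == 1, 'oops'\n"
for tok in tokenize.tokenize(io.BytesIO(src).readline):
    if tok.type == tokenize.NAME and tok.string == "assert":
        print("assert statement starts on line", tok.start[0])  # line 2
```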
@@ -582,7 +579,7 @@ class AssertionRewriter(ast.NodeVisitor):
     def _assert_expr_to_lineno(self):
         return _get_assertion_exprs(self.source)

-    def run(self, mod):
+    def run(self, mod: ast.Module) -> None:
         """Find all assert statements in *mod* and rewrite them."""
         if not mod.body:
             # Nothing to do.
@@ -624,12 +621,12 @@ class AssertionRewriter(ast.NodeVisitor):
         ]
         mod.body[pos:pos] = imports
         # Collect asserts.
-        nodes = [mod]
+        nodes = [mod]  # type: List[ast.AST]
         while nodes:
             node = nodes.pop()
             for name, field in ast.iter_fields(node):
                 if isinstance(field, list):
-                    new = []
+                    new = []  # type: List
                     for i, child in enumerate(field):
                         if isinstance(child, ast.Assert):
                             # Transform assert.
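`run()` collects assert statements with an explicit work-list rather than recursion. A reduced sketch of that traversal pattern:

```python
import ast
from typing import List

tree = ast.parse("assert 1\n\ndef f():\n    assert 2\n")
nodes = [tree]  # type: List[ast.AST]
found = []
while nodes:
    node = nodes.pop()
    if isinstance(node, ast.Assert):
        found.append(node.lineno)
    nodes.extend(ast.iter_child_nodes(node))
print(sorted(found))  # [1, 4]
```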
@@ -703,7 +700,7 @@ class AssertionRewriter(ast.NodeVisitor):
         .explanation_param().

         """
-        self.explanation_specifiers = {}
+        self.explanation_specifiers = {}  # type: Dict[str, ast.expr]
         self.stack.append(self.explanation_specifiers)

     def pop_format_context(self, expl_expr):
@@ -746,7 +743,8 @@ class AssertionRewriter(ast.NodeVisitor):
             from _pytest.warning_types import PytestAssertRewriteWarning
             import warnings

-            warnings.warn_explicit(
+            # Ignore type: typeshed bug https://github.com/python/typeshed/pull/3121
+            warnings.warn_explicit(  # type: ignore
                 PytestAssertRewriteWarning(
                     "assertion is always true, perhaps remove parentheses?"
                 ),
@@ -755,15 +753,15 @@ class AssertionRewriter(ast.NodeVisitor):
                 lineno=assert_.lineno,
             )

-        self.statements = []
-        self.variables = []
+        self.statements = []  # type: List[ast.stmt]
+        self.variables = []  # type: List[str]
         self.variable_counter = itertools.count()

         if self.enable_assertion_pass_hook:
-            self.format_variables = []
+            self.format_variables = []  # type: List[str]

-        self.stack = []
-        self.expl_stmts = []
+        self.stack = []  # type: List[Dict[str, ast.expr]]
+        self.expl_stmts = []  # type: List[ast.stmt]
         self.push_format_context()
         # Rewrite assert into a bunch of statements.
         top_condition, explanation = self.visit(assert_.test)
@@ -862,10 +860,7 @@ class AssertionRewriter(ast.NodeVisitor):
         internally already.
         See issue #3191 for more details.
         """
-
-        # Using parse because it is different between py2 and py3.
-        AST_NONE = ast.parse("None").body[0].value
-        val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE])
+        val_is_none = ast.Compare(node, [ast.Is()], [ast.NameConstant(None)])
         send_warning = ast.parse(
             """\
 from _pytest.warning_types import PytestAssertRewriteWarning
@@ -904,7 +899,7 @@ warn_explicit(
         # Process each operand, short-circuiting if needed.
         for i, v in enumerate(boolop.values):
             if i:
-                fail_inner = []
+                fail_inner = []  # type: List[ast.stmt]
                 # cond is set in a prior loop iteration below
                 self.expl_stmts.append(ast.If(cond, fail_inner, []))  # noqa
                 self.expl_stmts = fail_inner
@@ -915,10 +910,10 @@ warn_explicit(
             call = ast.Call(app, [expl_format], [])
             self.expl_stmts.append(ast.Expr(call))
             if i < levels:
-                cond = res
+                cond = res  # type: ast.expr
                 if is_or:
                     cond = ast.UnaryOp(ast.Not(), cond)
-                inner = []
+                inner = []  # type: List[ast.stmt]
                 self.statements.append(ast.If(cond, inner, []))
                 self.statements = body = inner
         self.statements = save
@@ -984,7 +979,7 @@ warn_explicit(
         expl = pat % (res_expl, res_expl, value_expl, attr.attr)
         return res, expl

-    def visit_Compare(self, comp):
+    def visit_Compare(self, comp: ast.Compare):
         self.push_format_context()
         left_res, left_expl = self.visit(comp.left)
         if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
@@ -1017,7 +1012,30 @@ warn_explicit(
             ast.Tuple(results, ast.Load()),
         )
         if len(comp.ops) > 1:
-            res = ast.BoolOp(ast.And(), load_names)
+            res = ast.BoolOp(ast.And(), load_names)  # type: ast.expr
         else:
             res = load_names[0]
         return res, self.explanation_param(self.pop_format_context(expl_call))


+def try_mkdir(cache_dir):
+    """Attempts to create the given directory, returns True if successful"""
+    try:
+        os.mkdir(cache_dir)
+    except FileExistsError:
+        # Either the __pycache__ directory already exists (the
+        # common case) or it's blocked by a non-dir node. In the
+        # latter case, we'll ignore it in _write_pyc.
+        return True
+    except (FileNotFoundError, NotADirectoryError):
+        # One of the path components was not a directory, likely
+        # because we're in a zip file.
+        return False
+    except PermissionError:
+        return False
+    except OSError as e:
+        # as of now, EROFS doesn't have an equivalent OSError-subclass
+        if e.errno == errno.EROFS:
+            return False
+        raise
+    return True
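`try_mkdir` can drop the explicit `errno` comparisons because PEP 3151 maps common errno values onto dedicated `OSError` subclasses (`EEXIST` to `FileExistsError`, `ENOENT` to `FileNotFoundError`, `ENOTDIR` to `NotADirectoryError`, `EACCES`/`EPERM` to `PermissionError`); only `EROFS` lacks a subclass and still needs an errno check. The mapping even applies when `OSError` is constructed directly:

```python
import errno

try:
    raise OSError(errno.EEXIST, "already there")
except FileExistsError as e:
    # OSError(errno, msg) instantiates the matching subclass.
    print(type(e).__name__, e.errno)  # FileExistsError 17 (on typical platforms)
```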
@@ -1,20 +1,24 @@
 """Utilities for assertion debugging"""
 import pprint
 from collections.abc import Sequence
+from typing import Callable
+from typing import List
+from typing import Optional

 import _pytest._code
 from _pytest import outcomes
 from _pytest._io.saferepr import saferepr
+from _pytest.compat import ATTRS_EQ_FIELD

 # The _reprcompare attribute on the util module is used by the new assertion
 # interpretation code and assertion rewriter to detect this plugin was
 # loaded and in turn call the hooks defined here as part of the
 # DebugInterpreter.
-_reprcompare = None
+_reprcompare = None  # type: Optional[Callable[[str, object, object], Optional[str]]]

 # Works similarly as _reprcompare attribute. Is populated with the hook call
 # when pytest_runtest_setup is called.
-_assertion_pass = None
+_assertion_pass = None  # type: Optional[Callable[[int, str, str], None]]


 def format_explanation(explanation):
@@ -119,9 +123,9 @@ def isiterable(obj):

 def assertrepr_compare(config, op, left, right):
     """Return specialised explanations for some operators/operands"""
-    width = 80 - 15 - len(op) - 2  # 15 chars indentation, 1 space around op
-    left_repr = saferepr(left, maxsize=int(width // 2))
-    right_repr = saferepr(right, maxsize=width - len(left_repr))
+    maxsize = (80 - 15 - len(op) - 2) // 2  # 15 chars indentation, 1 space around op
+    left_repr = saferepr(left, maxsize=maxsize)
+    right_repr = saferepr(right, maxsize=maxsize)

     summary = "{} {} {}".format(left_repr, op, right_repr)
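Worked through for `op = "=="`: the budget is `80 - 15 - 2 - 2 = 61` columns, so `maxsize = 61 // 2 = 30`, and each side now gets the same 30-character allowance instead of the right side absorbing whatever the left side left over.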
@@ -177,7 +181,7 @@ def _diff_text(left, right, verbose=0):
     """
     from difflib import ndiff

-    explanation = []
+    explanation = []  # type: List[str]

     def escape_for_readable_diff(binary_text):
         """
@@ -235,7 +239,7 @@ def _compare_eq_verbose(left, right):
     left_lines = repr(left).splitlines(keepends)
     right_lines = repr(right).splitlines(keepends)

-    explanation = []
+    explanation = []  # type: List[str]
     explanation += ["-" + line for line in left_lines]
     explanation += ["+" + line for line in right_lines]
@@ -259,7 +263,7 @@ def _compare_eq_iterable(left, right, verbose=0):

 def _compare_eq_sequence(left, right, verbose=0):
     comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)
-    explanation = []
+    explanation = []  # type: List[str]
     len_left = len(left)
     len_right = len(right)
     for i in range(min(len_left, len_right)):
@@ -327,7 +331,7 @@ def _compare_eq_set(left, right, verbose=0):


 def _compare_eq_dict(left, right, verbose=0):
-    explanation = []
+    explanation = []  # type: List[str]
     set_left = set(left)
     set_right = set(right)
     common = set_left.intersection(set_right)
@@ -372,7 +376,9 @@ def _compare_eq_cls(left, right, verbose, type_fns):
         fields_to_check = [field for field, info in all_fields.items() if info.compare]
     elif isattrs(left):
         all_fields = left.__attrs_attrs__
-        fields_to_check = [field.name for field in all_fields if field.cmp]
+        fields_to_check = [
+            field.name for field in all_fields if getattr(field, ATTRS_EQ_FIELD)
+        ]

     same = []
     diff = []
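`ATTRS_EQ_FIELD` papers over attrs 19.2 renaming the attribute flag `cmp` to `eq`. The shim itself lives in `_pytest/compat.py` and is not shown in this diff; a plausible sketch of what it selects (an assumption, not the verbatim definition):

```python
import attr

# attrs >= 19.2 exposes the flag as `eq`; older releases call it `cmp`.
if getattr(attr, "__version_info__", (0,)) >= (19, 2):
    ATTRS_EQ_FIELD = "eq"
else:
    ATTRS_EQ_FIELD = "cmp"
```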
@@ -14,7 +14,7 @@ import py
 import pytest
 from .pathlib import Path
 from .pathlib import resolve_from_str
-from .pathlib import rmtree
+from .pathlib import rm_rf

 README_CONTENT = """\
 # pytest cache directory #
@@ -44,7 +44,7 @@ class Cache:
     def for_config(cls, config):
         cachedir = cls.cache_dir_from_config(config)
         if config.getoption("cacheclear") and cachedir.exists():
-            rmtree(cachedir, force=True)
+            rm_rf(cachedir)
         cachedir.mkdir()
         return cls(cachedir, config)
@@ -135,7 +135,7 @@ class Cache:
         readme_path.write_text(README_CONTENT)

         gitignore_path = self._cachedir.joinpath(".gitignore")
-        msg = "# Created by pytest automatically.\n*"
+        msg = "# Created by pytest automatically.\n*\n"
         gitignore_path.write_text(msg, encoding="UTF-8")

         cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
@@ -264,8 +264,8 @@ class NFPlugin:
         self.cached_nodeids = config.cache.get("cache/nodeids", [])

     def pytest_collection_modifyitems(self, session, config, items):
+        new_items = OrderedDict()
         if self.active:
-            new_items = OrderedDict()
             other_items = OrderedDict()
             for item in items:
                 if item.nodeid not in self.cached_nodeids:
@@ -276,7 +276,11 @@ class NFPlugin:
             items[:] = self._get_increasing_order(
                 new_items.values()
             ) + self._get_increasing_order(other_items.values())
             self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
+        else:
+            for item in items:
+                if item.nodeid not in self.cached_nodeids:
+                    new_items[item.nodeid] = item
+            self.cached_nodeids.extend(new_items)

     def _get_increasing_order(self, items):
         return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
@@ -547,6 +547,8 @@ class FDCaptureBinary:
             self.start = lambda: None
             self.done = lambda: None
         else:
+            self.start = self._start
+            self.done = self._done
             if targetfd == 0:
                 assert not tmpfile, "cannot set tmpfile with stdin"
                 tmpfile = open(os.devnull, "r")
@@ -568,7 +570,7 @@ class FDCaptureBinary:
             self.targetfd, getattr(self, "targetfd_save", None), self._state
         )

-    def start(self):
+    def _start(self):
         """ Start capturing on targetfd using memorized tmpfile. """
         try:
             os.fstat(self.targetfd_save)
@@ -585,7 +587,7 @@ class FDCaptureBinary:
         self.tmpfile.truncate()
         return res

-    def done(self):
+    def _done(self):
         """ stop capturing, restore streams, return original capture file,
         seeked to position zero. """
         targetfd_save = self.__dict__.pop("targetfd_save")
@@ -618,7 +620,8 @@ class FDCapture(FDCaptureBinary):
     snap() produces text
     """

-    EMPTY_BUFFER = str()
+    # Ignore type because it doesn't match the type in the superclass (bytes).
+    EMPTY_BUFFER = str()  # type: ignore

     def snap(self):
         res = super().snap()
@@ -679,7 +682,8 @@ class SysCapture:


 class SysCaptureBinary(SysCapture):
-    EMPTY_BUFFER = b""
+    # Ignore type because it doesn't match the type in the superclass (str).
+    EMPTY_BUFFER = b""  # type: ignore

     def snap(self):
         res = self.tmpfile.buffer.getvalue()
@@ -785,7 +789,11 @@ def _py36_windowsconsoleio_workaround(stream):

     See https://github.com/pytest-dev/py/issues/103
     """
-    if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6):
+    if (
+        not sys.platform.startswith("win32")
+        or sys.version_info[:2] < (3, 6)
+        or hasattr(sys, "pypy_version_info")
+    ):
         return

     # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
Some files were not shown because too many files have changed in this diff.