Compare commits
1214 commits (first listed: 6c5a1150d4; last listed: 2b1410895e)
.coveragerc (11 changes)
@@ -1,4 +1,9 @@
 [run]
 omit =
     # standlonetemplate is read dynamically and tested by test_genscript
     *standalonetemplate.py
+source = pytest,_pytest,testing/
+parallel = 1
+branch = 1
+
+[paths]
+source = src/
+    .tox/*/lib/python*/site-packages/
+    .tox\*\Lib\site-packages\
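A note on the `[paths]` block added above: coverage.py treats the listed entries as aliases of the first one, so when `coverage combine` merges data files from different tox environments, measured paths matching the `.tox` site-packages patterns are rewritten back to `src/` and count as the same files. A sketch of the effect (environment name illustrative):

    .tox/py37/lib/python3.7/site-packages/_pytest/config.py  ->  src/_pytest/config.py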
.gitattributes (2 changes, vendored)
@@ -1 +1 @@
-CHANGELOG merge=union
+*.bat text eol=crlf
.github/PULL_REQUEST_TEMPLATE.md (2 changes, vendored)
@@ -3,7 +3,7 @@ Thanks for submitting a PR, your contribution is really appreciated!
 Here's a quick checklist that should be present in PRs (you can delete this text from the final description, this is
 just a guideline):
 
-- [ ] Create a new changelog file in the `changelog` folder, with a name like `<ISSUE NUMBER>.<TYPE>.rst`. See [changelog/README.rst](/changelog/README.rst) for details.
+- [ ] Create a new changelog file in the `changelog` folder, with a name like `<ISSUE NUMBER>.<TYPE>.rst`. See [changelog/README.rst](https://github.com/pytest-dev/pytest/blob/master/changelog/README.rst) for details.
 - [ ] Target the `master` branch for bug fixes, documentation updates and trivial changes.
 - [ ] Target the `features` branch for new features and removals/deprecations.
 - [ ] Include documentation when adding new features.
.github/labels.toml (149 additions, vendored, new file)
@@ -0,0 +1,149 @@
+["os: cygwin"]
+color = "006b75"
+description = "cygwin platform-specific problem"
+name = "os: cygwin"
+
+["os: linux"]
+color = "1d76db"
+description = "linux platform-specific problem"
+name = "os: linux"
+
+["os: mac"]
+color = "bfdadc"
+description = "mac platform-specific problem"
+name = "os: mac"
+
+["os: windows"]
+color = "fbca04"
+description = "windows platform-specific problem"
+name = "os: windows"
+
+["plugin: argcomplete"]
+color = "d4c5f9"
+description = "related to the argcomplete builtin plugin"
+name = "plugin: argcomplete"
+
+["plugin: cache"]
+color = "c7def8"
+description = "related to the cache builtin plugin"
+name = "plugin: cache"
+
+["plugin: capture"]
+color = "1d76db"
+description = "related to the capture builtin plugin"
+name = "plugin: capture"
+
+["plugin: debugging"]
+color = "dd52a8"
+description = "related to the debugging builtin plugin"
+name = "plugin: debugging"
+
+["plugin: doctests"]
+color = "fad8c7"
+description = "related to the doctests builtin plugin"
+name = "plugin: doctests"
+
+["plugin: junitxml"]
+color = "c5def5"
+description = "related to the junitxml builtin plugin"
+name = "plugin: junitxml"
+
+["plugin: logging"]
+color = "ff5432"
+description = "related to the logging builtin plugin"
+name = "plugin: logging"
+
+["plugin: monkeypatch"]
+color = "0e8a16"
+description = "related to the monkeypatch builtin plugin"
+name = "plugin: monkeypatch"
+
+["plugin: nose"]
+color = "bfdadc"
+description = "related to the nose integration builtin plugin"
+name = "plugin: nose"
+
+["plugin: pastebin"]
+color = "bfd4f2"
+description = "related to the pastebin builtin plugin"
+name = "plugin: pastebin"
+
+["plugin: pytester"]
+color = "c5def5"
+description = "related to the pytester builtin plugin"
+name = "plugin: pytester"
+
+["plugin: tmpdir"]
+color = "bfd4f2"
+description = "related to the tmpdir builtin plugin"
+name = "plugin: tmpdir"
+
+["plugin: unittest"]
+color = "006b75"
+description = "related to the unittest integration builtin plugin"
+name = "plugin: unittest"
+
+["plugin: warnings"]
+color = "fef2c0"
+description = "related to the warnings builtin plugin"
+name = "plugin: warnings"
+
+["plugin: xdist"]
+color = "5319e7"
+description = "related to the xdist external plugin"
+name = "plugin: xdist"
+
+["status: critical"]
+color = "e11d21"
+description = "grave problem or usability issue that affects lots of users"
+name = "status: critical"
+
+["status: easy"]
+color = "bfe5bf"
+description = "easy issue that is friendly to new contributor"
+name = "status: easy"
+
+["status: help wanted"]
+color = "159818"
+description = "developers would like help from experts on this topic"
+name = "status: help wanted"
+
+["status: needs information"]
+color = "5319e7"
+description = "reporter needs to provide more information; can be closed after 2 or more weeks of inactivity"
+name = "status: needs information"
+
+["topic: collection"]
+color = "006b75"
+description = "related to the collection phase"
+name = "topic: collection"
+
+["topic: config"]
+color = "006b75"
+description = "related to config handling, argument parsing and config file"
+name = "topic: config"
+
+["topic: fixtures"]
+color = "5319e7"
+description = "anything involving fixtures directly or indirectly"
+name = "topic: fixtures"
+
+["topic: marks"]
+color = "b60205"
+description = "related to marks, either the general marks or builtin"
+name = "topic: marks"
+
+["topic: parametrize"]
+color = "fbca04"
+description = "related to @pytest.mark.parametrize"
+name = "topic: parametrize"
+
+["topic: reporting"]
+color = "fef2c0"
+description = "related to terminal output and user-facing messages and errors"
+name = "topic: reporting"
+
+["topic: rewrite"]
+color = "0e8a16"
+description = "related to the assertion rewrite mechanism"
+name = "topic: rewrite"
.gitignore (6 changes, vendored)
@@ -24,6 +24,7 @@ src/_pytest/_version.py
 .eggs/
 
 doc/*/_build
+doc/*/.doctrees
 build/
 dist/
 *.egg-info
@@ -35,6 +36,11 @@ env/
 .cache
 .pytest_cache
 .coverage
+.coverage.*
+coverage.xml
 .ropeproject
 .idea
 .hypothesis
+.pydevproject
+.project
+.settings
.pre-commit-config.yaml
@@ -5,26 +5,38 @@ repos:
   hooks:
   - id: black
     args: [--safe, --quiet]
-    language_version: python3.6
+    language_version: python3
 - repo: https://github.com/asottile/blacken-docs
-  rev: v0.2.0
+  rev: v0.3.0
   hooks:
   - id: blacken-docs
-    additional_dependencies: [black==18.6b4]
-    language_version: python3.6
+    additional_dependencies: [black==18.9b0]
+    language_version: python3
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v1.3.0
+  rev: v2.0.0
   hooks:
   - id: trailing-whitespace
   - id: end-of-file-fixer
   - id: check-yaml
   - id: debug-statements
     exclude: _pytest/debugging.py
+    language_version: python3
   - id: flake8
-- repo: https://github.com/asottile/pyupgrade
-  rev: v1.2.0
+    language_version: python3
+- repo: https://github.com/asottile/reorder_python_imports
+  rev: v1.3.3
   hooks:
-  - id: pyupgrade
+  - id: reorder-python-imports
+    args: ['--application-directories=.:src']
+- repo: https://github.com/asottile/pyupgrade
+  rev: v1.10.1
+  hooks:
+  - id: pyupgrade
+    args: [--keep-percent-format]
+- repo: https://github.com/pre-commit/pygrep-hooks
+  rev: v1.1.0
+  hooks:
+  - id: rst-backticks
 - repo: local
   hooks:
   - id: rst
@@ -33,4 +45,9 @@ repos:
     files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst|changelog/.*)$
     language: python
     additional_dependencies: [pygments, restructuredtext_lint]
-    python_version: python3.6
+  - id: changelogs-rst
+    name: changelog filenames
+    language: fail
+    entry: 'changelog files must be named ####.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst'
+    exclude: changelog/(\d+\.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst|README.rst|_template.rst)
+    files: ^changelog/
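The `changelogs-rst` hook above uses pre-commit's `fail` language: every file matched by `files` that is not excluded by `exclude` fails the check, with `entry` printed as the error message. So, for example (issue numbers hypothetical), `changelog/4375.bugfix.rst` is accepted, while `changelog/4375.rst` or `changelog/fix-warnings.rst` would be rejected.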
.travis.yml (97 changes)
@@ -1,50 +1,66 @@
 sudo: false
 language: python
+dist: xenial
 stages:
-- linting
-- test
-- deploy
+- baseline
+- name: test
+  if: repo = pytest-dev/pytest AND tag IS NOT present
+- name: deploy
+  if: repo = pytest-dev/pytest AND tag IS present
 python:
-- '3.6'
+- '3.7'
 install:
 - pip install --upgrade --pre tox
 env:
   matrix:
-    # coveralls is not listed in tox's envlist, but should run in travis
-    - TOXENV=coveralls
     # note: please use "tox --listenvs" to populate the build matrix below
+    # please remove the linting env in all cases
-    - TOXENV=py27
-    - TOXENV=py34
-    - TOXENV=py36
-    - TOXENV=py27-pexpect
-    - TOXENV=py27-xdist
-    - TOXENV=py27-trial
-    - TOXENV=py27-numpy
-    - TOXENV=py27-pluggymaster
-    - TOXENV=py36-pexpect
-    - TOXENV=py36-xdist
-    - TOXENV=py36-trial
-    - TOXENV=py36-numpy
-    - TOXENV=py36-pluggymaster
+    # Specialized factors for py27.
     - TOXENV=py27-nobyte
-    - TOXENV=doctesting
-    - TOXENV=docs
+    - TOXENV=py27-xdist
+    - TOXENV=py27-pluggymaster PYTEST_NO_COVERAGE=1
+    # Specialized factors for py37.
+    - TOXENV=py37-pexpect,py37-trial,py37-numpy
+    - TOXENV=py37-pluggymaster PYTEST_NO_COVERAGE=1
+    - TOXENV=py37-freeze PYTEST_NO_COVERAGE=1
 
 jobs:
   include:
-    - env: TOXENV=pypy
+    # Coverage tracking is slow with pypy, skip it.
+    - env: TOXENV=pypy PYTEST_NO_COVERAGE=1
       python: 'pypy-5.4'
+      dist: trusty
+    - env: TOXENV=py34
+      python: '3.4'
     - env: TOXENV=py35
      python: '3.5'
-    - env: TOXENV=py36-freeze
+    - env: TOXENV=py36
      python: '3.6'
-    - env: TOXENV=py37
-      python: 'nightly'
+    - &test-macos
+      language: generic
+      os: osx
+      osx_image: xcode9.4
+      sudo: required
+      install:
+        - python -m pip install --pre tox
+      env: TOXENV=py27
+    - <<: *test-macos
+      env: TOXENV=py37
+      before_install:
+        - brew update
+        - brew upgrade python
+        - brew unlink python
+        - brew link python
 
+    - stage: baseline
+      env: TOXENV=py27-pexpect,py27-trial,py27-numpy
+    - env: TOXENV=py37-xdist
+    - env: TOXENV=linting,docs,doctesting
+      python: '3.7'
 
     - stage: deploy
      python: '3.6'
-      env:
+      env: PYTEST_NO_COVERAGE=1
      install: pip install -U setuptools setuptools_scm
      script: skip
      deploy:
@@ -57,17 +73,30 @@ jobs:
       on:
         tags: true
         repo: pytest-dev/pytest
     - stage: linting
       python: '3.6'
-      env:
       install:
       - pip install pre-commit
       - pre-commit install-hooks
       script:
       - pre-commit run --all-files
 
+before_script:
+  - |
+    if [[ "$PYTEST_NO_COVERAGE" != 1 ]]; then
+      export COVERAGE_FILE="$PWD/.coverage"
+      export COVERAGE_PROCESS_START="$PWD/.coveragerc"
+      export _PYTEST_TOX_COVERAGE_RUN="coverage run -m"
+      export _PYTEST_TOX_EXTRA_DEP=coverage-enable-subprocess
+    fi
 
 script: tox --recreate
 
+after_success:
+  - |
+    if [[ "$PYTEST_NO_COVERAGE" != 1 ]]; then
+      set -e
+      # Add last TOXENV to $PATH.
+      PATH="$PWD/.tox/${TOXENV##*,}/bin:$PATH"
+      coverage combine
+      coverage xml --ignore-errors
+      coverage report -m --ignore-errors
+      bash <(curl -s https://codecov.io/bash) -Z -X gcov -X coveragepy -X search -X xcode -X gcovout -X fix -f coverage.xml -F "${TOXENV//-/,},linux"
+    fi
 
 notifications:
   irc:
     channels:
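How the coverage hand-off above works: `coverage-enable-subprocess` installs a startup hook that begins measurement in any Python process whose environment carries `COVERAGE_PROCESS_START`, which is why that variable is exported alongside it. The `_PYTEST_TOX_COVERAGE_RUN` and `_PYTEST_TOX_EXTRA_DEP` variables are placeholders that the project's `tox.ini` presumably expands into the test command prefix and an extra dependency; after the run, `coverage combine` merges the per-process data files before the XML report is sent to codecov.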
AUTHORS (26 changes)
@@ -10,9 +10,11 @@ Ahn Ki-Wook
Alan Velasco
Alexander Johnson
Alexei Kozlenok
Allan Feldman
Anatoly Bubenkoff
Anders Hovmöller
Andras Tim
Andrea Cimatoribus
Andreas Zeidler
Andrzej Ostrowski
Andy Freeland
@@ -46,7 +48,9 @@ Christian Boelsen
Christian Theunert
Christian Tismer
Christopher Gilling
CrazyMerlyn
Cyrus Maden
Dhiren Serai
Daniel Grana
Daniel Hahler
Daniel Nuri
@@ -55,6 +59,7 @@ Danielle Jenkins
Dave Hunt
David Díaz-Barquero
David Mohr
David Szotten
David Vierra
Daw-Ran Liou
Denis Kirisov
@@ -71,6 +76,8 @@ Endre Galaczi
Eric Hunsberger
Eric Siegerman
Erik M. Bray
Fabien Zarifian
Fabio Zadrozny
Feng Ma
Florian Bruhin
Floris Bruynooghe
@@ -89,6 +96,8 @@ Hugo van Kemenade
Hui Wang (coldnight)
Ian Bicking
Ian Lesperance
Ionuț Turturică
Iwan Briquemont
Jaap Broekhuizen
Jan Balster
Janne Vanhala
@@ -97,6 +106,7 @@ Javier Domingo Cansino
Javier Romero
Jeff Rackauckas
Jeff Widman
Jenni Rinker
John Eddie Ayson
John Towler
Jon Sonesen
@@ -113,6 +123,7 @@ Katerina Koukiou
Kevin Cox
Kodi B. Arfer
Kostis Anagnostopoulos
Kyle Altendorf
Lawrence Mitchell
Lee Kamentsky
Lev Maximov
@@ -147,11 +158,13 @@ Michael Droettboom
Michael Seifert
Michal Wajszczuk
Mihai Capotă
Mike Hoyle (hoylemd)
Mike Lundy
Miro Hrončok
Nathaniel Waisbrot
Ned Batchelder
Neven Mundar
Niclas Olofsson
Nicolas Delaby
Oleg Pidsadnyi
Oleg Sushchenko
@@ -173,6 +186,7 @@ Raphael Pierzina
Raquel Alegre
Ravi Chandra
Roberto Polli
Roland Puntaier
Romain Dorgueil
Roman Bolshakov
Ronny Pfannschmidt
@@ -181,7 +195,9 @@ Russel Winder
Ryan Wooden
Samuel Dion-Girardeau
Samuele Pedroni
Sankt Petersbug
Segev Finer
Serhii Mozghovyi
Simon Gomizelj
Skylar Downes
Srinivas Reddy Thatiparthy
@@ -190,6 +206,8 @@ Stefan Zimmermann
Stefano Taschini
Steffen Allner
Stephan Obermann
Sven-Hendrik Haase
Tadek Teleżyński
Tarcisio Fischer
Tareq Alayan
Ted Xiao
@@ -198,18 +216,22 @@ Thomas Hisch
Tim Strazny
Tom Dalton
Tom Viner
Tomer Keren
Trevor Bekolay
Tyler Goodlet
Tzu-ping Chung
Vasily Kuznetsov
Victor Maryama
Victor Uriarte
Vidar T. Fauske
Virgil Dupras
Vitaly Lashmanov
Vlad Dragos
Wil Cooley
William Lee
Wim Glenn
Wouter van Ackooy
Xuan Luong
Xuecong Liao
Zac Hatfield-Dodds
Zoltán Máté
Roland Puntaier
Allan Feldman
CHANGELOG.rst (937 changes)
File diff suppressed because it is too large
CITATION (16 additions, new file)
@@ -0,0 +1,16 @@
+NOTE: Change "x.y" by the version you use. If you are unsure about which version
+you are using run: `pip show pytest`.
+
+Text:
+
+[pytest] pytest x.y, 2004
+Krekel et al., https://github.com/pytest-dev/pytest
+
+BibTeX:
+
+@misc{pytestx.y,
+  title = {pytest x.y},
+  author = {Krekel, Holger and Oliveira, Bruno and Pfannschmidt, Ronny and Bruynooghe, Floris and Laugher, Brianna and Bruhin, Florian},
+  year = {2004},
+  url = {https://github.com/pytest-dev/pytest},
+}
CONTRIBUTING.rst
@@ -169,7 +169,7 @@ Short version
 #. Follow **PEP-8** for naming and `black <https://github.com/ambv/black>`_ for formatting.
 #. Tests are run using ``tox``::
 
-    tox -e linting,py27,py36
+    tox -e linting,py27,py37
 
 The test environments above are usually enough to cover most cases locally.
 
@@ -237,12 +237,12 @@ Here is a simple overview, with pytest-specific bits:
 
 #. Run all the tests
 
-   You need to have Python 2.7 and 3.6 available in your system. Now
+   You need to have Python 2.7 and 3.7 available in your system. Now
    running tests is as simple as issuing this command::
 
-    $ tox -e linting,py27,py36
+    $ tox -e linting,py27,py37
 
-   This command will run tests via the "tox" tool against Python 2.7 and 3.6
+   This command will run tests via the "tox" tool against Python 2.7 and 3.7
    and also perform "lint" coding-style checks.
 
 #. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming.
@@ -252,9 +252,9 @@ Here is a simple overview, with pytest-specific bits:
 
    $ tox -e py27 -- --pdb
 
-   Or to only run tests in a particular test module on Python 3.6::
+   Or to only run tests in a particular test module on Python 3.7::
 
-    $ tox -e py36 -- testing/test_config.py
+    $ tox -e py37 -- testing/test_config.py
 
 
 When committing, ``pre-commit`` will re-format the files if necessary.
@@ -280,6 +280,47 @@ Here is a simple overview, with pytest-specific bits:
     base: features # if it's a feature
 
 
+Writing Tests
+----------------------------
+
+Writing tests for plugins or for pytest itself is often done using the `testdir fixture <https://docs.pytest.org/en/latest/reference.html#testdir>`_, as a "black-box" test.
+
+For example, to ensure a simple test passes you can write:
+
+.. code-block:: python
+
+    def test_true_assertion(testdir):
+        testdir.makepyfile(
+            """
+            def test_foo():
+                assert True
+            """
+        )
+        result = testdir.runpytest()
+        result.assert_outcomes(failed=0, passed=1)
+
+
+Alternatively, it is possible to make checks based on the actual output of the termal using
+*glob-like* expressions:
+
+.. code-block:: python
+
+    def test_true_assertion(testdir):
+        testdir.makepyfile(
+            """
+            def test_foo():
+                assert False
+            """
+        )
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines(["*assert False*", "*1 failed*"])
+
+When choosing a file where to write a new test, take a look at the existing files and see if there's
+one file which looks like a good fit. For example, a regression test about a bug in the ``--lf`` option
+should go into ``test_cacheprovider.py``, given that this option is implemented in ``cacheprovider.py``.
+If in doubt, go ahead and open a PR with your best guess and we can discuss this over the code.
+
+
 Joining the Development Team
 ----------------------------
HOWTORELEASE.rst
@@ -18,28 +18,23 @@ taking a lot of time to make a new one.
 
 Ensure your are in a clean work tree.
 
-#. Install development dependencies in a virtual environment with::
+#. Using ``tox``, generate docs, changelog, announcements::
 
-    $ pip3 install -U -r tasks/requirements.txt
-
-#. Generate docs, changelog, announcements, and a **local** tag::
-
-    $ invoke generate.pre-release <VERSION>
-
-#. Execute pre-commit on all files to ensure the docs are conformant and commit your results::
-
-    $ pre-commit run --all-files
-    $ git commit -am "Fix files with pre-commit"
+    $ tox -e release -- <VERSION>
 
 This will generate a commit with all the changes ready for pushing.
 
 #. Open a PR for this branch targeting ``master``.
 
 #. After all tests pass and the PR has been approved, publish to PyPI by pushing the tag::
 
    git tag <VERSION>
    git push git@github.com:pytest-dev/pytest.git <VERSION>
 
 Wait for the deploy to complete, then make sure it is `available on PyPI <https://pypi.org/project/pytest>`_.
 
 #. Merge the PR into ``master``.
 
 #. Send an email announcement with the contents from::
 
    doc/en/announce/release-<VERSION>.rst
@@ -51,5 +46,3 @@ taking a lot of time to make a new one.
 * testing-in-python@lists.idyll.org (only major/minor releases)
 
 And announce it on `Twitter <https://twitter.com/>`_ with the ``#pytest`` hashtag.
-
-#. After a minor/major release, merge ``release-X.Y.Z`` into ``master`` and push (or open a PR).
README.rst (30 changes)
@@ -1,8 +1,9 @@
-.. image:: http://docs.pytest.org/en/latest/_static/pytest1.png
-   :target: http://docs.pytest.org
+.. image:: https://docs.pytest.org/en/latest/_static/pytest1.png
+   :target: https://docs.pytest.org/en/latest/
    :align: center
    :alt: pytest
 
+
 ------
 
 
@@ -14,8 +15,9 @@
 .. image:: https://img.shields.io/pypi/pyversions/pytest.svg
    :target: https://pypi.org/project/pytest/
 
-.. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg
-   :target: https://coveralls.io/r/pytest-dev/pytest
+.. image:: https://codecov.io/gh/pytest-dev/pytest/branch/master/graph/badge.svg
+   :target: https://codecov.io/gh/pytest-dev/pytest
+   :alt: Code coverage Status
 
 .. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master
    :target: https://travis-ci.org/pytest-dev/pytest
@@ -24,7 +26,7 @@
    :target: https://ci.appveyor.com/project/pytestbot/pytest
 
 .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
-    :target: https://github.com/ambv/black
+   :target: https://github.com/ambv/black
 
 .. image:: https://www.codetriage.com/pytest-dev/pytest/badges/users.svg
    :target: https://www.codetriage.com/pytest-dev/pytest
@@ -65,23 +67,23 @@ To execute it::
     ========================== 1 failed in 0.04 seconds ===========================
 
 
-Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://docs.pytest.org/en/latest/getting-started.html#our-first-test-run>`_ for more examples.
+Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <https://docs.pytest.org/en/latest/getting-started.html#our-first-test-run>`_ for more examples.
 
 
 Features
 --------
 
-- Detailed info on failing `assert statements <http://docs.pytest.org/en/latest/assert.html>`_ (no need to remember ``self.assert*`` names);
+- Detailed info on failing `assert statements <https://docs.pytest.org/en/latest/assert.html>`_ (no need to remember ``self.assert*`` names);
 
 - `Auto-discovery
-  <http://docs.pytest.org/en/latest/goodpractices.html#python-test-discovery>`_
+  <https://docs.pytest.org/en/latest/goodpractices.html#python-test-discovery>`_
   of test modules and functions;
 
-- `Modular fixtures <http://docs.pytest.org/en/latest/fixture.html>`_ for
+- `Modular fixtures <https://docs.pytest.org/en/latest/fixture.html>`_ for
   managing small or parametrized long-lived test resources;
 
-- Can run `unittest <http://docs.pytest.org/en/latest/unittest.html>`_ (or trial),
-  `nose <http://docs.pytest.org/en/latest/nose.html>`_ test suites out of the box;
+- Can run `unittest <https://docs.pytest.org/en/latest/unittest.html>`_ (or trial),
+  `nose <https://docs.pytest.org/en/latest/nose.html>`_ test suites out of the box;
 
 - Python 2.7, Python 3.4+, PyPy 2.3, Jython 2.5 (untested);
 
@@ -91,7 +93,7 @@ Features
 Documentation
 -------------
 
-For full documentation, including installation, tutorials and PDF documents, please see http://docs.pytest.org.
+For full documentation, including installation, tutorials and PDF documents, please see https://docs.pytest.org/en/latest/.
 
 
 Bugs/Requests
@@ -103,13 +105,13 @@ Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issue
 Changelog
 ---------
 
-Consult the `Changelog <http://docs.pytest.org/en/latest/changelog.html>`__ page for fixes and enhancements of each version.
+Consult the `Changelog <https://docs.pytest.org/en/latest/changelog.html>`__ page for fixes and enhancements of each version.
 
 
 License
 -------
 
-Copyright Holger Krekel and others, 2004-2017.
+Copyright Holger Krekel and others, 2004-2018.
 
 Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
appveyor.yml (57 changes)
@@ -1,34 +1,28 @@
 environment:
-  COVERALLS_REPO_TOKEN:
-    secure: 2NJ5Ct55cHJ9WEg3xbSqCuv0rdgzzb6pnzOIG5OkMbTndw3wOBrXntWFoQrXiMFi
-  # this is pytest's token in coveralls.io, encrypted
-  # using pytestbot account as detailed here:
-  # https://www.appveyor.com/docs/build-configuration#secure-variables
-
   matrix:
-    # coveralls is not in the default env list
-    - TOXENV: "coveralls"
     # note: please use "tox --listenvs" to populate the build matrix below
-    - TOXENV: "linting"
-    - TOXENV: "py27"
-    - TOXENV: "py34"
-    - TOXENV: "py35"
-    - TOXENV: "py36"
-    - TOXENV: "pypy"
-    - TOXENV: "py27-pexpect"
+    - TOXENV: "py37-xdist"
     - TOXENV: "py27-xdist"
-    - TOXENV: "py27-trial"
-    - TOXENV: "py27-numpy"
+    - TOXENV: "py27"
+    - TOXENV: "py37"
+    - TOXENV: "linting,docs,doctesting"
+    - TOXENV: "py36"
+    - TOXENV: "py35"
+    - TOXENV: "py34"
+    - TOXENV: "pypy"
+      PYTEST_NO_COVERAGE: "1"
+    # Specialized factors for py27.
+    - TOXENV: "py27-trial,py27-numpy,py27-nobyte"
     - TOXENV: "py27-pluggymaster"
-    - TOXENV: "py36-pexpect"
-    - TOXENV: "py36-xdist"
-    - TOXENV: "py36-trial"
-    - TOXENV: "py36-numpy"
-    - TOXENV: "py36-pluggymaster"
-    - TOXENV: "py27-nobyte"
-    - TOXENV: "doctesting"
-    - TOXENV: "py36-freeze"
-    - TOXENV: "docs"
+      PYTEST_NO_COVERAGE: "1"
+    # Specialized factors for py37.
+    - TOXENV: "py37-trial,py37-numpy"
+    - TOXENV: "py37-pluggymaster"
+      PYTEST_NO_COVERAGE: "1"
+    - TOXENV: "py37-freeze"
+      PYTEST_NO_COVERAGE: "1"
 
 matrix:
   fast_finish: true
@@ -36,12 +30,19 @@ install:
 - if "%TOXENV%" == "pypy" call scripts\install-pypy.bat
 
+- C:\Python36\python -m pip install --upgrade pip
 - C:\Python36\python -m pip install --upgrade --pre tox
 
 build: false  # Not a C# project, build stuff at the test step instead.
 
+before_test:
+- call scripts\prepare-coverage.bat
+
 test_script:
-- call scripts\call-tox.bat
+- C:\Python36\python -m tox
 
+on_success:
+- call scripts\upload-coverage.bat
+
 cache:
 - '%LOCALAPPDATA%\pip\cache'
bench/bench_argcomplete.py
@@ -1,10 +1,7 @@
-
-
 # 10000 iterations, just for relative comparison
 #                      2.7.5     3.3.2
 # FilesCompleter       75.1109   69.2116
 # FastFilesCompleter    0.7383    1.0760
-
 import timeit
 
 imports = [
bench/empty.py
@@ -1,4 +1,4 @@
-import py
+import six
 
 for i in range(1000):
-    py.builtin.exec_("def test_func_%d(): pass" % i)
+    six.exec_("def test_func_%d(): pass" % i)
@@ -1,4 +1,3 @@
-
 import pytest
 
 
@@ -1,4 +1,5 @@
+from six.moves import range
 
 import pytest
 
 
changelog/README.rst
@@ -14,7 +14,8 @@ Each file should be named like ``<ISSUE>.<TYPE>.rst``, where
 * ``feature``: new user facing features, like new command-line options and new behavior.
 * ``bugfix``: fixes a reported bug.
 * ``doc``: documentation improvement, like rewording an entire session or adding missing docs.
-* ``removal``: feature deprecation or removal.
+* ``deprecation``: feature deprecation.
+* ``removal``: feature removal.
 * ``vendor``: changes in packages vendored in pytest.
 * ``trivial``: fixing a small typo or internal change that might be noteworthy.
 
@@ -26,7 +27,7 @@ changelog using that instead.
 
 If you are not sure what issue type to use, don't hesitate to ask in your PR.
 
-Note that the ``towncrier`` tool will automatically
-reflow your text, so it will work best if you stick to a single paragraph, but multiple sentences and links are OK
-and encouraged. You can install ``towncrier`` and then run ``towncrier --draft``
+``towncrier`` preserves multiple paragraphs and formatting (code blocks, lists, and so on), but for entries
+other than ``features`` it is usually better to stick to a single paragraph to keep it concise. You can install
+``towncrier`` and then run ``towncrier --draft``
 if you want to get a preview of how your change will look in the final release notes.
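As a concrete illustration of the naming scheme above (the issue number is hypothetical), a fix for issue 1234 would live in a file ``changelog/1234.bugfix.rst`` holding a single concise paragraph, e.g.::

    Fixed the ``--lf`` option to correctly replay failures from the previous run.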
changelog/_template.rst
@@ -14,7 +14,7 @@
 {% if definitions[category]['showcontent'] %}
 {% for text, values in sections[section][category]|dictsort(by='value') %}
 {% set issue_joiner = joiner(', ') %}
-- {{ text }}{% if category != 'vendor' %} ({% for value in values|sort %}{{ issue_joiner() }}`{{ value }} <https://github.com/pytest-dev/pytest/issues/{{ value[1:] }}>`_{% endfor %}){% endif %}
+- {% for value in values|sort %}{{ issue_joiner() }}`{{ value }} <https://github.com/pytest-dev/pytest/issues/{{ value[1:] }}>`_{% endfor %}: {{ text }}
 
 
 {% endfor %}
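The practical effect of this template change, with a hypothetical issue number: an entry that previously rendered as ``- Fixed the teardown ordering bug. (#1234)`` now renders with the issue link first, as ``- #1234: Fixed the teardown ordering bug.``, and the special-casing that omitted issue links for the ``vendor`` category is dropped.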
doc/en/_themes/flask_theme_support.py
@@ -1,19 +1,17 @@
 # flasky extensions. flasky pygments style based on tango style
 from pygments.style import Style
-from pygments.token import (
-    Keyword,
-    Name,
-    Comment,
-    String,
-    Error,
-    Number,
-    Operator,
-    Generic,
-    Whitespace,
-    Punctuation,
-    Other,
-    Literal,
-)
+from pygments.token import Comment
+from pygments.token import Error
+from pygments.token import Generic
+from pygments.token import Keyword
+from pygments.token import Literal
+from pygments.token import Name
+from pygments.token import Number
+from pygments.token import Operator
+from pygments.token import Other
+from pygments.token import Punctuation
+from pygments.token import String
+from pygments.token import Whitespace
 
 
 class FlaskyStyle(Style):
doc/en/announce/index.rst
@@ -6,6 +6,24 @@ Release announcements
    :maxdepth: 2
 
 
+   release-4.0.2
+   release-4.0.1
+   release-4.0.0
+   release-3.10.1
+   release-3.10.0
+   release-3.9.3
+   release-3.9.2
+   release-3.9.1
+   release-3.9.0
+   release-3.8.2
+   release-3.8.1
+   release-3.8.0
+   release-3.7.4
+   release-3.7.3
+   release-3.7.2
+   release-3.7.1
+   release-3.7.0
+   release-3.6.4
    release-3.6.3
    release-3.6.2
    release-3.6.1
@@ -124,7 +124,7 @@ The py.test Development Team
   Thanks `@biern`_ for the PR.
 
 * Fix `traceback style docs`_ to describe all of the available options
-  (auto/long/short/line/native/no), with `auto` being the default since v2.6.
+  (auto/long/short/line/native/no), with ``auto`` being the default since v2.6.
   Thanks `@hackebrot`_ for the PR.
 
 * Fix (`#1422`_): junit record_xml_property doesn't allow multiple records
43 doc/en/announce/release-3.10.0.rst Normal file
@@ -0,0 +1,43 @@
pytest-3.10.0
=======================================

The pytest team is proud to announce the 3.10.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Anders Hovmöller
* Andreu Vallbona Plazas
* Ankit Goel
* Anthony Sottile
* Bernardo Gomes
* Brianna Laugher
* Bruno Oliveira
* Daniel Hahler
* David Szotten
* Mick Koch
* Niclas Olofsson
* Palash Chatterjee
* Ronny Pfannschmidt
* Sven-Hendrik Haase
* Ville Skyttä
* William Jamir Silva


Happy testing,
The Pytest Development Team
24 doc/en/announce/release-3.10.1.rst Normal file
@@ -0,0 +1,24 @@
pytest-3.10.1
=======================================

pytest 3.10.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Boris Feld
* Bruno Oliveira
* Daniel Hahler
* Fabien ZARIFIAN
* Jon Dufresne
* Ronny Pfannschmidt


Happy testing,
The pytest Development Team
@@ -20,8 +20,7 @@ Thanks to all who contributed to this release, among them:

* Ondřej Súkup
* Ronny Pfannschmidt
* T.E.A de Souza
* Victor
* victor
* Victor Maryama


Happy testing,
24 doc/en/announce/release-3.6.4.rst Normal file
@@ -0,0 +1,24 @@
pytest-3.6.4
=======================================

pytest 3.6.4 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bernhard M. Wiedemann
* Bruno Oliveira
* Drew
* E Hershey
* Hugo Martins
* Vlad Shcherbina


Happy testing,
The pytest Development Team
41 doc/en/announce/release-3.7.0.rst Normal file
@@ -0,0 +1,41 @@
pytest-3.7.0
=======================================

The pytest team is proud to announce the 3.7.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    http://doc.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    http://docs.pytest.org

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Alan
* Alan Brammer
* Ammar Najjar
* Anthony Sottile
* Bruno Oliveira
* Jeffrey Rackauckas
* Kale Kundert
* Ronny Pfannschmidt
* Serhii Mozghovyi
* Tadek Teleżyński
* Wil Cooley
* abrammer
* avirlrma
* turturica


Happy testing,
The Pytest Development Team
21 doc/en/announce/release-3.7.1.rst Normal file
@@ -0,0 +1,21 @@
pytest-3.7.1
=======================================

pytest 3.7.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* Kale Kundert
* Ronny Pfannschmidt


Happy testing,
The pytest Development Team
25 doc/en/announce/release-3.7.2.rst Normal file
@@ -0,0 +1,25 @@
pytest-3.7.2
=======================================

pytest 3.7.2 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Josh Holland
* Ronny Pfannschmidt
* Sankt Petersbug
* Wes Thomas
* turturica


Happy testing,
The pytest Development Team
32 doc/en/announce/release-3.7.3.rst Normal file
@@ -0,0 +1,32 @@
pytest-3.7.3
=======================================

pytest 3.7.3 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Andrew Champion
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Gandalf Saxe
* Jennifer Rinker
* Natan Lao
* Ondřej Súkup
* Ronny Pfannschmidt
* Sankt Petersbug
* Tyler Richard
* Victor Maryama
* Vlad Shcherbina
* turturica
* wim glenn


Happy testing,
The pytest Development Team
22 doc/en/announce/release-3.7.4.rst Normal file
@@ -0,0 +1,22 @@
pytest-3.7.4
=======================================

pytest 3.7.4 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Jiri Kuncar
* Steve Piercy


Happy testing,
The pytest Development Team
38 doc/en/announce/release-3.8.0.rst Normal file
@@ -0,0 +1,38 @@
pytest-3.8.0
=======================================

The pytest team is proud to announce the 3.8.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* CrazyMerlyn
* Daniel Hahler
* Fabio Zadrozny
* Jeffrey Rackauckas
* Ronny Pfannschmidt
* Virgil Dupras
* dhirensr
* hoefling
* wim glenn


Happy testing,
The Pytest Development Team
25 doc/en/announce/release-3.8.1.rst Normal file
@@ -0,0 +1,25 @@
pytest-3.8.1
=======================================

pytest 3.8.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Ankit Goel
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Maximilian Albert
* Ronny Pfannschmidt
* William Jamir Silva
* wim glenn


Happy testing,
The pytest Development Team
28 doc/en/announce/release-3.8.2.rst Normal file
@@ -0,0 +1,28 @@
pytest-3.8.2
=======================================

pytest 3.8.2 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Ankit Goel
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Denis Otkidach
* Harry Percival
* Jeffrey Rackauckas
* Jose Carlos Menezes
* Ronny Pfannschmidt
* Zac-HD
* iwanb


Happy testing,
The pytest Development Team
43 doc/en/announce/release-3.9.0.rst Normal file
@@ -0,0 +1,43 @@
pytest-3.9.0
=======================================

The pytest team is proud to announce the 3.9.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Andrea Cimatoribus
* Ankit Goel
* Anthony Sottile
* Ben Eyal
* Bruno Oliveira
* Daniel Hahler
* Jeffrey Rackauckas
* Jose Carlos Menezes
* Kyle Altendorf
* Niklas JQ
* Palash Chatterjee
* Ronny Pfannschmidt
* Thomas Hess
* Thomas Hisch
* Tomer Keren
* Victor Maryama


Happy testing,
The Pytest Development Team
20 doc/en/announce/release-3.9.1.rst Normal file
@@ -0,0 +1,20 @@
pytest-3.9.1
=======================================

pytest 3.9.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Bruno Oliveira
* Ronny Pfannschmidt
* Thomas Hisch


Happy testing,
The pytest Development Team
23 doc/en/announce/release-3.9.2.rst Normal file
@@ -0,0 +1,23 @@
pytest-3.9.2
=======================================

pytest 3.9.2 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Ankit Goel
* Anthony Sottile
* Bruno Oliveira
* Ronny Pfannschmidt
* Vincent Barbaresi
* ykantor


Happy testing,
The pytest Development Team
24 doc/en/announce/release-3.9.3.rst Normal file
@@ -0,0 +1,24 @@
pytest-3.9.3
=======================================

pytest 3.9.3 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Andreas Profous
* Ankit Goel
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Jon Dufresne
* Ronny Pfannschmidt


Happy testing,
The pytest Development Team
30 doc/en/announce/release-4.0.0.rst Normal file
@@ -0,0 +1,30 @@
pytest-4.0.0
=======================================

The pytest team is proud to announce the 4.0.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Bruno Oliveira
* Daniel Hahler
* Ronny Pfannschmidt


Happy testing,
The Pytest Development Team
23 doc/en/announce/release-4.0.1.rst Normal file
@@ -0,0 +1,23 @@
pytest-4.0.1
=======================================

pytest 4.0.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Michael D. Hoyle
* Ronny Pfannschmidt
* Slam


Happy testing,
The pytest Development Team
24 doc/en/announce/release-4.0.2.rst Normal file
@@ -0,0 +1,24 @@
pytest-4.0.2
=======================================

pytest 4.0.2 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Pedro Algarvio
* Ronny Pfannschmidt
* Tomer Keren
* Yash Todi


Happy testing,
The pytest Development Team
@@ -22,11 +22,13 @@ following::

    assert f() == 4

to assert that your function returns a certain value. If this assertion fails
you will see the return value of the function call::
you will see the return value of the function call:

.. code-block:: pytest

    $ pytest test_assert1.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 item
@@ -165,11 +167,13 @@ when it encounters comparisons. For example::

    set2 = set("8035")
    assert set1 == set2

if you run this module::
if you run this module:

.. code-block:: pytest

    $ pytest test_assert2.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 item
@@ -235,7 +239,9 @@ now, given this test module::

    assert f1 == f2

you can run the test module and get the custom output defined in
the conftest file::
the conftest file:

.. code-block:: pytest

    $ pytest -q test_foocompare.py
    F [100%]
@@ -264,8 +270,12 @@ Advanced assertion introspection

Reporting details about a failing assertion is achieved by rewriting assert
statements before they are run. Rewritten assert statements put introspection
information into the assertion failure message. ``pytest`` only rewrites test
modules directly discovered by its test collection process, so asserts in
supporting modules which are not themselves test modules will not be rewritten.
modules directly discovered by its test collection process, so **asserts in
supporting modules which are not themselves test modules will not be rewritten**.

You can manually enable assertion rewriting for an imported module by calling
`register_assert_rewrite <https://docs.pytest.org/en/latest/writing_plugins.html#assertion-rewriting>`_
before you import it (a good place to do that is in ``conftest.py``).

.. note::
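As a minimal sketch of that call (the module name ``helpers`` is hypothetical):

.. code-block:: python

    # conftest.py -- opt a support module into assertion rewriting
    import pytest

    # must run before "helpers" is imported anywhere
    pytest.register_assert_rewrite("helpers")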
@@ -7,14 +7,16 @@ Keeping backwards compatibility has a very high priority in the pytest project.

With the pytest 3.0 release we introduced a clear communication scheme for when we will actually remove the old busted joint and politely ask you to use the new hotness instead, while giving you enough time to adjust your tests or raise concerns if there are valid reasons to keep deprecated functionality around.

To communicate changes we are already issuing deprecation warnings, but they are not displayed by default. In pytest 3.0 we changed the default setting so that pytest deprecation warnings are displayed if not explicitly silenced (with ``--disable-pytest-warnings``).
To communicate changes we issue deprecation warnings using a custom warning hierarchy (see :ref:`internal-warnings`). These warnings may be suppressed using the standard means: the ``-W`` command-line flag or the ``filterwarnings`` ini option (see :ref:`warnings`), but we suggest using these sparingly and temporarily, and heeding the warnings when possible.

We will only remove deprecated functionality in major releases (e.g. if we deprecate something in 3.0 we will remove it in 4.0), and keep it around for at least two minor releases (e.g. if we deprecate something in 3.9 and 4.0 is the next release, we will not remove it in 4.0 but in 5.0).
We will only start the removal of deprecated functionality in major releases (e.g. if we deprecate something in 3.0 we will start to remove it in 4.0), and keep it around for at least two minor releases (e.g. if we deprecate something in 3.9 and 4.0 is the next release, we start to remove it in 5.0, not in 4.0).

When the deprecation expires (e.g. 4.0 is released), we won't remove the deprecated functionality immediately, but will use the standard warning filters to turn them into **errors** by default. This approach makes it explicit that removal is imminent, and still gives you time to turn the deprecated feature into a warning instead of an error so it can be dealt with in your own time. In the next minor release (e.g. 4.1), the feature will be effectively removed.


Deprecation Roadmap
-------------------

We track deprecation and removal of features using milestones and the `deprecation <https://github.com/pytest-dev/pytest/issues?q=label%3A%22type%3A+deprecation%22>`_ and `removal <https://github.com/pytest-dev/pytest/labels/type%3A%20removal>`_ labels on GitHub.
Features currently deprecated and removed in previous releases can be found in :ref:`deprecations`.

Following our deprecation policy, after we start issuing deprecation warnings we keep features for *at least* two minor versions before considering removal.
We track future deprecation and removal of features using milestones and the `deprecation <https://github.com/pytest-dev/pytest/issues?q=label%3A%22type%3A+deprecation%22>`_ and `removal <https://github.com/pytest-dev/pytest/labels/type%3A%20removal>`_ labels on GitHub.
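For example, a project migrating at its own pace might temporarily filter one specific warning class via the ini option (a sketch; ``pytest.PytestDeprecationWarning`` is part of the warning hierarchy mentioned above):

.. code-block:: ini

    # pytest.ini -- temporary filter while the offending code is updated
    [pytest]
    filterwarnings =
        ignore::pytest.PytestDeprecationWarning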
@@ -12,7 +12,9 @@ For information on plugin hooks and objects, see :ref:`plugins`.

For information on the ``pytest.mark`` mechanism, see :ref:`mark`.

For information about fixtures, see :ref:`fixtures`. To see a complete list of available fixtures (add ``-v`` to also see fixtures with leading ``_``), type ::
For information about fixtures, see :ref:`fixtures`. To see a complete list of available fixtures (add ``-v`` to also see fixtures with leading ``_``), type:

.. code-block:: pytest

    $ pytest -q --fixtures
    cache
@@ -75,7 +77,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a

    caplog
        Access and control log capturing.

        Captured logs are available through the following methods::
        Captured logs are available through the following properties/methods::

        * caplog.text -> string containing formatted log output
        * caplog.records -> list of logging.LogRecord instances
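A brief sketch of those attributes in use (the logger name and message are illustrative):

.. code-block:: python

    import logging


    def test_warning_is_logged(caplog):
        logging.getLogger("app").warning("disk almost full")
        # caplog.text holds the formatted output, caplog.records the LogRecords
        assert "disk almost full" in caplog.text
        assert caplog.records[0].levelno == logging.WARNING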
@@ -104,7 +106,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a

        See http://docs.python.org/library/warnings.html for information
        on warning categories.
    tmpdir_factory
        Return a TempdirFactory instance for the test session.
        Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.
    tmp_path_factory
        Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.
    tmpdir
        Return a temporary directory path object
        which is unique to each test function invocation,
@@ -113,6 +117,16 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
        path object.

        .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
    tmp_path
        Return a temporary directory path object
        which is unique to each test function invocation,
        created as a sub directory of the base temporary
        directory. The returned object is a :class:`pathlib.Path`
        object.

        .. note::

            in python < 3.6 this is a pathlib2.Path

    no tests ran in 0.12 seconds
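A small sketch of ``tmp_path`` in use (the file name is illustrative):

.. code-block:: python

    def test_write_report(tmp_path):
        out = tmp_path / "report.txt"  # tmp_path is a pathlib.Path
        out.write_text(u"ok")
        assert out.read_text() == u"ok"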
@@ -43,7 +43,9 @@ First, let's create 50 test invocations of which only 2 fail::

    if i in (17, 25):
        pytest.fail("bad luck")

If you run this for the first time you will see two failures::
If you run this for the first time you will see two failures:

.. code-block:: pytest

    $ pytest -q
    .................F.......F........................ [100%]
@@ -72,11 +74,13 @@ If you run this for the first time you will see two failures::

    test_50.py:6: Failed
    2 failed, 48 passed in 0.12 seconds

If you then run it with ``--lf``::
If you then run it with ``--lf``:

.. code-block:: pytest

    $ pytest --lf
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 50 items / 48 deselected
    run-last-failure: rerun previous 2 failures
@@ -113,11 +117,13 @@ not been run ("deselected").

Now, if you run with the ``--ff`` option, all tests will be run but the first
previous failures will be executed first (as can be seen from the series
of ``FF`` and dots)::
of ``FF`` and dots):

.. code-block:: pytest

    $ pytest --ff
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 50 items
    run-last-failure: rerun previous 2 failures first
@@ -162,8 +168,8 @@ When no tests failed in the last run, or when no cached ``lastfailed`` data was
found, ``pytest`` can be configured either to run all of the tests or no tests,
using the ``--last-failed-no-failures`` option, which takes one of the following values::

    pytest --last-failed-no-failures all  # run all tests (default behavior)
    pytest --last-failed-no-failures none # run no tests and exit
    pytest --last-failed --last-failed-no-failures all  # run all tests (default behavior)
    pytest --last-failed --last-failed-no-failures none # run no tests and exit

The new config.cache object
--------------------------------
@@ -192,7 +198,9 @@ across pytest invocations::

    assert mydata == 23

If you run this command once, it will take a while because
of the sleep::
of the sleep:

.. code-block:: pytest

    $ pytest -q
    F [100%]
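For context, the fixture behind this example looks roughly like the following sketch (the cache key ``example/value`` matches the ``--cache-show`` output later in this section):

.. code-block:: python

    import time

    import pytest


    @pytest.fixture
    def mydata(request):
        # fetch the previously computed value; recompute and store on a miss
        val = request.config.cache.get("example/value", None)
        if val is None:
            time.sleep(9)  # stand-in for an expensive computation
            val = 42
            request.config.cache.set("example/value", val)
        return val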
@@ -209,7 +217,9 @@ of the sleep::

    1 failed in 0.12 seconds

If you run it a second time the value will be retrieved from
the cache and this will be quick::
the cache and this will be quick:

.. code-block:: pytest

    $ pytest -q
    F [100%]
@@ -232,11 +242,13 @@ Inspecting Cache content
-------------------------------

You can always peek at the content of the cache using the
``--cache-show`` command line option::
``--cache-show`` command line option:

.. code-block:: pytest

    $ pytest --cache-show
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    cachedir: $REGENDOC_TMPDIR/.pytest_cache
    ------------------------------- cache values -------------------------------
@@ -244,6 +256,8 @@ You can always peek at the content of the cache using the
    {'test_caching.py::test_function': True}
    cache/nodeids contains:
    ['test_caching.py::test_function']
    cache/stepwise contains:
    []
    example/value contains:
    42
@@ -260,3 +274,9 @@ by adding the ``--cache-clear`` option like this::

This is recommended for invocations from Continuous Integration
servers where isolation and correctness are more important
than speed.


Stepwise
--------

As an alternative to ``--lf -x``, especially for cases where you expect a large part of the test suite to fail, ``--sw``/``--stepwise`` allows you to fix the failures one at a time. The test suite will run until the first failure and then stop. At the next invocation, tests will continue from the last failing test and then run until the next failing test. You may use the ``--stepwise-skip`` option to ignore one failing test and stop the test execution on the second failing test instead. This is useful if you get stuck on a failing test and just want to ignore it until later.
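The workflow described above, in commands only (no output shown)::

    pytest --sw                  # run until the first failure, resume from it next time
    pytest --sw --stepwise-skip  # skip one stuck failure, stop at the following one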
@@ -52,7 +52,7 @@ is that you can use print statements for debugging::

    # content of test_module.py

    def setup_function(function):
        print ("setting up %s" % function)
        print("setting up %s" % function)

    def test_func1():
        assert True
@@ -61,11 +61,13 @@ is that you can use print statements for debugging::
        assert False

and running this module will show you precisely the output
of the failing function and hide the other one::
of the failing function and hide the other one:

.. code-block:: pytest

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 2 items
@@ -1,7 +1,4 @@

.. _changelog:

Changelog history
=================================

.. include:: ../../CHANGELOG.rst
@@ -10,17 +10,15 @@
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.

import datetime
import os
import sys
import datetime

from _pytest import __version__ as version
@@ -42,6 +40,7 @@ todo_include_todos = 1

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "pygments_pytest",
    "sphinx.ext.autodoc",
    "sphinx.ext.todo",
    "sphinx.ext.autosummary",
@@ -65,7 +64,7 @@ master_doc = "contents"

# General information about the project.
project = u"pytest"
year = datetime.datetime.utcnow().year
copyright = u"2015–{} , holger krekel and pytest-dev team".format(year)
copyright = u"2015–2018 , holger krekel and pytest-dev team"


# The language for content autogenerated by Sphinx. Refer to documentation
@@ -329,7 +328,7 @@ texinfo_documents = [


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"python": ("http://docs.python.org/3", None)}
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}


def setup(app):
@@ -33,12 +33,14 @@ Full pytest documentation
   reference

   goodpractices
   flaky
   pythonpath
   customize
   example/index
   bash-completion

   backwards-compatibility
   deprecations
   historical-notes
   license
   contributing
@@ -32,7 +32,7 @@ Here's a summary of what ``pytest`` uses ``rootdir`` for:
  class name, function name and parametrization (if any).

* Is used by plugins as a stable location to store project/test run specific information;
  for example, the internal :ref:`cache <cache>` plugin creates a ``.cache`` subdirectory
  for example, the internal :ref:`cache <cache>` plugin creates a ``.pytest_cache`` subdirectory
  in ``rootdir`` to store its cross-test run state.

It is important to emphasize that ``rootdir`` is **NOT** used to modify ``sys.path``/``PYTHONPATH`` or
@@ -139,7 +139,7 @@ line options while the environment is in use::

Here's how the command-line is built in the presence of ``addopts`` or the environment variable::

    <pytest.ini:addopts> $PYTEST_ADDOTPS <extra command-line arguments>
    <pytest.ini:addopts> $PYTEST_ADDOPTS <extra command-line arguments>

So if the user executes in the command-line::
386 doc/en/deprecations.rst Normal file
@@ -0,0 +1,386 @@
.. _deprecations:

Deprecations and Removals
=========================

This page lists all pytest features that are currently deprecated or have been removed in past major releases.
The objective is to give users a clear rationale for why a certain feature has been removed, and what alternatives
should be used instead.

Deprecated Features
-------------------

Below is a complete list of all pytest features which are considered deprecated. Using those features will issue
:class:`_pytest.warning_types.PytestWarning` or subclasses, which can be filtered using
:ref:`standard warning filters <warnings>`.
Internal classes accessed through ``Node``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.9

Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances now issues
this warning::

    usage of Function.Module is deprecated, please use pytest.Module instead

Users should just ``import pytest`` and access those objects using the ``pytest`` module.

This has been documented as deprecated for years, but only now are we actually emitting deprecation warnings.
``cached_setup``
~~~~~~~~~~~~~~~~

.. deprecated:: 3.9

``request.cached_setup`` was the precursor of the setup/teardown mechanism available to fixtures.

Example:

.. code-block:: python

    @pytest.fixture
    def db_session(request):
        return request.cached_setup(
            setup=Session.create, teardown=lambda session: session.close(), scope="module"
        )

This should be updated to make use of standard fixture mechanisms:

.. code-block:: python

    @pytest.fixture(scope="module")
    def db_session():
        session = Session.create()
        yield session
        session.close()


You can consult the `funcarg comparison section in the docs <https://docs.pytest.org/en/latest/funcarg_compare.html>`_ for
more information.

This has been documented as deprecated for years, but only now are we actually emitting deprecation warnings.
Using ``Class`` in custom Collectors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.9

Using objects named ``"Class"`` as a way to customize the type of nodes that are collected in ``Collector``
subclasses has been deprecated. Users instead should use ``pytest_pycollect_makeitem`` to customize node types during
collection.

This issue should affect only advanced plugins that create new collection types, so if you see this warning
message please contact the authors so they can change the code.
``Config.warn`` and ``Node.warn``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.8

Those methods were part of the internal pytest warnings system; since ``3.8`` pytest uses the builtin warning
system for its own warnings, so those two functions are now deprecated.

``Config.warn`` should be replaced by calls to the standard ``warnings.warn``. For example:

.. code-block:: python

    config.warn("C1", "some warning")

becomes:

.. code-block:: python

    warnings.warn(pytest.PytestWarning("some warning"))

``Node.warn`` now supports two signatures:

* ``node.warn(PytestWarning("some message"))``: this is now the **recommended** way to call this function.
  The warning instance must be a ``PytestWarning`` or subclass.

* ``node.warn("CI", "some message")``: this code/message form is now **deprecated** and should be converted to the warning instance form above.
``pytest_namespace``
~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.7

This hook is deprecated because it greatly complicates the pytest internals regarding configuration and initialization, making some
bug fixes and refactorings impossible.

Example of usage:

.. code-block:: python

    class MySymbol:
        ...


    def pytest_namespace():
        return {"my_symbol": MySymbol()}


Plugin authors relying on this hook should instead require that users now import the plugin modules directly (with an appropriate public API).

As a stopgap measure, plugin authors may still inject their names into pytest's namespace, usually during ``pytest_configure``:

.. code-block:: python

    import pytest


    def pytest_configure():
        pytest.my_symbol = MySymbol()
Calling fixtures directly
~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.7

Calling a fixture function directly, as opposed to requesting it in a test function, is deprecated.

For example:

.. code-block:: python

    @pytest.fixture
    def cell():
        return ...


    @pytest.fixture
    def full_cell():
        cell = cell()
        cell.make_full()
        return cell

This is a great source of confusion for new users, who will often call fixture functions and request them from test functions interchangeably, which breaks the fixture resolution model.

In those cases just request the function directly in the dependent fixture:

.. code-block:: python

    @pytest.fixture
    def cell():
        return ...


    @pytest.fixture
    def full_cell(cell):
        cell.make_full()
        return cell
``Node.get_marker``
~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.6

As part of a large :ref:`marker-revamp`, :meth:`_pytest.nodes.Node.get_marker` is deprecated. See
:ref:`the documentation <update marker code>` for tips on how to update your code.
record_xml_property
~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.5

The ``record_xml_property`` fixture is now deprecated in favor of the more generic ``record_property``, which
can be used by other consumers (for example ``pytest-html``) to obtain custom information about the test run.

This is just a matter of renaming the fixture as the API is the same:

.. code-block:: python

    def test_foo(record_xml_property):
        ...

Change to:

.. code-block:: python

    def test_foo(record_property):
        ...
pytest_plugins in non-top-level conftest files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.5

Defining ``pytest_plugins`` is now deprecated in non-top-level conftest.py
files because they will activate referenced plugins *globally*, which is surprising because for all other pytest
features a ``conftest.py`` file is only *active* for tests at or below it.
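The fix is to declare plugins only in the top-level ``conftest.py`` (a sketch; the plugin name below is hypothetical):

.. code-block:: python

    # conftest.py at the rootdir of the repository, not in a subdirectory
    pytest_plugins = ["myproject.fixtures"]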
Metafunc.addcall
~~~~~~~~~~~~~~~~

.. deprecated:: 3.3

:meth:`_pytest.python.Metafunc.addcall` was a precursor to the current parametrization mechanism. Users should use
:meth:`_pytest.python.Metafunc.parametrize` instead.
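As an illustrative conversion (mirroring the ``failure_demo`` change later in this diff):

.. code-block:: python

    # before (deprecated)
    def pytest_generate_tests(metafunc):
        if "param1" in metafunc.fixturenames:
            metafunc.addcall(funcargs=dict(param1=3, param2=6))


    # after
    def pytest_generate_tests(metafunc):
        if "param1" in metafunc.fixturenames:
            metafunc.parametrize("param1, param2", [(3, 6)])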
marks in ``pytest.mark.parametrize``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.2

Applying marks to values of a ``pytest.mark.parametrize`` call is now deprecated. For example:

.. code-block:: python

    @pytest.mark.parametrize(
        "a, b", [(3, 9), pytest.mark.xfail(reason="flaky")(6, 36), (10, 100)]
    )
    def test_foo(a, b):
        ...

This code applies the ``pytest.mark.xfail(reason="flaky")`` mark to the ``(6, 36)`` value of the above parametrization
call.

This was considered hard to read and understand, and its implementation also presented problems that prevented
further internal improvements in the marks architecture.

To update the code, use ``pytest.param``:

.. code-block:: python

    @pytest.mark.parametrize(
        "a, b",
        [(3, 9), pytest.param(6, 36, marks=pytest.mark.xfail(reason="flaky")), (10, 100)],
    )
    def test_foo(a, b):
        ...
Passing command-line string to ``pytest.main()``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.0

Passing a command-line string to ``pytest.main()`` is deprecated:

.. code-block:: python

    pytest.main("-v -s")

Pass a list instead:

.. code-block:: python

    pytest.main(["-v", "-s"])


By passing a string, users expect that pytest will interpret that command-line using the shell rules they are working
with (for example ``bash`` or ``PowerShell``), but this is very hard/impossible to do in a portable way.
``yield`` tests
~~~~~~~~~~~~~~~

.. deprecated:: 3.0

pytest supports ``yield``-style tests, where a test function actually yields functions and values
that are then turned into proper test methods. Example:

.. code-block:: python

    def check(x, y):
        assert x * x == y


    def test_squared():
        yield check, 2, 4
        yield check, 3, 9

This would result in two actual test functions being generated.

This form of test function doesn't support fixtures properly, and users should switch to ``pytest.mark.parametrize``:

.. code-block:: python

    @pytest.mark.parametrize("x, y", [(2, 4), (3, 9)])
    def test_squared(x, y):
        assert x * x == y
``pytest_funcarg__`` prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.0

In very early pytest versions fixtures could be defined using the ``pytest_funcarg__`` prefix:

.. code-block:: python

    def pytest_funcarg__data():
        return SomeData()

Switch over to the ``@pytest.fixture`` decorator:

.. code-block:: python

    @pytest.fixture
    def data():
        return SomeData()
[pytest] section in setup.cfg files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.0

``[pytest]`` sections in ``setup.cfg`` files should now be named ``[tool:pytest]``
to avoid conflicts with other distutils commands.
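For example (the ``addopts`` value is illustrative; only the section name changes):

.. code-block:: ini

    # setup.cfg -- formerly [pytest]
    [tool:pytest]
    addopts = -ra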
Result log (``--resultlog``)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.0

The ``--resultlog`` command line option has been deprecated: it is little used
and there are more modern and better alternatives, for example `pytest-tap <https://tappy.readthedocs.io/en/latest/>`_.
Removed Features
----------------

As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after
an appropriate period of deprecation has passed.


Reinterpretation mode (``--assert=reinterp``)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

*Removed in version 3.0.*

Reinterpretation mode has now been removed and only plain and rewrite
modes are available; consequently the ``--assert=reinterp`` option is
no longer available. This also means files imported from plugins or
``conftest.py`` will not benefit from improved assertions by
default; you should use ``pytest.register_assert_rewrite()`` to
explicitly turn on assertion rewriting for those files.
Removed command-line options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

*Removed in version 3.0.*

The following deprecated command-line options were removed:

* ``--genscript``: no longer supported;
* ``--no-assert``: use ``--assert=plain`` instead;
* ``--nomagic``: use ``--assert=plain`` instead;
* ``--report``: use ``-r`` instead.
py.test-X* entry points
~~~~~~~~~~~~~~~~~~~~~~~

*Removed in version 3.0.*

Removed all ``py.test-X*`` entry points. The versioned, suffixed entry points
were never documented and were a leftover from a pre-virtualenv era. These entry
points also created broken entry points in wheels, so removing them also
removes a source of confusion for users.
@@ -39,6 +39,11 @@ avoid creating labels just for the sake of creating them.

Each label should include a description in the GitHub interface stating its purpose.

Labels are managed using `labels <https://github.com/hackebrot/labels>`_. All the labels in the repository
are kept in ``.github/labels.toml``, so any changes should be done via PRs to that file.
After a PR is accepted and merged, one of the maintainers must manually synchronize the labels file with the
GitHub repository.

Temporary labels
~~~~~~~~~~~~~~~~
@@ -58,11 +58,13 @@ and another like this::

    """
    return 42

then you can just invoke ``pytest`` without command line options::
then you can just invoke ``pytest`` without command line options:

.. code-block:: pytest

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
    collected 1 item
@@ -152,6 +154,9 @@ which can then be used in your doctests directly::

    """
    pass

Note that, as for a normal ``conftest.py``, the fixtures are discovered in the directory tree the conftest is in.
This means that if you put your doctest with your source code, the relevant conftest.py needs to be in the same directory tree.
Fixtures will not be discovered in a sibling directory tree!

Output format
-------------
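A hedged sketch of such a conftest fixture (the injected name is illustrative), using the built-in ``doctest_namespace`` fixture:

.. code-block:: python

    # content of conftest.py, in the same directory tree as the doctests
    import pytest


    @pytest.fixture(autouse=True)
    def add_answer(doctest_namespace):
        # doctests in this tree can then refer to "answer" directly
        doctest_namespace["answer"] = 42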
@@ -1,6 +1,8 @@
from pytest import raises
import six

import _pytest._code
import py
import pytest
from pytest import raises


def otherfunc(a, b):
@@ -15,15 +17,11 @@ def otherfunc_multi(a, b):
    assert a == b


@pytest.mark.parametrize("param1, param2", [(3, 6)])
def test_generative(param1, param2):
    assert param1 * 2 < param2


def pytest_generate_tests(metafunc):
    if "param1" in metafunc.fixturenames:
        metafunc.addcall(funcargs=dict(param1=3, param2=6))


class TestFailing(object):
    def test_simple(self):
        def f():
@@ -177,7 +175,7 @@ def test_dynamic_compile_shows_nicely():
    name = "abc-123"
    module = imp.new_module(name)
    code = _pytest._code.compile(src, name, "exec")
    py.builtin.exec_(code, module.__dict__)
    six.exec_(code, module.__dict__)
    sys.modules[name] = module
    module.foo()
@@ -247,7 +245,7 @@ class TestCustomAssertMsg(object):
        b = 2
        assert (
            A.a == b
        ), "A.a appears not to be b\n" "or does not appear to be b\none of those"
        ), "A.a appears not to be b\nor does not appear to be b\none of those"

    def test_custom_repr(self):
        class JSON(object):
@@ -1,6 +1,7 @@
import pytest
import py

import pytest

mydir = py.path.local(__file__).dirpath()


@@ -10,4 +11,4 @@ def pytest_runtest_setup(item):
        return
    mod = item.getparent(pytest.Module).obj
    if hasattr(mod, "hello"):
        print("mod.hello %r" % (mod.hello,))
        print("mod.hello {!r}".format(mod.hello))
@@ -1,4 +1,3 @@

hello = "world"


@@ -1,4 +1,3 @@

import py

failure_demo = py.path.local(__file__).dirpath("failure_demo.py")


@@ -1,4 +1,3 @@

import pytest
@@ -27,11 +27,13 @@ You can "mark" a test function with custom metadata like this::

.. versionadded:: 2.2

You can then restrict a test run to only run tests marked with ``webtest``::
You can then restrict a test run to only run tests marked with ``webtest``:

.. code-block:: pytest

    $ pytest -v -m webtest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
    cachedir: .pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile:
    collecting ... collected 4 items / 3 deselected
@@ -40,11 +42,13 @@ You can then restrict a test run to only run tests marked with ``webtest``::

    ================== 1 passed, 3 deselected in 0.12 seconds ==================

Or the inverse, running all tests except the webtest ones::
Or the inverse, running all tests except the webtest ones:

.. code-block:: pytest

    $ pytest -v -m "not webtest"
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
    cachedir: .pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile:
    collecting ... collected 4 items / 1 deselected
@@ -60,11 +64,13 @@ Selecting tests based on their node ID

You can provide one or more :ref:`node IDs <node-id>` as positional
arguments to select only specified tests. This makes it easy to select
tests based on their module, class, method, or function name::
tests based on their module, class, method, or function name:

.. code-block:: pytest

    $ pytest -v test_server.py::TestClass::test_method
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
    cachedir: .pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile:
    collecting ... collected 1 item
@@ -73,11 +79,13 @@ tests based on their module, class, method, or function name::

    ========================= 1 passed in 0.12 seconds =========================

You can also select on the class::
You can also select on the class:

.. code-block:: pytest

    $ pytest -v test_server.py::TestClass
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
    cachedir: .pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile:
    collecting ... collected 1 item
@@ -86,19 +94,21 @@ You can also select on the class::

    ========================= 1 passed in 0.12 seconds =========================

Or select multiple nodes::

    $ pytest -v test_server.py::TestClass test_server.py::test_send_http
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
    cachedir: .pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile:
    collecting ... collected 2 items

    test_server.py::TestClass::test_method PASSED [ 50%]
    test_server.py::test_send_http PASSED [100%]

    ========================= 2 passed in 0.12 seconds =========================

Or select multiple nodes:

.. code-block:: pytest

    $ pytest -v test_server.py::TestClass test_server.py::test_send_http
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
    cachedir: .pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile:
    collecting ... collected 2 items

    test_server.py::TestClass::test_method PASSED [ 50%]
    test_server.py::test_send_http PASSED [100%]

    ========================= 2 passed in 0.12 seconds =========================

.. _node-id:
@@ -124,11 +134,13 @@ Using ``-k expr`` to select tests based on their name

You can use the ``-k`` command line option to specify an expression
which implements a substring match on the test names instead of the
exact match on markers that ``-m`` provides. This makes it easy to
select tests based on their names::
select tests based on their names:

.. code-block:: pytest

    $ pytest -v -k http # running with the above defined example module
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
    cachedir: .pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile:
    collecting ... collected 4 items / 3 deselected
@@ -137,11 +149,13 @@ select tests based on their names::

    ================== 1 passed, 3 deselected in 0.12 seconds ==================

And you can also run all tests except the ones that match the keyword::
And you can also run all tests except the ones that match the keyword:

.. code-block:: pytest

    $ pytest -k "not send_http" -v
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
    cachedir: .pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile:
    collecting ... collected 4 items / 1 deselected
@@ -152,11 +166,13 @@ And you can also run all tests except the ones that match the keyword::
|
||||
|
||||
================== 3 passed, 1 deselected in 0.12 seconds ==================
|
||||
|
||||
Or to select "http" and "quick" tests::
|
||||
Or to select "http" and "quick" tests:
|
||||
|
||||
.. code-block:: pytest
|
||||
|
||||
$ pytest -k "http or quick" -v
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
|
||||
cachedir: .pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 4 items / 2 deselected
|
||||
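
For orientation, here is a minimal sketch of an example module these ``-k`` selections could run against; the test names are assumptions consistent with the collection counts above:

.. code-block:: python

    # content of test_server.py -- hypothetical sketch
    class TestClass(object):
        def test_method(self):
            pass


    def test_send_http():
        pass


    def test_something_quick():
        pass


    def test_another():
        pass
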
@@ -200,15 +216,17 @@ You can ask which markers exist for your test suite - the list includes our just
$ pytest --markers
@pytest.mark.webtest: mark a test as a webtest.

@pytest.mark.filterwarnings(warning): add a warning filter to the given test. see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings

@pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test.

@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see https://docs.pytest.org/en/latest/skipping.html

@pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
@pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See https://docs.pytest.org/en/latest/skipping.html

@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see https://docs.pytest.org/en/latest/parametrize.html for more info and examples.

@pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
@pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see https://docs.pytest.org/en/latest/fixture.html#usefixtures

@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.

@@ -269,8 +287,12 @@ You can also set a module level marker::
import pytest
pytestmark = pytest.mark.webtest

in which case it will be applied to all functions and
methods defined in the module.
or multiple markers::

pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]

in which case markers will be applied (in left-to-right order) to
all functions and methods defined in the module.
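
As a rough sketch (the file and test names here are assumptions), a module carrying such a list of markers could look like:

.. code-block:: python

    # content of test_module_markers.py -- hypothetical sketch
    import pytest

    pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]


    def test_one():
        pass  # carries both the webtest and slowtest markers


    class TestGroup(object):
        def test_two(self):
            pass  # methods pick up the module-level markers as well
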

.. _`marking individual tests when using parametrize`:

@@ -299,10 +321,10 @@ Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with
.. note::

If the data you are parametrizing happen to be single callables, you need to be careful
when marking these items. `pytest.mark.xfail(my_func)` won't work because it's also the
when marking these items. ``pytest.mark.xfail(my_func)`` won't work because it's also the
signature of a function being decorated. To resolve this ambiguity, you need to pass a
reason argument:
`pytest.mark.xfail(func_bar, reason="Issue#7")`.
``pytest.mark.xfail(func_bar, reason="Issue#7")``.
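
Inside a ``parametrize`` list, the unambiguous spelling uses ``pytest.param``; a minimal sketch, assuming a hypothetical ``func_bar``:

.. code-block:: python

    import pytest


    def func_bar():
        return False


    @pytest.mark.parametrize(
        "func",
        [pytest.param(func_bar, marks=pytest.mark.xfail(reason="Issue#7"))],
    )
    def test_calls(func):
        # func_bar returns False, so this fails and is reported as xfail
        assert func()
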

.. _`adding a custom marker from a plugin`:
@@ -345,11 +367,13 @@ A test file using this local plugin::
pass

and an example invocation specifying a different environment than what
the test needs::
the test needs:

.. code-block:: pytest

$ pytest -E stage2
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

@@ -357,11 +381,13 @@ the test needs::

======================== 1 skipped in 0.12 seconds =========================

and here is one that specifies exactly the environment needed::
and here is one that specifies exactly the environment needed:

.. code-block:: pytest

$ pytest -E stage1
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

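The local plugin being exercised here is a small conftest; a sketch reconstructed from the surrounding text (not the verbatim source):

.. code-block:: python

    # content of conftest.py -- sketch of the environment-marker plugin
    import pytest


    def pytest_addoption(parser):
        parser.addoption(
            "-E",
            action="store",
            metavar="NAME",
            help="only run tests matching the environment NAME.",
        )


    def pytest_runtest_setup(item):
        # skip unless the test's env marker matches the -E value
        envnames = [mark.args[0] for mark in item.iter_markers(name="env")]
        if envnames and item.config.getoption("-E") not in envnames:
            pytest.skip("test requires env in %r" % envnames)
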
@@ -374,15 +400,17 @@ The ``--markers`` option always gives you a list of available markers::
$ pytest --markers
@pytest.mark.env(name): mark test to run only on named environment

@pytest.mark.filterwarnings(warning): add a warning filter to the given test. see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings

@pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test.

@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see https://docs.pytest.org/en/latest/skipping.html

@pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
@pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See https://docs.pytest.org/en/latest/skipping.html

@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see https://docs.pytest.org/en/latest/parametrize.html for more info and examples.

@pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
@pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see https://docs.pytest.org/en/latest/fixture.html#usefixtures

@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.

@@ -420,7 +448,9 @@ However, if there is a callable as the single positional argument with no keywor
def test_with_args():
pass

The output is as follows::
The output is as follows:

.. code-block:: pytest

$ pytest -q -s
Mark(name='my_marker', args=(<function hello_world at 0xdeadbeef>,), kwargs={})
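
A sketch of the pieces that could produce this line -- the marker usage from the surrounding example plus a hypothetical conftest hook that prints it:

.. code-block:: python

    # content of test_mark_callable.py -- hypothetical sketch
    import pytest


    def hello_world(*args, **kwargs):
        pass


    @pytest.mark.my_marker(hello_world)
    def test_with_args():
        pass

.. code-block:: python

    # content of conftest.py -- sketch
    import sys


    def pytest_runtest_setup(item):
        for marker in item.iter_markers(name="my_marker"):
            print(marker)
            sys.stdout.flush()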
@@ -458,10 +488,12 @@ test function. From a conftest file we can read it like this::

def pytest_runtest_setup(item):
for mark in item.iter_markers(name='glob'):
print ("glob args=%s kwargs=%s" %(mark.args, mark.kwargs))
print("glob args=%s kwargs=%s" % (mark.args, mark.kwargs))
sys.stdout.flush()

Let's run this without capturing output and see what we get::
Let's run this without capturing output and see what we get:

.. code-block:: pytest

$ pytest -q -s
glob args=('function',) kwargs={'x': 3}
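
The test side of this example applies the ``glob`` marker with positional and keyword arguments; a minimal sketch matching the printed output (the module name is an assumption):

.. code-block:: python

    # content of test_glob.py -- hypothetical sketch
    import pytest


    @pytest.mark.glob("function", x=3)
    def test_something():
        pass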
@@ -516,11 +548,13 @@ Let's do a little test file to show how this looks like::
def test_runs_everywhere():
pass

then you will see two tests skipped and two executed tests as expected::
then you will see two tests skipped and two executed tests as expected:

.. code-block:: pytest

$ pytest -rs # this option reports skip reasons
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

@@ -530,11 +564,13 @@ then you will see two tests skipped and two executed tests as expected::

=================== 2 passed, 2 skipped in 0.12 seconds ====================

Note that if you specify a platform via the marker-command line option like this::
Note that if you specify a platform via the marker-command line option like this:

.. code-block:: pytest

$ pytest -m linux
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items / 3 deselected

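The skipping shown above typically comes from a short conftest hook that compares platform-named markers against ``sys.platform``; a sketch, assuming the marker names mirror platform names:

.. code-block:: python

    # content of conftest.py -- sketch
    import sys

    import pytest

    ALL_PLATFORMS = {"darwin", "linux", "win32"}


    def pytest_runtest_setup(item):
        # markers whose names are platform names restrict where the test runs
        supported = ALL_PLATFORMS.intersection(mark.name for mark in item.iter_markers())
        if supported and sys.platform not in supported:
            pytest.skip("cannot run on platform %s" % sys.platform)
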
@@ -581,48 +617,52 @@ We want to dynamically define two markers and can do it in a
elif "event" in item.nodeid:
item.add_marker(pytest.mark.event)

We can now use the ``-m option`` to select one set::
We can now use the ``-m option`` to select one set:

$ pytest -m interface --tb=short
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items / 2 deselected
.. code-block:: pytest

test_module.py FF [100%]
$ pytest -m interface --tb=short
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items / 2 deselected

================================= FAILURES =================================
__________________________ test_interface_simple ___________________________
test_module.py:3: in test_interface_simple
assert 0
E assert 0
__________________________ test_interface_complex __________________________
test_module.py:6: in test_interface_complex
assert 0
E assert 0
================== 2 failed, 2 deselected in 0.12 seconds ==================
test_module.py FF [100%]

or to select both "event" and "interface" tests::
================================= FAILURES =================================
__________________________ test_interface_simple ___________________________
test_module.py:3: in test_interface_simple
assert 0
E assert 0
__________________________ test_interface_complex __________________________
test_module.py:6: in test_interface_complex
assert 0
E assert 0
================== 2 failed, 2 deselected in 0.12 seconds ==================

$ pytest -m "interface or event" --tb=short
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items / 1 deselected
or to select both "event" and "interface" tests:

test_module.py FFF [100%]
.. code-block:: pytest

================================= FAILURES =================================
__________________________ test_interface_simple ___________________________
test_module.py:3: in test_interface_simple
assert 0
E assert 0
__________________________ test_interface_complex __________________________
test_module.py:6: in test_interface_complex
assert 0
E assert 0
____________________________ test_event_simple _____________________________
test_module.py:9: in test_event_simple
assert 0
E assert 0
================== 3 failed, 1 deselected in 0.12 seconds ==================
$ pytest -m "interface or event" --tb=short
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items / 1 deselected

test_module.py FFF [100%]

================================= FAILURES =================================
__________________________ test_interface_simple ___________________________
test_module.py:3: in test_interface_simple
assert 0
E assert 0
__________________________ test_interface_complex __________________________
test_module.py:6: in test_interface_complex
assert 0
E assert 0
____________________________ test_event_simple _____________________________
test_module.py:9: in test_event_simple
assert 0
E assert 0
================== 3 failed, 1 deselected in 0.12 seconds ==================

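The hook that attaches these markers during collection might read, as a sketch consistent with the two branches quoted above:

.. code-block:: python

    # content of conftest.py -- sketch
    import pytest


    def pytest_collection_modifyitems(items):
        for item in items:
            if "interface" in item.nodeid:
                item.add_marker(pytest.mark.interface)
            elif "event" in item.nodeid:
                item.add_marker(pytest.mark.event)
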
@@ -2,9 +2,11 @@
module containing parametrized tests testing cross-python
serialization via the pickle module.
"""
import textwrap

import py

import pytest
import _pytest._code

pythonlist = ["python2.7", "python3.4", "python3.5"]

@@ -24,42 +26,44 @@ class Python(object):
def __init__(self, version, picklefile):
self.pythonpath = py.path.local.sysfind(version)
if not self.pythonpath:
pytest.skip("%r not found" % (version,))
pytest.skip("{!r} not found".format(version))
self.picklefile = picklefile

def dumps(self, obj):
dumpfile = self.picklefile.dirpath("dump.py")
dumpfile.write(
_pytest._code.Source(
"""
import pickle
f = open(%r, 'wb')
s = pickle.dump(%r, f, protocol=2)
f.close()
"""
% (str(self.picklefile), obj)
textwrap.dedent(
r"""
import pickle
f = open({!r}, 'wb')
s = pickle.dump({!r}, f, protocol=2)
f.close()
""".format(
str(self.picklefile), obj
)
)
)
py.process.cmdexec("%s %s" % (self.pythonpath, dumpfile))
py.process.cmdexec("{} {}".format(self.pythonpath, dumpfile))

def load_and_is_true(self, expression):
loadfile = self.picklefile.dirpath("load.py")
loadfile.write(
_pytest._code.Source(
"""
import pickle
f = open(%r, 'rb')
obj = pickle.load(f)
f.close()
res = eval(%r)
if not res:
raise SystemExit(1)
"""
% (str(self.picklefile), expression)
textwrap.dedent(
r"""
import pickle
f = open({!r}, 'rb')
obj = pickle.load(f)
f.close()
res = eval({!r})
if not res:
raise SystemExit(1)
""".format(
str(self.picklefile), expression
)
)
)
print(loadfile)
py.process.cmdexec("%s %s" % (self.pythonpath, loadfile))
py.process.cmdexec("{} {}".format(self.pythonpath, loadfile))


@pytest.mark.parametrize("obj", [42, {}, {1: 3}])

@@ -23,11 +23,13 @@ You can create a simple example file:
:literal:

and if you installed `PyYAML`_ or a compatible YAML-parser you can
now execute the test specification::
now execute the test specification:

.. code-block:: pytest

nonpython $ pytest test_simple.yml
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collected 2 items

@@ -55,11 +57,13 @@ your own domain specific testing language this way.
will be reported as a (red) string.

``reportinfo()`` is used for representing the test location and is also
consulted when reporting in ``verbose`` mode::
consulted when reporting in ``verbose`` mode:

.. code-block:: pytest

nonpython $ pytest -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
cachedir: .pytest_cache
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collecting ... collected 2 items
@@ -77,15 +81,18 @@ consulted when reporting in ``verbose`` mode::
.. regendoc:wipe

While developing your custom test collection and execution it's also
interesting to just look at the collection tree::
interesting to just look at the collection tree:

.. code-block:: pytest

nonpython $ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collected 2 items
<YamlFile 'test_simple.yml'>
<YamlItem 'hello'>
<YamlItem 'ok'>
<Package '$REGENDOC_TMPDIR/nonpython'>
<YamlFile 'test_simple.yml'>
<YamlItem 'hello'>
<YamlItem 'ok'>

======================= no tests ran in 0.12 seconds =======================

@@ -1,5 +1,4 @@
# content of conftest.py

import pytest

@@ -42,14 +42,18 @@ Now we add a test configuration like this::
end = 2
metafunc.parametrize("param1", range(end))

This means that we only run 2 tests if we do not pass ``--all``::
This means that we only run 2 tests if we do not pass ``--all``:

.. code-block:: pytest

$ pytest -q test_compute.py
.. [100%]
2 passed in 0.12 seconds

We run only two computations, so we see two dots.
Let's run the full monty::
Let's run the full monty:

.. code-block:: pytest

$ pytest -q --all
....F [100%]
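
Behind this run sit an option hook and a generate hook; a sketch consistent with the configuration quoted above:

.. code-block:: python

    # content of conftest.py -- sketch
    def pytest_addoption(parser):
        parser.addoption("--all", action="store_true", help="run all combinations")


    def pytest_generate_tests(metafunc):
        if "param1" in metafunc.fixturenames:
            # 5 parameter values with --all, otherwise only 2
            end = 5 if metafunc.config.getoption("all") else 2
            metafunc.parametrize("param1", range(end))

.. code-block:: python

    # content of test_compute.py -- sketch
    def test_compute(param1):
        assert param1 < 4  # fails for the fifth value, matching ....F above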
@@ -134,12 +138,13 @@ used as the test IDs. These are succinct, but can be a pain to maintain.
In ``test_timedistance_v2``, we specified ``ids`` as a function that can generate a
string representation to make part of the test ID. So our ``datetime`` values use the
label generated by ``idfn``, but because we didn't generate a label for ``timedelta``
objects, they are still using the default pytest representation::
objects, they are still using the default pytest representation:

.. code-block:: pytest

$ pytest test_time.py --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 8 items
<Module 'test_time.py'>
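
A sketch of such an ``ids`` callable; the ``datetime`` handling is an assumption consistent with the description above:

.. code-block:: python

    import datetime


    def idfn(val):
        if isinstance(val, datetime.datetime):
            # readable label for datetimes only; returning None falls back
            # to pytest's default representation for other values
            return val.strftime("%Y%m%d")
        return None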
@@ -191,11 +196,13 @@ only have to work a bit to construct the correct arguments for pytest's
def test_demo2(self, attribute):
assert isinstance(attribute, str)

this is a fully self-contained example which you can run with::
this is a fully self-contained example which you can run with:

.. code-block:: pytest

$ pytest test_scenarios.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

@@ -203,17 +210,17 @@ this is a fully self-contained example which you can run with::

========================= 4 passed in 0.12 seconds =========================

If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:

.. code-block:: pytest

$ pytest --collect-only test_scenarios.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
<Module 'test_scenarios.py'>
<Class 'TestSampleWithScenarios'>
<Instance '()'>
<Function 'test_demo1[basic]'>
<Function 'test_demo2[basic]'>
<Function 'test_demo1[advanced]'>
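
For reference, a sketch of the scenario machinery consistent with the collected IDs above -- a ``pytest_generate_tests`` hook reading a class-level ``scenarios`` list:

.. code-block:: python

    # content of test_scenarios.py -- sketch
    def pytest_generate_tests(metafunc):
        idlist = []
        argvalues = []
        for scenario in metafunc.cls.scenarios:
            idlist.append(scenario[0])
            items = scenario[1].items()
            argnames = [x[0] for x in items]
            argvalues.append([x[1] for x in items])
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")


    scenario1 = ("basic", {"attribute": "value"})
    scenario2 = ("advanced", {"attribute": "value2"})


    class TestSampleWithScenarios(object):
        scenarios = [scenario1, scenario2]

        def test_demo1(self, attribute):
            assert isinstance(attribute, str)

        def test_demo2(self, attribute):
            assert isinstance(attribute, str)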
@@ -269,11 +276,13 @@ creates a database object for the actual test invocations::
else:
raise ValueError("invalid internal test config")

Let's first see what it looks like at collection time::
Let's first see what it looks like at collection time:

.. code-block:: pytest

$ pytest test_backends.py --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items
<Module 'test_backends.py'>
@@ -282,7 +291,9 @@ Let's first see how it looks like at collection time::

======================= no tests ran in 0.12 seconds =======================

And then when we run the test::
And then when we run the test:

.. code-block:: pytest

$ pytest -q test_backends.py
.F [100%]
@@ -330,11 +341,13 @@ will be passed to respective fixture function::
assert x == 'aaa'
assert y == 'b'

The result of this test will be successful::
The result of this test will be successful:

.. code-block:: pytest

$ pytest test_indirect_list.py --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item
<Module 'test_indirect_list.py'>
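
The module collected here pairs two fixtures with a partial ``indirect`` list; a sketch whose fixture bodies are assumptions consistent with the asserted values:

.. code-block:: python

    # content of test_indirect_list.py -- sketch
    import pytest


    @pytest.fixture(scope="function")
    def x(request):
        return request.param * 3


    @pytest.fixture(scope="function")
    def y(request):
        return request.param * 2


    @pytest.mark.parametrize("x, y", [("a", "b")], indirect=["x"])
    def test_indirect(x, y):
        # x goes through its fixture ("a" * 3); y is passed directly
        assert x == "aaa"
        assert y == "b"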
@@ -378,7 +391,9 @@ parametrizer`_ but in a lot less code::
pytest.raises(ZeroDivisionError, "a/b")

Our test generator looks up a class-level definition which specifies which
argument sets to use for each test function. Let's run it::
argument sets to use for each test function. Let's run it:

.. code-block:: pytest

$ pytest -q
F.. [100%]
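
A sketch of that generator and the class-level table it reads; the names follow the surrounding example, so treat the details as assumptions:

.. code-block:: python

    # content of test_parametrize.py -- sketch
    import pytest


    def pytest_generate_tests(metafunc):
        # look up the argument sets declared for this test function
        funcarglist = metafunc.cls.params[metafunc.function.__name__]
        argnames = sorted(funcarglist[0])
        metafunc.parametrize(
            argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]
        )


    class TestClass(object):
        # a map specifying multiple argument sets for each test method
        params = {
            "test_equals": [dict(a=1, b=2), dict(a=3, b=3)],
            "test_zerodivision": [dict(a=1, b=0)],
        }

        def test_equals(self, a, b):
            assert a == b

        def test_zerodivision(self, a, b):
            with pytest.raises(ZeroDivisionError):
                a / b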
@@ -408,11 +423,15 @@ is to be run with different sets of arguments for its three arguments:

.. literalinclude:: multipython.py

Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (3 interpreters times 3 interpreters times 3 objects to serialize/deserialize)::
Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (3 interpreters times 3 interpreters times 3 objects to serialize/deserialize):

.. code-block:: pytest

. $ pytest -rs -q multipython.py
........................... [100%]
27 passed in 0.12 seconds
...sss...sssssssss...sss... [100%]
========================= short test summary info ==========================
SKIP [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.4' not found
12 passed, 15 skipped in 0.12 seconds

Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------
@@ -455,11 +474,13 @@ And finally a little test module::
assert round(basemod.func1(), 3) == round(optmod.func1(), 3)

If you run this with reporting for skips enabled::
If you run this with reporting for skips enabled:

.. code-block:: pytest

$ pytest -rs test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

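The fixture pair feeding this test imports the base implementation and the optional ones via indirect parametrization; a rough sketch (the module names are assumptions):

.. code-block:: python

    # content of conftest.py -- sketch
    import pytest


    @pytest.fixture
    def basemod(request):
        return pytest.importorskip("base")


    @pytest.fixture(params=["opt1", "opt2"])
    def optmod(request):
        # skip the parametrized test if the optional module is missing
        return pytest.importorskip(request.param)
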
@@ -510,21 +531,22 @@ we mark the rest three parametrized tests with the custom marker ``basic``,
and for the fourth test we also use the built-in mark ``xfail`` to indicate this
test is expected to fail. For explicitness, we set test ids for some tests.

Then run ``pytest`` with verbose mode and with only the ``basic`` marker::
Then run ``pytest`` with verbose mode and with only the ``basic`` marker:

pytest -v -m basic
============================================ test session starts =============================================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
.. code-block:: pytest

$ pytest -v -m basic
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
cachedir: .pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
collecting ... collected 17 items / 14 deselected

test_pytest_param_example.py::test_eval[1+7-8] PASSED
test_pytest_param_example.py::test_eval[basic_2+4] PASSED
test_pytest_param_example.py::test_eval[basic_6*9] xfail
========================================== short test summary info ===========================================
XFAIL test_pytest_param_example.py::test_eval[basic_6*9]
test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%]
test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%]
test_pytest_param_example.py::test_eval[basic_6*9] xfail [100%]

============================================= 1 tests deselected =============================================
============ 2 passed, 14 deselected, 1 xfailed in 0.12 seconds ============

As the result:

@@ -1,4 +1,5 @@
import sys

import pytest

py3 = sys.version_info[0] >= 3

@@ -1,6 +1,5 @@

def test_exception_syntax():
try:
0/0
0 / 0
except ZeroDivisionError, e:
pass
assert e

@@ -2,4 +2,4 @@ def test_exception_syntax():
try:
0 / 0
except ZeroDivisionError as e:
pass
assert e

@@ -24,20 +24,22 @@ by passing the ``--ignore=path`` option on the cli. ``pytest`` allows multiple
'-- test_world_03.py

Now if you invoke ``pytest`` with ``--ignore=tests/foobar/test_foobar_03.py --ignore=tests/hello/``,
you will see that ``pytest`` only collects test-modules which do not match the patterns specified::
you will see that ``pytest`` only collects test-modules which do not match the patterns specified:

========= test session starts ==========
platform darwin -- Python 2.7.10, pytest-2.8.2, py-1.4.30, pluggy-0.3.1
.. code-block:: pytest

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 5 items

tests/example/test_example_01.py .
tests/example/test_example_02.py .
tests/example/test_example_03.py .
tests/foobar/test_foobar_01.py .
tests/foobar/test_foobar_02.py .
tests/example/test_example_01.py . [ 20%]
tests/example/test_example_02.py . [ 40%]
tests/example/test_example_03.py . [ 60%]
tests/foobar/test_foobar_01.py . [ 80%]
tests/foobar/test_foobar_02.py . [100%]

======= 5 passed in 0.02 seconds =======
========================= 5 passed in 0.02 seconds =========================

Deselect tests during test collection
-------------------------------------
@@ -100,19 +102,21 @@ Changing naming conventions

You can configure different naming conventions by setting
the :confval:`python_files`, :confval:`python_classes` and
:confval:`python_functions` configuration options. Example::
:confval:`python_functions` configuration options.
Here is an example::

# content of pytest.ini
# Example 1: have pytest look for "check" instead of "test"
# can also be defined in tox.ini or setup.cfg file, although the section
# name in setup.cfg files should be "tool:pytest"
[pytest]
python_files=check_*.py
python_classes=Check
python_functions=*_check
python_files = check_*.py
python_classes = Check
python_functions = *_check

This would make ``pytest`` look for tests in files that match the ``check_*
.py`` glob-pattern, ``Check`` prefixes in classes, and functions and methods
that match ``*_check``. For example, if we have::
that match ``*_check``. For example, if we have::

# content of check_myapp.py
class CheckMyApp(object):
@@ -121,26 +125,35 @@ that match ``*_check``. For example, if we have::
def complex_check(self):
pass

then the test collection looks like this::
The test collection would look like this:

.. code-block:: pytest

$ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 2 items
<Module 'check_myapp.py'>
<Class 'CheckMyApp'>
<Instance '()'>
<Function 'simple_check'>
<Function 'complex_check'>

======================= no tests ran in 0.12 seconds =======================

You can check for multiple glob patterns by adding a space between the patterns::

# Example 2: have pytest look for files with "test" and "example"
# content of pytest.ini, tox.ini, or setup.cfg file (replace "pytest"
# with "tool:pytest" for setup.cfg)
[pytest]
python_files = test_*.py example_*.py

.. note::

the ``python_functions`` and ``python_classes`` options have no effect
for ``unittest.TestCase`` test discovery because pytest delegates
detection of test case methods to unittest code.
discovery of test case methods to unittest code.

Interpreting cmdline arguments as Python packages
-----------------------------------------------------
@@ -167,17 +180,18 @@ treat it as a filesystem path.
Finding out what is collected
-----------------------------------------------

You can always peek at the collection tree without running tests like this::
You can always peek at the collection tree without running tests like this:

.. code-block:: pytest

. $ pytest --collect-only pythoncollection.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 3 items
<Module 'CWD/pythoncollection.py'>
<Function 'test_function'>
<Class 'TestClass'>
<Instance '()'>
<Function 'test_method'>
<Function 'test_anothermethod'>

@@ -223,7 +237,9 @@ and a ``setup.py`` dummy file like this::
0/0 # will raise exception if imported

If you run with a Python 2 interpreter then you will find the one test and will
leave out the ``setup.py`` file::
leave out the ``setup.py`` file:

.. code-block:: pytest

#$ pytest --collect-only
====== test session starts ======
@@ -236,11 +252,13 @@ leave out the ``setup.py`` file::
====== no tests ran in 0.04 seconds ======

If you run with a Python 3 interpreter both the one test and the ``setup.py``
file will be left out::
file will be left out:

.. code-block:: pytest

$ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 0 items

@@ -7,26 +7,29 @@ Demo of Python failure reports with pytest
Here is a nice run of several tens of failures
and how ``pytest`` presents things (unfortunately
not showing the nice colors here in the HTML that you
get on the terminal - we are working on that)::
get on the terminal - we are working on that):

.. code-block:: pytest

assertion $ pytest failure_demo.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/assertion, inifile:
collected 42 items

failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%]

================================= FAILURES =================================
____________________________ test_generative[0] ____________________________
___________________________ test_generative[3-6] ___________________________

param1 = 3, param2 = 6

@pytest.mark.parametrize("param1, param2", [(3, 6)])
def test_generative(param1, param2):
> assert param1 * 2 < param2
E assert (3 * 2) < 6

failure_demo.py:19: AssertionError
failure_demo.py:22: AssertionError
_________________________ TestFailing.test_simple __________________________

self = <failure_demo.TestFailing object at 0xdeadbeef>
@@ -43,7 +46,7 @@ get on the terminal - we are working on that)::
E + where 42 = <function TestFailing.test_simple.<locals>.f at 0xdeadbeef>()
E + and 43 = <function TestFailing.test_simple.<locals>.g at 0xdeadbeef>()

failure_demo.py:35: AssertionError
failure_demo.py:33: AssertionError
____________________ TestFailing.test_simple_multiline _____________________

self = <failure_demo.TestFailing object at 0xdeadbeef>
@@ -51,7 +54,7 @@ get on the terminal - we are working on that)::
def test_simple_multiline(self):
> otherfunc_multi(42, 6 * 9)

failure_demo.py:38:
failure_demo.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

a = 42, b = 54
@@ -60,7 +63,7 @@ get on the terminal - we are working on that)::
> assert a == b
E assert 42 == 54

failure_demo.py:15: AssertionError
failure_demo.py:17: AssertionError
___________________________ TestFailing.test_not ___________________________

self = <failure_demo.TestFailing object at 0xdeadbeef>
@@ -73,7 +76,7 @@ get on the terminal - we are working on that)::
E assert not 42
E + where 42 = <function TestFailing.test_not.<locals>.f at 0xdeadbeef>()

failure_demo.py:44: AssertionError
failure_demo.py:42: AssertionError
_________________ TestSpecialisedExplanations.test_eq_text _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -84,7 +87,7 @@ get on the terminal - we are working on that)::
E - spam
E + eggs

failure_demo.py:49: AssertionError
failure_demo.py:47: AssertionError
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -97,7 +100,7 @@ get on the terminal - we are working on that)::
E + foo 2 bar
E ? ^

failure_demo.py:52: AssertionError
failure_demo.py:50: AssertionError
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -110,7 +113,7 @@ get on the terminal - we are working on that)::
E + eggs
E bar

failure_demo.py:55: AssertionError
failure_demo.py:53: AssertionError
______________ TestSpecialisedExplanations.test_eq_long_text _______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -127,7 +130,7 @@ get on the terminal - we are working on that)::
E + 1111111111b222222222
E ? ^

failure_demo.py:60: AssertionError
failure_demo.py:58: AssertionError
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -147,7 +150,7 @@ get on the terminal - we are working on that)::
E
E ...Full output truncated (7 lines hidden), use '-vv' to show

failure_demo.py:65: AssertionError
failure_demo.py:63: AssertionError
_________________ TestSpecialisedExplanations.test_eq_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -158,7 +161,7 @@ get on the terminal - we are working on that)::
E At index 2 diff: 2 != 3
E Use -v to get the full diff

failure_demo.py:68: AssertionError
failure_demo.py:66: AssertionError
______________ TestSpecialisedExplanations.test_eq_list_long _______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -171,7 +174,7 @@ get on the terminal - we are working on that)::
E At index 100 diff: 1 != 2
E Use -v to get the full diff

failure_demo.py:73: AssertionError
failure_demo.py:71: AssertionError
_________________ TestSpecialisedExplanations.test_eq_dict _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -189,7 +192,7 @@ get on the terminal - we are working on that)::
E
E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:76: AssertionError
failure_demo.py:74: AssertionError
_________________ TestSpecialisedExplanations.test_eq_set __________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -207,7 +210,7 @@ get on the terminal - we are working on that)::
E
E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:79: AssertionError
failure_demo.py:77: AssertionError
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -218,7 +221,7 @@ get on the terminal - we are working on that)::
E Right contains more items, first extra item: 3
E Use -v to get the full diff

failure_demo.py:82: AssertionError
failure_demo.py:80: AssertionError
_________________ TestSpecialisedExplanations.test_in_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -227,7 +230,7 @@ get on the terminal - we are working on that)::
> assert 1 in [0, 2, 3, 4, 5]
E assert 1 in [0, 2, 3, 4, 5]

failure_demo.py:85: AssertionError
failure_demo.py:83: AssertionError
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -246,7 +249,7 @@ get on the terminal - we are working on that)::
E
E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:89: AssertionError
failure_demo.py:87: AssertionError
___________ TestSpecialisedExplanations.test_not_in_text_single ____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -259,7 +262,7 @@ get on the terminal - we are working on that)::
E single foo line
E ? +++

failure_demo.py:93: AssertionError
failure_demo.py:91: AssertionError
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -272,7 +275,7 @@ get on the terminal - we are working on that)::
E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? +++

failure_demo.py:97: AssertionError
failure_demo.py:95: AssertionError
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
@@ -285,7 +288,7 @@ get on the terminal - we are working on that)::
E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

failure_demo.py:101: AssertionError
failure_demo.py:99: AssertionError
______________________________ test_attribute ______________________________

def test_attribute():
@@ -297,7 +300,7 @@ get on the terminal - we are working on that)::
E assert 1 == 2
E + where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0xdeadbeef>.b

failure_demo.py:109: AssertionError
failure_demo.py:107: AssertionError
_________________________ test_attribute_instance __________________________

def test_attribute_instance():
@@ -309,7 +312,7 @@ get on the terminal - we are working on that)::
E + where 1 = <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef>.b
E + where <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()

failure_demo.py:116: AssertionError
failure_demo.py:114: AssertionError
__________________________ test_attribute_failure __________________________

def test_attribute_failure():
@@ -322,7 +325,7 @@ get on the terminal - we are working on that)::
i = Foo()
> assert i.b == 2

failure_demo.py:127:
failure_demo.py:125:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <failure_demo.test_attribute_failure.<locals>.Foo object at 0xdeadbeef>
@@ -331,7 +334,7 @@ get on the terminal - we are working on that)::
> raise Exception("Failed to get attrib")
E Exception: Failed to get attrib

failure_demo.py:122: Exception
failure_demo.py:120: Exception
_________________________ test_attribute_multiple __________________________

def test_attribute_multiple():
@@ -348,7 +351,7 @@ get on the terminal - we are working on that)::
E + and 2 = <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef>.b
E + where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()

failure_demo.py:137: AssertionError
failure_demo.py:135: AssertionError
__________________________ TestRaises.test_raises __________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>
@@ -357,13 +360,13 @@ get on the terminal - we are working on that)::
s = "qwe" # NOQA
> raises(TypeError, "int(s)")

failure_demo.py:147:
failure_demo.py:145:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

> int(s)
E ValueError: invalid literal for int() with base 10: 'qwe'

<0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:635>:1: ValueError
<0-codegen $REGENDOC_TMPDIR/assertion/failure_demo.py:145>:1: ValueError
______________________ TestRaises.test_raises_doesnt _______________________

self = <failure_demo.TestRaises object at 0xdeadbeef>
@@ -372,7 +375,7 @@ get on the terminal - we are working on that)::
> raises(IOError, "int('3')")
E Failed: DID NOT RAISE <class 'OSError'>

failure_demo.py:150: Failed
failure_demo.py:148: Failed
__________________________ TestRaises.test_raise ___________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>
@@ -381,7 +384,7 @@ get on the terminal - we are working on that)::
> raise ValueError("demo error")
E ValueError: demo error

failure_demo.py:153: ValueError
failure_demo.py:151: ValueError
________________________ TestRaises.test_tupleerror ________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>
@@ -390,7 +393,7 @@ get on the terminal - we are working on that)::
> a, b = [1] # NOQA
E ValueError: not enough values to unpack (expected 2, got 1)

failure_demo.py:156: ValueError
failure_demo.py:154: ValueError
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______

self = <failure_demo.TestRaises object at 0xdeadbeef>
@@ -401,7 +404,7 @@ get on the terminal - we are working on that)::
> a, b = items.pop()
E TypeError: 'int' object is not iterable

failure_demo.py:161: TypeError
failure_demo.py:159: TypeError
--------------------------- Captured stdout call ---------------------------
items is [1, 2, 3]
________________________ TestRaises.test_some_error ________________________
@@ -412,7 +415,7 @@ get on the terminal - we are working on that)::
> if namenotexi: # NOQA
E NameError: name 'namenotexi' is not defined

failure_demo.py:164: NameError
failure_demo.py:162: NameError
____________________ test_dynamic_compile_shows_nicely _____________________

def test_dynamic_compile_shows_nicely():
@@ -423,18 +426,18 @@ get on the terminal - we are working on that)::
name = "abc-123"
module = imp.new_module(name)
code = _pytest._code.compile(src, name, "exec")
py.builtin.exec_(code, module.__dict__)
six.exec_(code, module.__dict__)
sys.modules[name] = module
> module.foo()

failure_demo.py:182:
failure_demo.py:180:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

def foo():
> assert 1 == 0
E AssertionError

<2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:179>:2: AssertionError
<2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:177>:2: AssertionError
____________________ TestMoreErrors.test_complex_error _____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
@@ -448,9 +451,9 @@ get on the terminal - we are working on that)::

> somefunc(f(), g())

failure_demo.py:193:
failure_demo.py:191:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
failure_demo.py:11: in somefunc
failure_demo.py:13: in somefunc
otherfunc(x, y)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

@@ -460,7 +463,7 @@ get on the terminal - we are working on that)::
> assert a == b
E assert 44 == 43

failure_demo.py:7: AssertionError
failure_demo.py:9: AssertionError
___________________ TestMoreErrors.test_z1_unpack_error ____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
@@ -470,7 +473,7 @@ get on the terminal - we are working on that)::
> a, b = items
E ValueError: not enough values to unpack (expected 2, got 0)

failure_demo.py:197: ValueError
failure_demo.py:195: ValueError
____________________ TestMoreErrors.test_z2_type_error _____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
@@ -480,7 +483,7 @@ get on the terminal - we are working on that)::
> a, b = items
E TypeError: 'int' object is not iterable

failure_demo.py:201: TypeError
failure_demo.py:199: TypeError
______________________ TestMoreErrors.test_startswith ______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
@@ -493,7 +496,7 @@ get on the terminal - we are working on that)::
E + where False = <built-in method startswith of str object at 0xdeadbeef>('456')
E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith

failure_demo.py:206: AssertionError
failure_demo.py:204: AssertionError
__________________ TestMoreErrors.test_startswith_nested ___________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
@@ -512,7 +515,7 @@ get on the terminal - we are working on that)::
E + where '123' = <function TestMoreErrors.test_startswith_nested.<locals>.f at 0xdeadbeef>()
E + and '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0xdeadbeef>()

failure_demo.py:215: AssertionError
failure_demo.py:213: AssertionError
_____________________ TestMoreErrors.test_global_func ______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
@@ -523,7 +526,7 @@ get on the terminal - we are working on that)::
E + where False = isinstance(43, float)
E + where 43 = globf(42)

failure_demo.py:218: AssertionError
failure_demo.py:216: AssertionError
_______________________ TestMoreErrors.test_instance _______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
@@ -534,7 +537,7 @@ get on the terminal - we are working on that)::
E assert 42 != 42
E + where 42 = <failure_demo.TestMoreErrors object at 0xdeadbeef>.x

failure_demo.py:222: AssertionError
failure_demo.py:220: AssertionError
_______________________ TestMoreErrors.test_compare ________________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
@@ -544,7 +547,7 @@ get on the terminal - we are working on that)::
|
||||
E assert 11 < 5
|
||||
E + where 11 = globf(10)
|
||||
|
||||
failure_demo.py:225: AssertionError
|
||||
failure_demo.py:223: AssertionError
|
||||
_____________________ TestMoreErrors.test_try_finally ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
|
||||
@@ -555,7 +558,7 @@ get on the terminal - we are working on that)::
|
||||
> assert x == 0
|
||||
E assert 1 == 0
|
||||
|
||||
failure_demo.py:230: AssertionError
|
||||
failure_demo.py:228: AssertionError
|
||||
___________________ TestCustomAssertMsg.test_single_line ___________________
|
||||
|
||||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
@@ -570,7 +573,7 @@ get on the terminal - we are working on that)::
|
||||
E assert 1 == 2
|
||||
E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_single_line.<locals>.A'>.a
|
||||
|
||||
failure_demo.py:241: AssertionError
|
||||
failure_demo.py:239: AssertionError
|
||||
____________________ TestCustomAssertMsg.test_multiline ____________________
|
||||
|
||||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
@@ -582,14 +585,14 @@ get on the terminal - we are working on that)::
|
||||
b = 2
|
||||
> assert (
|
||||
A.a == b
|
||||
), "A.a appears not to be b\n" "or does not appear to be b\none of those"
|
||||
), "A.a appears not to be b\nor does not appear to be b\none of those"
|
||||
E AssertionError: A.a appears not to be b
|
||||
E or does not appear to be b
|
||||
E one of those
|
||||
E assert 1 == 2
|
||||
E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_multiline.<locals>.A'>.a
|
||||
|
||||
failure_demo.py:248: AssertionError
|
||||
failure_demo.py:246: AssertionError
|
||||
___________________ TestCustomAssertMsg.test_custom_repr ___________________
|
||||
|
||||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
@@ -611,11 +614,5 @@ get on the terminal - we are working on that)::
|
||||
E assert 1 == 2
|
||||
E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a
|
||||
|
||||
failure_demo.py:261: AssertionError
|
||||
============================= warnings summary =============================
|
||||
<undetermined location>
|
||||
Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.
|
||||
Please use Metafunc.parametrize instead.
|
||||
|
||||
-- Docs: http://doc.pytest.org/en/latest/warnings.html
|
||||
================== 42 failed, 1 warnings in 0.12 seconds ===================
|
||||
failure_demo.py:259: AssertionError
|
||||
======================== 42 failed in 0.12 seconds =========================
|
||||
|
||||
@@ -43,7 +43,9 @@ provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`:

    def cmdopt(request):
        return request.config.getoption("--cmdopt")
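The option itself is registered in ``conftest.py``; the hunk above shows only the fixture. A minimal sketch of that registration, where the default value and help text are illustrative assumptions:

.. code-block:: python

    # content of conftest.py -- sketch; the default and help strings are assumptions
    def pytest_addoption(parser):
        parser.addoption(
            "--cmdopt", action="store", default="type1", help="my option: type1 or type2"
        )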
-Let's run this without supplying our new option::
+Let's run this without supplying our new option:
+
+.. code-block:: pytest

$ pytest -q test_sample.py
F [100%]

@@ -65,7 +67,9 @@ Let's run this without supplying our new option::

first
1 failed in 0.12 seconds

-And now with supplying a command line option::
+And now with supplying a command line option:
+
+.. code-block:: pytest

$ pytest -q --cmdopt=type2
F [100%]

@@ -117,11 +121,13 @@ the command line arguments before they get processed:

If you have the `xdist plugin <https://pypi.org/project/pytest-xdist/>`_ installed
you will now always perform test runs using a number
of subprocesses close to your CPU. Running in an empty
-directory with the above conftest.py::
+directory with the above conftest.py:
+
+.. code-block:: pytest

$ pytest
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items
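The ``conftest.py`` that produces this behaviour is elided by the diff; a sketch of the idea, assuming the ``pytest_cmdline_preparse`` hook that this era of the docs used (treat the details as an assumption):

.. code-block:: python

    # content of conftest.py -- sketch, reconstructed from the surrounding docs
    import sys


    def pytest_cmdline_preparse(args):
        if "xdist" in sys.modules:  # pytest-xdist plugin is installed
            import multiprocessing

            num = max(multiprocessing.cpu_count() / 2, 1)
            args[:] = ["-n", str(num)] + args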
@@ -175,11 +181,13 @@ We can now write a test module like this:

    def test_func_slow():
        pass

-and when running it will see a skipped "slow" test::
+and when running it will see a skipped "slow" test:
+
+.. code-block:: pytest

$ pytest -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

@@ -189,11 +197,13 @@ and when running it will see a skipped "slow" test::

=================== 1 passed, 1 skipped in 0.12 seconds ====================

-Or run it including the ``slow`` marked test::
+Or run it including the ``slow`` marked test:
+
+.. code-block:: pytest

$ pytest --runslow
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items
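For context, the ``--runslow`` machinery exercised by these runs lives in ``conftest.py``; a sketch under the assumption that it follows the usual addoption/collection-modifyitems recipe:

.. code-block:: python

    # content of conftest.py -- sketch
    import pytest


    def pytest_addoption(parser):
        parser.addoption(
            "--runslow", action="store_true", default=False, help="run slow tests"
        )


    def pytest_collection_modifyitems(config, items):
        if config.getoption("--runslow"):
            # --runslow given on the command line: do not skip slow tests
            return
        skip_slow = pytest.mark.skip(reason="need --runslow option to run")
        for item in items:
            if "slow" in item.keywords:
                item.add_marker(skip_slow)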
@@ -230,7 +240,9 @@ Example:

The ``__tracebackhide__`` setting influences ``pytest`` showing
of tracebacks: the ``checkconfig`` function will not be shown
unless the ``--full-trace`` command line option is specified.
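A sketch of the ``checkconfig`` helper this refers to (the body is reconstructed, so treat it as an assumption): setting ``__tracebackhide__ = True`` in a frame removes that frame from the reported traceback.

.. code-block:: python

    # content of test_checkconfig.py -- sketch
    import pytest


    def checkconfig(x):
        __tracebackhide__ = True  # hide this helper frame in failure reports
        if not hasattr(x, "config"):
            pytest.fail("not configured: %s" % x)


    def test_something():
        checkconfig(42)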
-Let's run our little function::
+Let's run our little function:
+
+.. code-block:: pytest

$ pytest -q test_checkconfig.py
F [100%]

@@ -327,11 +339,13 @@ It's easy to present extra information in a ``pytest`` run:

    def pytest_report_header(config):
        return "project deps: mylib-1.1"

-which will add the string to the test header accordingly::
+which will add the string to the test header accordingly:
+
+.. code-block:: pytest

$ pytest
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
project deps: mylib-1.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items

@@ -353,11 +367,13 @@ display more information if applicable:

    if config.getoption("verbose") > 0:
        return ["info1: did you know that ...", "did you?"]

-which will add info only when run with "--v"::
+which will add info only when run with "--v":
+
+.. code-block:: pytest

$ pytest -v
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
cachedir: .pytest_cache
info1: did you know that ...
did you?

@@ -366,11 +382,13 @@ which will add info only when run with "--v"::

======================= no tests ran in 0.12 seconds =======================

-and nothing when run plainly::
+and nothing when run plainly:
+
+.. code-block:: pytest

$ pytest
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items

@@ -403,11 +421,13 @@ out which tests are the slowest. Let's make an artificial test suite:

    def test_funcslow2():
        time.sleep(0.3)

-Now we can profile which test functions execute the slowest::
+Now we can profile which test functions execute the slowest:
+
+.. code-block:: pytest

$ pytest --durations=3
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 3 items

@@ -416,7 +436,7 @@ Now we can profile which test functions execute the slowest::

========================= slowest 3 test durations =========================
0.30s call test_some_are_slow.py::test_funcslow2
0.20s call test_some_are_slow.py::test_funcslow1
-0.13s call test_some_are_slow.py::test_funcfast
+0.10s call test_some_are_slow.py::test_funcfast
========================= 3 passed in 0.12 seconds =========================

incremental testing - test steps

@@ -475,11 +495,13 @@ tests in a class. Here is a test module example:

    def test_normal():
        pass

-If we run this::
+If we run this:
+
+.. code-block:: pytest

$ pytest -rx
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

@@ -496,7 +518,7 @@ If we run this::

test_step.py:11: AssertionError
========================= short test summary info ==========================
-XFAIL test_step.py::TestUserHandling::()::test_deletion
+XFAIL test_step.py::TestUserHandling::test_deletion
reason: previous test failed (test_modification)
============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds ===============

@@ -556,11 +578,13 @@ the ``db`` fixture:

    def test_root(db):  # no db here, will error out
        pass

-We can run this::
+We can run this:
+
+.. code-block:: pytest

$ pytest
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 7 items

@@ -574,7 +598,7 @@ We can run this::

file $REGENDOC_TMPDIR/b/test_error.py, line 1
def test_root(db):  # no db here, will error out
E fixture 'db' not found
-> available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmpdir, tmpdir_factory
+> available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory
> use 'pytest --fixtures [testpath]' for help on them.

$REGENDOC_TMPDIR/b/test_error.py:1

@@ -667,11 +691,13 @@ if you then have failing tests:

    def test_fail2():
        assert 0

-and run them::
+and run them:
+
+.. code-block:: pytest

$ pytest test_module.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

@@ -766,11 +792,13 @@ if you then have failing tests:

    def test_fail2():
        assert 0

-and run it::
+and run it:
+
+.. code-block:: pytest

$ pytest -s test_module.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 3 items

@@ -852,6 +880,8 @@ In that order.

can be changed between releases (even bug fixes) so it shouldn't be relied on for scripting
or automation.

+.. _freezing-pytest:
+
Freezing pytest
---------------
@@ -13,7 +13,7 @@ calls it::

    @pytest.fixture(scope="session", autouse=True)
    def callattr_ahead_of_alltests(request):
-        print ("callattr_ahead_of_alltests called")
+        print("callattr_ahead_of_alltests called")
        seen = set([None])
        session = request.node
        for item in session.items:

@@ -31,20 +31,20 @@ will be called ahead of running any tests::

    class TestHello(object):
        @classmethod
        def callme(cls):
-            print ("callme called!")
+            print("callme called!")

        def test_method1(self):
-            print ("test_method1 called")
+            print("test_method1 called")

        def test_method2(self):
-            print ("test_method1 called")
+            print("test_method1 called")

    class TestOther(object):
        @classmethod
        def callme(cls):
-            print ("callme other called")
+            print("callme other called")
        def test_other(self):
-            print ("test other")
+            print("test other")

    # works with unittest as well ...
    import unittest

@@ -52,12 +52,14 @@ will be called ahead of running any tests::

    class SomeTest(unittest.TestCase):
        @classmethod
        def callme(self):
-            print ("SomeTest callme called")
+            print("SomeTest callme called")

        def test_unit1(self):
-            print ("test_unit1 method called")
+            print("test_unit1 method called")

-If you run this without output capturing::
+If you run this without output capturing:
+
+.. code-block:: pytest

$ pytest -q -s test_module.py
callattr_ahead_of_alltests called
@@ -55,22 +55,24 @@ using it::

    import pytest

    @pytest.fixture
-    def smtp():
+    def smtp_connection():
        import smtplib
        return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)

-    def test_ehlo(smtp):
-        response, msg = smtp.ehlo()
+    def test_ehlo(smtp_connection):
+        response, msg = smtp_connection.ehlo()
        assert response == 250
        assert 0  # for demo purposes

-Here, the ``test_ehlo`` needs the ``smtp`` fixture value. pytest
+Here, the ``test_ehlo`` needs the ``smtp_connection`` fixture value. pytest
will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>`
-marked ``smtp`` fixture function. Running the test looks like this::
+marked ``smtp_connection`` fixture function. Running the test looks like this:
+
+.. code-block:: pytest

$ pytest test_smtpsimple.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

@@ -79,10 +81,10 @@ marked ``smtp`` fixture function. Running the test looks like this::

================================= FAILURES =================================
________________________________ test_ehlo _________________________________

-smtp = <smtplib.SMTP object at 0xdeadbeef>
+smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

-def test_ehlo(smtp):
-    response, msg = smtp.ehlo()
+def test_ehlo(smtp_connection):
+    response, msg = smtp_connection.ehlo()
assert response == 250
> assert 0  # for demo purposes
E assert 0

@@ -91,18 +93,18 @@ marked ``smtp`` fixture function. Running the test looks like this::

========================= 1 failed in 0.12 seconds =========================

In the failure traceback we see that the test function was called with a
-``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture
+``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture
function. The test function fails on our deliberate ``assert 0``. Here is
the exact protocol used by ``pytest`` to call the test function this way:

1. pytest :ref:`finds <test discovery>` the ``test_ehlo`` because
   of the ``test_`` prefix. The test function needs a function argument
-   named ``smtp``. A matching fixture function is discovered by
-   looking for a fixture-marked function named ``smtp``.
+   named ``smtp_connection``. A matching fixture function is discovered by
+   looking for a fixture-marked function named ``smtp_connection``.

-2. ``smtp()`` is called to create an instance.
+2. ``smtp_connection()`` is called to create an instance.

-3. ``test_ehlo(<SMTP instance>)`` is called and fails in the last
+3. ``test_ehlo(<smtp_connection instance>)`` is called and fails in the last
   line of the test function.

Note that if you misspell a function argument or want

@@ -153,7 +155,7 @@ This makes use of the automatic caching mechanisms of pytest.

Another good approach is by adding the data files in the ``tests`` folder.
There are also community plugins available to help managing this aspect of
-testing, e.g. `pytest-datadir <https://github.com/gabrielcnr/pytest-datadir>`__
+testing, e.g. `pytest-datadir <https://pypi.org/project/pytest-datadir/>`__
and `pytest-datafiles <https://pypi.org/project/pytest-datafiles/>`__.

.. _smtpshared:

@@ -167,10 +169,11 @@ Fixtures requiring network access depend on connectivity and are

usually time-expensive to create. Extending the previous example, we
can add a ``scope="module"`` parameter to the
:py:func:`@pytest.fixture <_pytest.python.fixture>` invocation
-to cause the decorated ``smtp`` fixture function to only be invoked once
-per test *module* (the default is to invoke once per test *function*).
+to cause the decorated ``smtp_connection`` fixture function to only be invoked
+once per test *module* (the default is to invoke once per test *function*).
Multiple test functions in a test module will thus
-each receive the same ``smtp`` fixture instance, thus saving time.
+each receive the same ``smtp_connection`` fixture instance, thus saving time.
+Possible values for ``scope`` are: ``function``, ``class``, ``module``, ``package`` or ``session``.

The next example puts the fixture function into a separate ``conftest.py`` file
so that tests from multiple test modules in the directory can

@@ -181,32 +184,35 @@ access the fixture function::

    import smtplib

    @pytest.fixture(scope="module")
-    def smtp():
+    def smtp_connection():
        return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)

-The name of the fixture again is ``smtp`` and you can access its result by
-listing the name ``smtp`` as an input parameter in any test or fixture
-function (in or below the directory where ``conftest.py`` is located)::
+The name of the fixture again is ``smtp_connection`` and you can access its
+result by listing the name ``smtp_connection`` as an input parameter in any
+test or fixture function (in or below the directory where ``conftest.py`` is
+located)::

    # content of test_module.py

-    def test_ehlo(smtp):
-        response, msg = smtp.ehlo()
+    def test_ehlo(smtp_connection):
+        response, msg = smtp_connection.ehlo()
        assert response == 250
        assert b"smtp.gmail.com" in msg
        assert 0  # for demo purposes

-    def test_noop(smtp):
-        response, msg = smtp.noop()
+    def test_noop(smtp_connection):
+        response, msg = smtp_connection.noop()
        assert response == 250
        assert 0  # for demo purposes

We deliberately insert failing ``assert 0`` statements in order to
-inspect what is going on and can now run the tests::
+inspect what is going on and can now run the tests:
+
+.. code-block:: pytest

$ pytest test_module.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

@@ -215,10 +221,10 @@ inspect what is going on and can now run the tests::

================================= FAILURES =================================
________________________________ test_ehlo _________________________________

-smtp = <smtplib.SMTP object at 0xdeadbeef>
+smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

-def test_ehlo(smtp):
-    response, msg = smtp.ehlo()
+def test_ehlo(smtp_connection):
+    response, msg = smtp_connection.ehlo()
assert response == 250
assert b"smtp.gmail.com" in msg
> assert 0  # for demo purposes

@@ -227,10 +233,10 @@ inspect what is going on and can now run the tests::

test_module.py:6: AssertionError
________________________________ test_noop _________________________________

-smtp = <smtplib.SMTP object at 0xdeadbeef>
+smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

-def test_noop(smtp):
-    response, msg = smtp.noop()
+def test_noop(smtp_connection):
+    response, msg = smtp_connection.noop()
assert response == 250
> assert 0  # for demo purposes
E assert 0

@@ -239,24 +245,45 @@ inspect what is going on and can now run the tests::

========================= 2 failed in 0.12 seconds =========================

You see the two ``assert 0`` failing and more importantly you can also see
-that the same (module-scoped) ``smtp`` object was passed into the two
-test functions because pytest shows the incoming argument values in the
-traceback. As a result, the two test functions using ``smtp`` run as
-quick as a single one because they reuse the same instance.
+that the same (module-scoped) ``smtp_connection`` object was passed into the
+two test functions because pytest shows the incoming argument values in the
+traceback. As a result, the two test functions using ``smtp_connection`` run
+as quick as a single one because they reuse the same instance.

-If you decide that you rather want to have a session-scoped ``smtp``
+If you decide that you rather want to have a session-scoped ``smtp_connection``
instance, you can simply declare it:

.. code-block:: python

    @pytest.fixture(scope="session")
-    def smtp():
+    def smtp_connection():
        # the returned fixture value will be shared for
        # all tests needing it
        ...

Finally, the ``class`` scope will invoke the fixture once per test *class*.

+.. note::
+
+    Pytest will only cache one instance of a fixture at a time.
+    This means that when using a parametrized fixture, pytest may invoke a fixture more than once in the given scope.
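A sketch of what that note means in practice (names are illustrative): with a module-scoped, parametrized fixture, pytest keeps only one cached instance at a time, so alternating between parameter values within the module re-runs setup and teardown.

.. code-block:: python

    import pytest


    @pytest.fixture(scope="module", params=["a", "b"])
    def resource(request):
        print("  SETUP resource %s" % request.param)
        yield request.param
        print("  TEARDOWN resource %s" % request.param)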
+``package`` scope (experimental)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 3.7
+
+In pytest 3.7 the ``package`` scope has been introduced. Package-scoped fixtures
+are finalized when the last test of a *package* finishes.
+
+.. warning::
+    This functionality is considered **experimental** and may be removed in future
+    versions if hidden corner-cases or serious problems with this functionality
+    are discovered after it gets more usage in the wild.
+
+    Use this new feature sparingly and please make sure to report any issues you find.
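Declaring a package-scoped fixture looks like any other scope; a minimal sketch:

.. code-block:: python

    import pytest


    @pytest.fixture(scope="package")
    def shared_resource():
        # created once per package, finalized after the last
        # test in the package has finished
        ...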
Higher-scoped fixtures are instantiated first
---------------------------------------------

@@ -323,11 +350,11 @@ the code after the *yield* statement serves as the teardown code:

    @pytest.fixture(scope="module")
-    def smtp():
-        smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5)
-        yield smtp  # provide the fixture value
+    def smtp_connection():
+        smtp_connection = smtplib.SMTP("smtp.gmail.com", 587, timeout=5)
+        yield smtp_connection  # provide the fixture value
        print("teardown smtp")
-        smtp.close()
+        smtp_connection.close()

The ``print`` and ``smtp.close()`` statements will execute when the last test in
the module has finished execution, regardless of the exception status of the

@@ -340,7 +367,7 @@ Let's execute it::

2 failed in 0.12 seconds

-We see that the ``smtp`` instance is finalized after the two
+We see that the ``smtp_connection`` instance is finalized after the two
tests finished execution. Note that if we decorated our fixture
function with ``scope='function'`` then fixture setup and cleanup would
occur around each single test. In either case the test

@@ -358,13 +385,13 @@ Note that we can also seamlessly use the ``yield`` syntax with ``with`` statemen

    @pytest.fixture(scope="module")
-    def smtp():
-        with smtplib.SMTP("smtp.gmail.com", 587, timeout=5) as smtp:
-            yield smtp  # provide the fixture value
+    def smtp_connection():
+        with smtplib.SMTP("smtp.gmail.com", 587, timeout=5) as smtp_connection:
+            yield smtp_connection  # provide the fixture value

-The ``smtp`` connection will be closed after the test finished execution
-because the ``smtp`` object automatically closes when
+The ``smtp_connection`` connection will be closed after the test finished
+execution because the ``smtp_connection`` object automatically closes when
the ``with`` statement ends.

Note that if an exception happens during the *setup* code (before the ``yield`` keyword), the

@@ -374,7 +401,7 @@ An alternative option for executing *teardown* code is to

make use of the ``addfinalizer`` method of the `request-context`_ object to register
finalization functions.

-Here's the ``smtp`` fixture changed to use ``addfinalizer`` for cleanup:
+Here's the ``smtp_connection`` fixture changed to use ``addfinalizer`` for cleanup:

.. code-block:: python

@@ -384,15 +411,15 @@ Here's the ``smtp`` fixture changed to use ``addfinalizer`` for cleanup:

    @pytest.fixture(scope="module")
-    def smtp(request):
-        smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5)
+    def smtp_connection(request):
+        smtp_connection = smtplib.SMTP("smtp.gmail.com", 587, timeout=5)

        def fin():
-            print("teardown smtp")
-            smtp.close()
+            print("teardown smtp_connection")
+            smtp_connection.close()

        request.addfinalizer(fin)
-        return smtp  # provide the fixture value
+        return smtp_connection  # provide the fixture value

Both ``yield`` and ``addfinalizer`` methods work similarly by calling their code after the test

@@ -425,7 +452,7 @@ Fixtures can introspect the requesting test context

Fixture functions can accept the :py:class:`request <FixtureRequest>` object
to introspect the "requesting" test function, class or module context.
-Further extending the previous ``smtp`` fixture example, let's
+Further extending the previous ``smtp_connection`` fixture example, let's
read an optional server URL from the test module which uses our fixture::

    # content of conftest.py

@@ -433,12 +460,12 @@ read an optional server URL from the test module which uses our fixture::

    import smtplib

    @pytest.fixture(scope="module")
-    def smtp(request):
+    def smtp_connection(request):
        server = getattr(request.module, "smtpserver", "smtp.gmail.com")
-        smtp = smtplib.SMTP(server, 587, timeout=5)
-        yield smtp
-        print ("finalizing %s (%s)" % (smtp, server))
-        smtp.close()
+        smtp_connection = smtplib.SMTP(server, 587, timeout=5)
+        yield smtp_connection
+        print("finalizing %s (%s)" % (smtp_connection, server))
+        smtp_connection.close()

We use the ``request.module`` attribute to optionally obtain an
``smtpserver`` attribute from the test module. If we just execute

@@ -456,23 +483,25 @@ server URL in its module namespace::

    smtpserver = "mail.python.org"  # will be read by smtp fixture

-    def test_showhelo(smtp):
-        assert 0, smtp.helo()
+    def test_showhelo(smtp_connection):
+        assert 0, smtp_connection.helo()

-Running it::
+Running it:
+
+.. code-block:: pytest

$ pytest -qq --tb=short test_anothersmtp.py
F [100%]
================================= FAILURES =================================
______________________________ test_showhelo _______________________________
test_anothersmtp.py:5: in test_showhelo
-assert 0, smtp.helo()
+assert 0, smtp_connection.helo()
E AssertionError: (250, b'mail.python.org')
E assert 0
------------------------- Captured stdout teardown -------------------------
finalizing <smtplib.SMTP object at 0xdeadbeef> (mail.python.org)

-voila! The ``smtp`` fixture function picked up our mail server name
+voila! The ``smtp_connection`` fixture function picked up our mail server name
from the module namespace.

.. _`fixture-factory`:

@@ -535,13 +564,13 @@ Parametrizing fixtures

Fixture functions can be parametrized in which case they will be called
multiple times, each time executing the set of dependent tests, i. e. the
-tests that depend on this fixture. Test functions do usually not need
+tests that depend on this fixture. Test functions usually do not need
to be aware of their re-running. Fixture parametrization helps to
write exhaustive functional tests for components which themselves can be
configured in multiple ways.

Extending the previous example, we can flag the fixture to create two
-``smtp`` fixture instances which will cause all tests using the fixture
+``smtp_connection`` fixture instances which will cause all tests using the fixture
to run twice. The fixture function gets access to each parameter
through the special :py:class:`request <FixtureRequest>` object::

@@ -551,27 +580,29 @@ through the special :py:class:`request <FixtureRequest>` object::

    @pytest.fixture(scope="module",
                    params=["smtp.gmail.com", "mail.python.org"])
-    def smtp(request):
-        smtp = smtplib.SMTP(request.param, 587, timeout=5)
-        yield smtp
-        print ("finalizing %s" % smtp)
-        smtp.close()
+    def smtp_connection(request):
+        smtp_connection = smtplib.SMTP(request.param, 587, timeout=5)
+        yield smtp_connection
+        print("finalizing %s" % smtp_connection)
+        smtp_connection.close()

The main change is the declaration of ``params`` with
:py:func:`@pytest.fixture <_pytest.python.fixture>`, a list of values
for each of which the fixture function will execute and can access
a value via ``request.param``. No test function code needs to change.
-So let's just do another run::
+So let's just do another run:
+
+.. code-block:: pytest

$ pytest -q test_module.py
FFFF [100%]
================================= FAILURES =================================
________________________ test_ehlo[smtp.gmail.com] _________________________

-smtp = <smtplib.SMTP object at 0xdeadbeef>
+smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

-def test_ehlo(smtp):
-    response, msg = smtp.ehlo()
+def test_ehlo(smtp_connection):
+    response, msg = smtp_connection.ehlo()
assert response == 250
assert b"smtp.gmail.com" in msg
> assert 0  # for demo purposes

@@ -580,10 +611,10 @@ So let's just do another run::

test_module.py:6: AssertionError
________________________ test_noop[smtp.gmail.com] _________________________

-smtp = <smtplib.SMTP object at 0xdeadbeef>
+smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

-def test_noop(smtp):
-    response, msg = smtp.noop()
+def test_noop(smtp_connection):
+    response, msg = smtp_connection.noop()
assert response == 250
> assert 0  # for demo purposes
E assert 0

@@ -591,10 +622,10 @@ So let's just do another run::

test_module.py:11: AssertionError
________________________ test_ehlo[mail.python.org] ________________________

-smtp = <smtplib.SMTP object at 0xdeadbeef>
+smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

-def test_ehlo(smtp):
-    response, msg = smtp.ehlo()
+def test_ehlo(smtp_connection):
+    response, msg = smtp_connection.ehlo()
assert response == 250
> assert b"smtp.gmail.com" in msg
E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8'

@@ -604,10 +635,10 @@ So let's just do another run::

finalizing <smtplib.SMTP object at 0xdeadbeef>
________________________ test_noop[mail.python.org] ________________________

-smtp = <smtplib.SMTP object at 0xdeadbeef>
+smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

-def test_noop(smtp):
-    response, msg = smtp.noop()
+def test_noop(smtp_connection):
+    response, msg = smtp_connection.noop()
assert response == 250
> assert 0  # for demo purposes
E assert 0

@@ -618,7 +649,7 @@ So let's just do another run::

4 failed in 0.12 seconds

We see that our two test functions each ran twice, against the different
-``smtp`` instances. Note also, that with the ``mail.python.org``
+``smtp_connection`` instances. Note also, that with the ``mail.python.org``
connection the second test fails in ``test_ehlo`` because a
different server string is expected than what arrived.

@@ -663,11 +694,13 @@ a function which will be called with the fixture value and then

has to return a string to use. In the latter case if the function
return ``None`` then pytest's auto-generated ID will be used.
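The two ``ids`` forms described above look roughly like this sketch (parameter values and names are illustrative):

.. code-block:: python

    import pytest


    @pytest.fixture(params=[0, 1], ids=["spam", "ham"])
    def a(request):
        return request.param


    def idfn(fixture_value):
        if fixture_value == 0:
            return "eggs"
        return None  # fall back to pytest's auto-generated ID


    @pytest.fixture(params=[0, 1], ids=idfn)
    def b(request):
        return request.param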
-Running the above tests results in the following test IDs being used::
+Running the above tests results in the following test IDs being used:
+
+.. code-block:: pytest

$ pytest --collect-only
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 10 items
<Module 'test_anothersmtp.py'>

@@ -705,11 +738,13 @@ Example::

    def test_data(data_set):
        pass
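A sketch of the fixture behind this run, assuming the usual ``pytest.param(..., marks=...)`` form:

.. code-block:: python

    # content of test_fixture_marks.py -- sketch
    import pytest


    @pytest.fixture(params=[0, 1, pytest.param(2, marks=pytest.mark.skip)])
    def data_set(request):
        return request.param


    def test_data(data_set):
        pass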
-Running this test will *skip* the invocation of ``data_set`` with value ``2``::
+Running this test will *skip* the invocation of ``data_set`` with value ``2``:
+
+.. code-block:: pytest

$ pytest test_fixture_marks.py -v
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
cachedir: .pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 3 items

@@ -730,46 +765,48 @@ can use other fixtures themselves. This contributes to a modular design

of your fixtures and allows re-use of framework-specific fixtures across
many projects. As a simple example, we can extend the previous example
and instantiate an object ``app`` where we stick the already defined
-``smtp`` resource into it::
+``smtp_connection`` resource into it::

    # content of test_appsetup.py

    import pytest

    class App(object):
-        def __init__(self, smtp):
-            self.smtp = smtp
+        def __init__(self, smtp_connection):
+            self.smtp_connection = smtp_connection

    @pytest.fixture(scope="module")
-    def app(smtp):
-        return App(smtp)
+    def app(smtp_connection):
+        return App(smtp_connection)

-    def test_smtp_exists(app):
-        assert app.smtp
+    def test_smtp_connection_exists(app):
+        assert app.smtp_connection

Here we declare an ``app`` fixture which receives the previously defined
-``smtp`` fixture and instantiates an ``App`` object with it. Let's run it::
+``smtp_connection`` fixture and instantiates an ``App`` object with it. Let's run it:
+
+.. code-block:: pytest

$ pytest -v test_appsetup.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
cachedir: .pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 2 items

-test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED [ 50%]
-test_appsetup.py::test_smtp_exists[mail.python.org] PASSED [100%]
+test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%]
+test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%]

========================= 2 passed in 0.12 seconds =========================

-Due to the parametrization of ``smtp`` the test will run twice with two
+Due to the parametrization of ``smtp_connection``, the test will run twice with two
different ``App`` instances and respective smtp servers. There is no
-need for the ``app`` fixture to be aware of the ``smtp`` parametrization
-as pytest will fully analyse the fixture dependency graph.
+need for the ``app`` fixture to be aware of the ``smtp_connection``
+parametrization because pytest will fully analyse the fixture dependency graph.

Note, that the ``app`` fixture has a scope of ``module`` and uses a
-module-scoped ``smtp`` fixture. The example would still work if ``smtp``
-was cached on a ``session`` scope: it is fine for fixtures to use
+module-scoped ``smtp_connection`` fixture. The example would still work if
+``smtp_connection`` was cached on a ``session`` scope: it is fine for fixtures to use
"broader" scoped fixtures but not the other way round:
A session-scoped fixture could not use a module-scoped one in a
meaningful way.

@@ -788,7 +825,7 @@ first execute with one instance and then finalizers are called

before the next fixture instance is created. Among other things,
this eases testing of applications which create and use global state.

-The following example uses two parametrized fixture, one of which is
+The following example uses two parametrized fixtures, one of which is
scoped on a per-module basis, and all the functions perform ``print`` calls
to show the setup/teardown flow::

@@ -798,30 +835,32 @@ to show the setup/teardown flow::

    @pytest.fixture(scope="module", params=["mod1", "mod2"])
    def modarg(request):
        param = request.param
-        print (" SETUP modarg %s" % param)
+        print(" SETUP modarg %s" % param)
        yield param
-        print (" TEARDOWN modarg %s" % param)
+        print(" TEARDOWN modarg %s" % param)

    @pytest.fixture(scope="function", params=[1,2])
    def otherarg(request):
        param = request.param
-        print (" SETUP otherarg %s" % param)
+        print(" SETUP otherarg %s" % param)
        yield param
-        print (" TEARDOWN otherarg %s" % param)
+        print(" TEARDOWN otherarg %s" % param)

    def test_0(otherarg):
-        print (" RUN test0 with otherarg %s" % otherarg)
+        print(" RUN test0 with otherarg %s" % otherarg)
    def test_1(modarg):
-        print (" RUN test1 with modarg %s" % modarg)
+        print(" RUN test1 with modarg %s" % modarg)
    def test_2(otherarg, modarg):
-        print (" RUN test2 with otherarg %s and modarg %s" % (otherarg, modarg))
+        print(" RUN test2 with otherarg %s and modarg %s" % (otherarg, modarg))

-Let's run the tests in verbose mode and with looking at the print-output::
+Let's run the tests in verbose mode and with looking at the print-output:
+
+.. code-block:: pytest

$ pytest -v -s test_module.py
=========================== test session starts ============================
-platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
+platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6
cachedir: .pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 8 items

@@ -919,7 +958,9 @@ and declare its use in a test module via a ``usefixtures`` marker::

Due to the ``usefixtures`` marker, the ``cleandir`` fixture
will be required for the execution of each test method, just as if
you specified a "cleandir" function argument to each of them. Let's run it
-to verify our fixture is activated and the tests pass::
+to verify our fixture is activated and the tests pass:
+
+.. code-block:: pytest

$ pytest -q
.. [100%]

@@ -943,7 +984,7 @@ a generic feature of the mark mechanism:

Note that the assigned variable *must* be called ``pytestmark``, assigning e.g.
``foomark`` will not activate the fixtures.

-Lastly you can put fixtures required by all tests in your project
+It is also possible to put fixtures required by all tests in your project
into an ini-file:

.. code-block:: ini

@@ -953,6 +994,22 @@ into an ini-file:

    usefixtures = cleandir

+.. warning::
+
+    Note this mark has no effect in **fixture functions**. For example,
+    this **will not work as expected**:
+
+    .. code-block:: python
+
+        @pytest.mark.usefixtures("my_other_fixture")
+        @pytest.fixture
+        def my_fixture_that_sadly_wont_use_my_other_fixture():
+            ...
+
+    Currently this will not generate any error or warning, but this is intended
+    to be handled by `#3664 <https://github.com/pytest-dev/pytest/issues/3664>`_.

.. _`autouse`:
.. _`autouse fixtures`:

@@ -1002,7 +1059,9 @@ which implies that all test methods in the class will use this fixture

without a need to state it in the test function signature or with a
class-level ``usefixtures`` decorator.
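An autouse fixture of the kind described might look like this sketch (the class and fixture names are illustrative):

.. code-block:: python

    import pytest


    class TestWithAutouse(object):
        @pytest.fixture(autouse=True)
        def _prepare(self, tmpdir):
            # runs automatically around every test method in this class
            tmpdir.join("marker").write("x")

        def test_one(self, tmpdir):
            assert tmpdir.join("marker").check()

        def test_two(self, tmpdir):
            assert tmpdir.join("marker").check()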
-If we run it, we get two passing tests::
+If we run it, we get two passing tests:
+
+.. code-block:: pytest

$ pytest -q
.. [100%]
doc/en/flaky.rst (new file, 125 lines)

@@ -0,0 +1,125 @@

Flaky tests
-----------

A "flaky" test is one that exhibits intermittent or sporadic failure, that seems to have non-deterministic behaviour. Sometimes it passes, sometimes it fails, and it's not clear why. This page discusses pytest features that can help and other general strategies for identifying, fixing or mitigating them.

Why flaky tests are a problem
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Flaky tests are particularly troublesome when a continuous integration (CI) server is being used, so that all tests must pass before a new code change can be merged. If the test result is not a reliable signal -- that a test failure means the code change broke the test -- developers can become mistrustful of the test results, which can lead to overlooking genuine failures. It is also a source of wasted time as developers must re-run test suites and investigate spurious failures.


Potential root causes
^^^^^^^^^^^^^^^^^^^^^

System state
~~~~~~~~~~~~

Broadly speaking, a flaky test indicates that the test relies on some system state that is not being appropriately controlled - the test environment is not sufficiently isolated. Higher level tests are more likely to be flaky as they rely on more state.

Flaky tests sometimes appear when a test suite is run in parallel (such as use of pytest-xdist). This can indicate a test is reliant on test ordering.

- Perhaps a different test is failing to clean up after itself and leaving behind data which causes the flaky test to fail.
- The flaky test is reliant on data from a previous test that doesn't clean up after itself, and in parallel runs that previous test is not always present
- Tests that modify global state typically cannot be run in parallel.


Overly strict assertion
~~~~~~~~~~~~~~~~~~~~~~~

Overly strict assertions can cause problems with floating point comparison as well as timing issues. `pytest.approx <https://docs.pytest.org/en/latest/reference.html#pytest-approx>`_ is useful here.
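A minimal sketch of the loosened floating point comparison:

.. code-block:: python

    import pytest


    def test_sum():
        # exact comparison of 0.1 + 0.2 == 0.3 fails due to float rounding;
        # pytest.approx compares within a sensible tolerance instead
        assert 0.1 + 0.2 == pytest.approx(0.3)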
Pytest features
^^^^^^^^^^^^^^^

Xfail strict
~~~~~~~~~~~~

:ref:`pytest.mark.xfail ref` with ``strict=False`` can be used to mark a test so that its failure does not cause the whole build to break. This could be considered like a manual quarantine, and is rather dangerous to use permanently.
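A sketch of such a manual quarantine (the reason string is illustrative):

.. code-block:: python

    import pytest


    @pytest.mark.xfail(strict=False, reason="flaky, see issue tracker")
    def test_sometimes_fails():
        ...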
PYTEST_CURRENT_TEST
~~~~~~~~~~~~~~~~~~~

:ref:`pytest current test env` may be useful for figuring out "which test got stuck".
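pytest sets this environment variable while each test runs, so another process or a fault handler can read it; a minimal sketch:

.. code-block:: python

    import os

    # e.g. inside a hung worker, from a debugger or signal handler
    print(os.environ.get("PYTEST_CURRENT_TEST"))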
Plugins
~~~~~~~

Rerunning any failed tests can mitigate the negative effects of flaky tests by giving them additional chances to pass, so that the overall build does not fail. Several pytest plugins support this:

* `flaky <https://github.com/box/flaky>`_
* `pytest-flakefinder <https://github.com/dropbox/pytest-flakefinder>`_ - `blog post <https://blogs.dropbox.com/tech/2016/03/open-sourcing-pytest-tools/>`_
* `pytest-rerunfailures <https://github.com/pytest-dev/pytest-rerunfailures>`_
* `pytest-replay <https://github.com/ESSS/pytest-replay>`_: This plugin helps to reproduce locally crashes or flaky tests observed during CI runs.

Plugins to deliberately randomize tests can help expose tests with state problems:

* `pytest-random-order <https://github.com/jbasko/pytest-random-order>`_
* `pytest-randomly <https://github.com/pytest-dev/pytest-randomly>`_


Other general strategies
^^^^^^^^^^^^^^^^^^^^^^^^

Split up test suites
~~~~~~~~~~~~~~~~~~~~

It can be common to split a single test suite into two, such as unit vs integration, and only use the unit test suite as a CI gate. This also helps keep build times manageable as high level tests tend to be slower. However, it means it does become possible for code that breaks the build to be merged, so extra vigilance is needed for monitoring the integration test results.


Video/screenshot on failure
~~~~~~~~~~~~~~~~~~~~~~~~~~~

For UI tests these are important for understanding what the state of the UI was when the test failed. pytest-splinter can be used with plugins like pytest-bdd and can `save a screenshot on test failure <https://pytest-splinter.readthedocs.io/en/latest/#automatic-screenshots-on-test-failure>`_, which can help to isolate the cause.


Delete or rewrite the test
~~~~~~~~~~~~~~~~~~~~~~~~~~

If the functionality is covered by other tests, perhaps the test can be removed. If not, perhaps it can be rewritten at a lower level which will remove the flakiness or make its source more apparent.


Quarantine
~~~~~~~~~~

Mark Lapierre discusses the `Pros and Cons of Quarantined Tests <https://dev.to/mlapierre/pros-and-cons-of-quarantined-tests-2emj>`_ in a post from 2018.


CI tools that rerun on failure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Azure Pipelines (the Azure cloud CI/CD tool, formerly Visual Studio Team Services or VSTS) has a feature to `identify flaky tests <https://docs.microsoft.com/en-us/azure/devops/release-notes/2017/dec-11-vsts#identify-flaky-tests>`_ and rerun failed tests.


Research
^^^^^^^^

This is a limited list, please submit an issue or pull request to expand it!

* Gao, Zebao, Yalan Liang, Myra B. Cohen, Atif M. Memon, and Zhen Wang. "Making system user interactive tests repeatable: When and what should we control?." In *Software Engineering (ICSE), 2015 IEEE/ACM 37th IEEE International Conference on*, vol. 1, pp. 55-65. IEEE, 2015. `PDF <http://www.cs.umd.edu/~atif/pubs/gao-icse15.pdf>`__
* Palomba, Fabio, and Andy Zaidman. "Does refactoring of test smells induce fixing flaky tests?." In *Software Maintenance and Evolution (ICSME), 2017 IEEE International Conference on*, pp. 1-12. IEEE, 2017. `PDF in Google Drive <https://drive.google.com/file/d/10HdcCQiuQVgW3yYUJD-TSTq1NbYEprl0/view>`__
* Bell, Jonathan, Owolabi Legunsen, Michael Hilton, Lamyaa Eloussi, Tifany Yung, and Darko Marinov. "DeFlaker: Automatically detecting flaky tests." In *Proceedings of the 2018 International Conference on Software Engineering*. 2018. `PDF <https://www.jonbell.net/icse18-deflaker.pdf>`__


Resources
^^^^^^^^^

* `Eradicating Non-Determinism in Tests <https://martinfowler.com/articles/nonDeterminism.html>`_ by Martin Fowler, 2011
* `No more flaky tests on the Go team <https://www.thoughtworks.com/insights/blog/no-more-flaky-tests-go-team>`_ by Pavan Sudarshan, 2012
* `The Build That Cried Broken: Building Trust in your Continuous Integration Tests <https://www.youtube.com/embed/VotJqV4n8ig>`_ talk (video) by `Angie Jones <http://angiejones.tech/>`_ at SeleniumConf Austin 2017
* `Test and Code Podcast: Flaky Tests and How to Deal with Them <https://testandcode.com/50>`_ by Brian Okken and Anthony Shaw, 2018
* Microsoft:

  * `How we approach testing VSTS to enable continuous delivery <https://blogs.msdn.microsoft.com/bharry/2017/06/28/testing-in-a-cloud-delivery-cadence/>`_ by Brian Harry MS, 2017
  * `Eliminating Flaky Tests <https://docs.microsoft.com/en-us/azure/devops/learn/devops-at-microsoft/eliminating-flaky-tests>`_ blog and talk (video) by Munil Shah, 2017

* Google:

  * `Flaky Tests at Google and How We Mitigate Them <https://testing.googleblog.com/2016/05/flaky-tests-at-google-and-how-we.html>`_ by John Micco, 2016
  * `Where do Google's flaky tests come from? <https://docs.google.com/document/d/1mZ0-Kc97DI_F3tf_GBW_NB_aqka-P1jVOsFfufxqUUM/edit#heading=h.ec0r4fypsleh>`_ by Jeff Listfield, 2017
@@ -7,7 +7,7 @@ pytest-2.3: reasoning for fixture/funcarg evolution

**Target audience**: Reading this document requires basic knowledge of
python testing, xUnit setup methods and the (previous) basic pytest
-funcarg mechanism, see http://pytest.org/2.2.4/funcargs.html
+funcarg mechanism, see https://docs.pytest.org/en/latest/historical-notes.html#funcargs-and-pytest-funcarg.
If you are new to pytest, then you can simply ignore this
section and read the other sections.

@@ -26,9 +26,9 @@ a per-session Database object::

    # content of conftest.py
    class Database(object):
        def __init__(self):
-            print ("database instance created")
+            print("database instance created")
        def destroy(self):
-            print ("database instance destroyed")
+            print("database instance destroyed")

    def pytest_funcarg__db(request):
        return request.cached_setup(setup=DataBase,
@@ -1,7 +1,7 @@
|
||||
Installation and Getting Started
|
||||
===================================
|
||||
|
||||
**Pythons**: Python 2.7, 3.4, 3.5, 3.6, Jython, PyPy-2.3
|
||||
**Pythons**: Python 2.7, 3.4, 3.5, 3.6, 3.7, Jython, PyPy-2.3
|
||||
|
||||
**Platforms**: Unix/Posix and Windows
|
||||
|
||||
@@ -27,7 +27,7 @@ Install ``pytest``
|
||||
2. Check that you installed the correct version::
|
||||
|
||||
$ pytest --version
|
||||
This is pytest version 3.x.y, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py
|
||||
This is pytest version 4.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py
|
||||
|
||||
.. _`simpletest`:
|
||||
|
||||
@@ -43,11 +43,13 @@ Create a simple test function with just four lines of code::
|
||||
def test_answer():
|
||||
assert func(3) == 5
|
||||
|
||||
That’s it. You can now execute the test function::
|
||||
That’s it. You can now execute the test function:
|
||||
|
||||
.. code-block:: pytest
|
||||
|
||||
$ pytest
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 item
|
||||
|
||||
@@ -90,7 +92,9 @@ Use the ``raises`` helper to assert that some code raises an exception::
|
||||
with pytest.raises(SystemExit):
|
||||
f()
|
||||
|
||||
Execute the test function with “quiet” reporting mode::
|
||||
Execute the test function with “quiet” reporting mode:
|
||||
|
||||
.. code-block:: pytest
|
||||
|
||||
$ pytest -q test_sysexit.py
|
||||
. [100%]
|
||||
@@ -111,7 +115,9 @@ Once you develop multiple tests, you may want to group them into a class. pytest
|
||||
x = "hello"
|
||||
assert hasattr(x, 'check')
|
||||
|
||||
``pytest`` discovers all tests following its :ref:`Conventions for Python test discovery <test discovery>`, so it finds both ``test_`` prefixed functions. There is no need to subclass anything. We can simply run the module by passing its filename::
|
||||
``pytest`` discovers all tests following its :ref:`Conventions for Python test discovery <test discovery>`, so it finds both ``test_`` prefixed functions. There is no need to subclass anything. We can simply run the module by passing its filename:
|
||||
|
||||
.. code-block:: pytest
|
||||
|
||||
$ pytest -q test_class.py
|
||||
.F [100%]
|
||||
@@ -138,10 +144,12 @@ Request a unique temporary directory for functional tests

    # content of test_tmpdir.py
    def test_needsfiles(tmpdir):
        print (tmpdir)
        print(tmpdir)
        assert 0

List the name ``tmpdir`` in the test function signature and ``pytest`` will look up and call a fixture factory to create the resource before performing the test function call. Before the test runs, ``pytest`` creates a unique-per-test-invocation temporary directory::
List the name ``tmpdir`` in the test function signature and ``pytest`` will look up and call a fixture factory to create the resource before performing the test function call. Before the test runs, ``pytest`` creates a unique-per-test-invocation temporary directory:

.. code-block:: pytest

    $ pytest -q test_tmpdir.py
    F [100%]

@@ -151,7 +159,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look

    tmpdir = local('PYTEST_TMPDIR/test_needsfiles0')

    def test_needsfiles(tmpdir):
        print (tmpdir)
        print(tmpdir)
>       assert 0
E       assert 0

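Beyond printing the path, a typical use of ``tmpdir`` is to create files under it; a minimal sketch, assuming only the built-in fixture:

.. code-block:: python

    def test_create_file(tmpdir):
        p = tmpdir.join("hello.txt")  # tmpdir is a py.path.local object
        p.write("content")
        assert p.read() == "content"
        assert len(tmpdir.listdir()) == 1
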
@@ -4,6 +4,27 @@

Good Integration Practices
=================================================

Install package with pip
-------------------------------------------------

For development, we recommend using virtualenv_ environments and pip_
for installing your application and any dependencies
as well as the ``pytest`` package itself. This ensures your code and
dependencies are isolated from the system Python installation.

First you need to place a ``setup.py`` file in the root of your package with the following minimum content::

    from setuptools import setup, find_packages

    setup(name="PACKAGENAME", packages=find_packages())

Where ``PACKAGENAME`` is the name of your package. You can then install your package in "editable" mode by running from the same directory::

    pip install -e .

which lets you change your source code (both tests and application) and rerun tests at will.
This is similar to running ``python setup.py develop`` or ``conda develop`` in that it installs
your package using a symlink to your development code.

.. _`test discovery`:
.. _`Python test discovery`:
@@ -177,19 +198,6 @@ Note that this layout also works in conjunction with the ``src`` layout mentione

tox
------

For development, we recommend to use virtualenv_ environments and pip_
for installing your application and any dependencies
as well as the ``pytest`` package itself. This ensures your code and
dependencies are isolated from the system Python installation.

You can then install your package in "editable" mode::

    pip install -e .

which lets you change your source code (both tests and application) and rerun tests at will.
This is similar to running `python setup.py develop` or `conda develop` in that it installs
your package using a symlink to your development code.

Once you are done with your work and want to make sure that your actual
package passes all tests you may want to look into `tox`_, the
virtualenv test automation tool and its `pytest support
@@ -282,7 +290,7 @@ your own setuptools Test command for invoking pytest.

    setup(
        # ...,
        tests_require=["pytest"],
        cmdclass={"test": PyTest},
        cmdclass={"pytest": PyTest},
    )

Now if you run::

@@ -175,3 +175,13 @@ Previous to version 2.4 to set a break point in code one needed to use ``pytest.
This is no longer needed and one can use the native ``import pdb;pdb.set_trace()`` call directly.

For more details see :ref:`breakpoints`.

"compat" properties
-------------------

.. deprecated:: 3.9

Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances has long
been documented as deprecated, but started to emit warnings from pytest ``3.9`` and onward.

Users should just ``import pytest`` and access those objects using the ``pytest`` module.

@@ -22,11 +22,13 @@ An example of a simple test:

    assert inc(3) == 5


To execute it::
To execute it:

.. code-block:: pytest

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 item

@@ -52,22 +52,22 @@ should add ``--strict`` to ``addopts``:

        serial


.. `marker-iteration`
.. _marker-revamp:

Marker revamp and iteration
---------------------------

.. versionadded:: 3.6

pytest's marker implementation traditionally worked by simply updating the ``__dict__`` attribute of functions to add markers, in a cumulative manner. As a result of the this, markers would unintendely be passed along class hierarchies in surprising ways plus the API for retriving them was inconsistent, as markers from parameterization would be stored differently than markers applied using the ``@pytest.mark`` decorator and markers added via ``node.add_marker``.
pytest's marker implementation traditionally worked by simply updating the ``__dict__`` attribute of functions to cumulatively add markers. As a result, markers would unintentionally be passed along class hierarchies in surprising ways. Further, the API for retrieving them was inconsistent, as markers from parameterization would be stored differently than markers applied using the ``@pytest.mark`` decorator and markers added via ``node.add_marker``.

This state of things made it technically next to impossible to use data from markers correctly without having a deep understanding of the internals, leading to subtle and hard-to-understand bugs in more advanced usages.

Depending on how a marker got declared/changed one would get either a ``MarkerInfo`` which might contain markers from sibling classes,
``MarkDecorators`` when marks came from parameterization or from a ``node.add_marker`` call, discarding prior marks. Also ``MarkerInfo`` acts like a single mark, when it in fact represents a merged view on multiple marks with the same name.

On top of that markers where not accessible the same way for modules, classes, and functions/methods,
in fact, markers where only accessible in functions, even if they where declared on classes/modules.
On top of that markers were not accessible the same way for modules, classes, and functions/methods.
In fact, markers were only accessible in functions, even if they were declared on classes/modules.

A new API to access markers has been introduced in pytest 3.6 in order to solve the problems with the initial design, providing the :func:`_pytest.nodes.Node.iter_markers` method to iterate over markers in a consistent manner and reworking the internals, which solved a great deal of problems with the initial design.

@@ -85,7 +85,7 @@ In general there are two scenarios on how markers should be handled:

1. Marks overwrite each other. Order matters but you only want to think of your mark as a single item. E.g.
   ``log_level('info')`` at a module level can be overwritten by ``log_level('debug')`` for a specific test.

   In this case replace use ``Node.get_closest_marker(name)``:
   In this case, use ``Node.get_closest_marker(name)``:

   .. code-block:: python

@@ -99,7 +99,7 @@ In general there are two scenarios on how markers should be handled:

       if marker:
           level = marker.args[0]

2. Marks compose additive. E.g. ``skipif(condition)`` marks means you just want to evaluate all of them,
2. Marks compose in an additive manner. E.g. ``skipif(condition)`` marks mean you just want to evaluate all of them,
   order doesn't even matter. You probably want to think of your marks as a set here.

   In this case iterate over each mark and handle their ``*args`` and ``**kwargs`` individually (see the sketch below).
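
A minimal sketch of the additive scenario, collecting the arguments of every ``skipif`` mark on a node via the ``iter_markers`` API introduced in 3.6; the helper name is hypothetical:

.. code-block:: python

    def collect_skip_conditions(item):
        # hypothetical helper: gather the condition from every skipif mark,
        # regardless of whether it was applied to the function, class or module
        conditions = []
        for mark in item.iter_markers(name="skipif"):
            conditions.extend(mark.args)
        return conditions
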
@@ -129,31 +129,31 @@ Here is a non-exhaustive list of issues fixed by the new implementation:

* Marks don't pick up nested classes (`#199 <https://github.com/pytest-dev/pytest/issues/199>`_).

* markers stains on all related classes (`#568 <https://github.com/pytest-dev/pytest/issues/568>`_).
* Markers stain on all related classes (`#568 <https://github.com/pytest-dev/pytest/issues/568>`_).

* combining marks - args and kwargs calculation (`#2897 <https://github.com/pytest-dev/pytest/issues/2897>`_).
* Combining marks - args and kwargs calculation (`#2897 <https://github.com/pytest-dev/pytest/issues/2897>`_).

* ``request.node.get_marker('name')`` returns ``None`` for markers applied in classes (`#902 <https://github.com/pytest-dev/pytest/issues/902>`_).

* marks applied in parametrize are stored as markdecorator (`#2400 <https://github.com/pytest-dev/pytest/issues/2400>`_).
* Marks applied in parametrize are stored as markdecorator (`#2400 <https://github.com/pytest-dev/pytest/issues/2400>`_).

* fix marker interaction in a backward incompatible way (`#1670 <https://github.com/pytest-dev/pytest/issues/1670>`_).
* Fix marker interaction in a backward incompatible way (`#1670 <https://github.com/pytest-dev/pytest/issues/1670>`_).

* Refactor marks to get rid of the current "marks transfer" mechanism (`#2363 <https://github.com/pytest-dev/pytest/issues/2363>`_).

* Introduce FunctionDefinition node, use it in generate_tests (`#2522 <https://github.com/pytest-dev/pytest/issues/2522>`_).

* remove named marker attributes and collect markers in items (`#891 <https://github.com/pytest-dev/pytest/issues/891>`_).
* Remove named marker attributes and collect markers in items (`#891 <https://github.com/pytest-dev/pytest/issues/891>`_).

* skipif mark from parametrize hides module level skipif mark (`#1540 <https://github.com/pytest-dev/pytest/issues/1540>`_).

* skipif + parametrize not skipping tests (`#1296 <https://github.com/pytest-dev/pytest/issues/1296>`_).

* marker transfer incompatible with inheritance (`#535 <https://github.com/pytest-dev/pytest/issues/535>`_).
* Marker transfer incompatible with inheritance (`#535 <https://github.com/pytest-dev/pytest/issues/535>`_).

More details can be found in the `original PR <https://github.com/pytest-dev/pytest/pull/3317>`_.

.. note::

    In a future major release of pytest we will introduce class-based markers,
    at which points markers will no longer be limited to instances of :py:class:`Mark`
    at which point markers will no longer be limited to instances of :py:class:`Mark`

@@ -50,11 +50,13 @@ to an expected output::

Here, the ``@parametrize`` decorator defines three different ``(test_input,expected)``
tuples so that the ``test_eval`` function will run three times using
them in turn::
them in turn:

.. code-block:: pytest

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 3 items

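For reference, the parametrized test that produces a session like the one above is, as a sketch consistent with the surrounding text:

.. code-block:: python

    # content of test_expectation.py -- a sketch; the (6*9, 42) case is the
    # deliberately failing one discussed below
    import pytest


    @pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)])
    def test_eval(test_input, expected):
        assert eval(test_input) == expected
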
@@ -99,11 +101,13 @@ for example with the builtin ``mark.xfail``::

    def test_eval(test_input, expected):
        assert eval(test_input) == expected

Let's run this::
Let's run this:

.. code-block:: pytest

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 3 items

@@ -114,6 +118,10 @@ Let's run this::

The one parameter set which caused a failure previously now
shows up as an "xfailed (expected to fail)" test.

In case the values provided to ``parametrize`` result in an empty list - for
example, if they're dynamically generated by some function - the behaviour of
pytest is defined by the :confval:`empty_parameter_set_mark` option.

To get all combinations of multiple parametrized arguments you can stack
``parametrize`` decorators::
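
    # The block that followed "::" here is cut off by the hunk boundary; the
    # lines below are a sketch matching the sentence above, not the elided
    # original -- each combination of x and y becomes one test:
    # (0, 2), (1, 2), (0, 3), (1, 3)
    import pytest

    @pytest.mark.parametrize("x", [0, 1])
    @pytest.mark.parametrize("y", [2, 3])
    def test_foo(x, y):
        pass
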
@@ -168,7 +176,9 @@ If we now pass two stringinput values, our test will run twice::

    .. [100%]
    2 passed in 0.12 seconds

Let's also run with a stringinput that will lead to a failing test::
Let's also run with a stringinput that will lead to a failing test:

.. code-block:: pytest

    $ pytest -q --stringinput="!" test_strings.py
    F [100%]
@@ -190,7 +200,9 @@ As expected our test function fails.

If you don't specify a stringinput it will be skipped because
``metafunc.parametrize()`` will be called with an empty parameter
list::
list:

.. code-block:: pytest

    $ pytest -q -rs test_strings.py
    s [100%]

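The ``stringinput`` machinery referred to here lives in a ``conftest.py``; a sketch consistent with the surrounding text (option name and fixture name taken from the output above):

.. code-block:: python

    # content of conftest.py -- a sketch
    def pytest_addoption(parser):
        parser.addoption(
            "--stringinput",
            action="append",
            default=[],
            help="list of stringinputs to pass to test functions",
        )


    def pytest_generate_tests(metafunc):
        if "stringinput" in metafunc.fixturenames:
            # with no --stringinput given this is an empty list,
            # so the test is skipped as shown above
            metafunc.parametrize("stringinput", metafunc.config.getoption("stringinput"))
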
@@ -59,9 +59,9 @@ To see a complete list of all plugins with their latest testing
status against different pytest and Python versions, please visit
`plugincompat <http://plugincompat.herokuapp.com/>`_.

You may also discover more plugins through a `pytest- pypi.python.org search`_.
You may also discover more plugins through a `pytest- pypi.org search`_.

.. _`pytest- pypi.python.org search`: https://pypi.org/search/?q=pytest-
.. _`pytest- pypi.org search`: https://pypi.org/search/?q=pytest-


.. _`available installable plugins`:
@@ -69,17 +69,15 @@ You may also discover more plugins through a `pytest- pypi.python.org search`_.

Requiring/Loading plugins in a test module or conftest file
-----------------------------------------------------------

You can require plugins in a test module or a conftest file like this::
You can require plugins in a test module or a conftest file like this:

    pytest_plugins = "myapp.testsupport.myplugin",
.. code-block:: python

    pytest_plugins = ("myapp.testsupport.myplugin",)

When the test module or conftest plugin is loaded the specified plugins
will be loaded as well.

    pytest_plugins = "myapp.testsupport.myplugin"

which will import the specified module as a ``pytest`` plugin.

.. note::
    Requiring plugins using a ``pytest_plugins`` variable in non-root
    ``conftest.py`` files is deprecated. See
@@ -75,7 +75,7 @@ Issues
------

* By using ``request.getfuncargvalue()`` we rely on actual fixture function
  execution to know what fixtures are involved, due to it's dynamic nature
  execution to know what fixtures are involved, due to its dynamic nature
* More importantly, ``request.getfuncargvalue()`` cannot be combined with
  parametrized fixtures, such as ``extra_context``
* This is very inconvenient if you wish to extend an existing test suite by

@@ -84,6 +84,12 @@ pytest.warns

.. autofunction:: pytest.warns(expected_warning: Exception, [match])
    :with:

pytest.freeze_includes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

**Tutorial**: :ref:`freezing-pytest`.

.. autofunction:: pytest.freeze_includes

.. _`marks ref`:

@@ -111,6 +117,7 @@ Add warning filters to marked test items.

A *warning specification string*, which is composed of contents of the tuple ``(action, message, category, module, lineno)``
as specified in `The Warnings filter <https://docs.python.org/3/library/warnings.html#warning-filter>`_ section of
the Python documentation, separated by ``":"``. Optional fields can be omitted.
Module names passed for filtering are not regex-escaped.

For example:

@@ -161,6 +168,25 @@ Skip a test function if a condition is ``True``.
:keyword str reason: Reason why the test function is being skipped.


.. _`pytest.mark.usefixtures ref`:

pytest.mark.usefixtures
~~~~~~~~~~~~~~~~~~~~~~~

**Tutorial**: :ref:`usefixtures`.

Mark a test function as using the given fixture names.

.. warning::

    This mark has no effect when applied
    to a **fixture** function.

.. py:function:: pytest.mark.usefixtures(*names)

    :param args: the names of the fixture to use, as strings
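
A minimal usage sketch, assuming a fixture named ``cleandir`` is defined elsewhere (e.g. in ``conftest.py``):

.. code-block:: python

    import pytest


    @pytest.mark.usefixtures("cleandir")
    class TestDirectoryInit:
        def test_something(self):
            # "cleandir" is set up before every test in this class,
            # without being listed in the signature
            pass
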

.. _`pytest.mark.xfail ref`:

pytest.mark.xfail
@@ -441,7 +467,7 @@ To use it, include in your top-most ``conftest.py`` file::


.. autoclass:: Testdir()
    :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile
    :members:

.. autoclass:: RunResult()
    :members:
@@ -592,6 +618,8 @@ Session related reporting hooks:

.. autofunction:: pytest_terminal_summary
.. autofunction:: pytest_fixture_setup
.. autofunction:: pytest_fixture_post_finalizer
.. autofunction:: pytest_logwarning
.. autofunction:: pytest_warning_captured

And here is the central hook for reporting about
test execution:
@@ -768,7 +796,7 @@ TestReport

_Result
~~~~~~~

.. autoclass:: pluggy._Result
.. autoclass:: pluggy.callers._Result
    :members:

Special Variables
@@ -847,6 +875,11 @@ Contains comma-separated list of modules that should be loaded as plugins:

    export PYTEST_PLUGINS=mymodule.plugin,xdist

PYTEST_DISABLE_PLUGIN_AUTOLOAD
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

When set, disables plugin auto-loading through setuptools entrypoints. Only explicitly specified plugins will be
loaded.

PYTEST_CURRENT_TEST
~~~~~~~~~~~~~~~~~~~
@@ -916,6 +949,7 @@ passed multiple times. The expected format is ``name=value``. For example::

* ``classic``: classic pytest output.
* ``progress``: like classic pytest output, but with a progress indicator.
* ``count``: like progress, but shows progress as the number of tests completed instead of a percent.

The default is ``progress``, but you can fall back to ``classic`` if you prefer or
the new mode is causing unexpected problems:
@@ -949,6 +983,7 @@ passed multiple times. The expected format is ``name=value``. For example::

* ``skip`` skips tests with an empty parameterset (default)
* ``xfail`` marks tests with an empty parameterset as xfail(run=False)
* ``fail_at_collect`` raises an exception if parametrize collects an empty parameter set

.. code-block:: ini

@@ -1210,7 +1245,8 @@ passed multiple times. The expected format is ``name=value``. For example::
.. confval:: python_classes

    One or more name prefixes or glob-style patterns determining which classes
    are considered for test collection. By default, pytest will consider any
    are considered for test collection. Search for multiple glob patterns by
    adding a space between patterns. By default, pytest will consider any
    class prefixed with ``Test`` as a test collection. Here is an example of how
    to collect tests from classes that end in ``Suite``:

@@ -1227,15 +1263,33 @@ passed multiple times. The expected format is ``name=value``. For example::
.. confval:: python_files

    One or more Glob-style file patterns determining which python files
    are considered as test modules. By default, pytest will consider
    any file matching with ``test_*.py`` and ``*_test.py`` globs as a test
    module.
    are considered as test modules. Search for multiple glob patterns by
    adding a space between patterns:

    .. code-block:: ini

        [pytest]
        python_files = test_*.py check_*.py example_*.py

    Or one per line:

    .. code-block:: ini

        [pytest]
        python_files =
            test_*.py
            check_*.py
            example_*.py

    By default, files matching ``test_*.py`` and ``*_test.py`` will be considered
    test modules.


.. confval:: python_functions

    One or more name prefixes or glob-patterns determining which test functions
    and methods are considered tests. By default, pytest will consider any
    and methods are considered tests. Search for multiple glob patterns by
    adding a space between patterns. By default, pytest will consider any
    function prefixed with ``test`` as a test. Here is an example of how
    to collect test functions and methods that end in ``_test``:

@@ -1,4 +1,3 @@
# pinning sphinx to 1.4.* due to search issues with rtd:
# https://github.com/rtfd/readthedocs-sphinx-ext/issues/25
sphinx ==1.4.*
pygments-pytest>=1.1.0
sphinx>=1.8.2
sphinxcontrib-trio

@@ -58,18 +58,20 @@ by calling the ``pytest.skip(reason)`` function:

    if not valid_config():
        pytest.skip("unsupported configuration")

The imperative method is useful when it is not possible to evaluate the skip condition
during import time.

It is also possible to skip the whole module using
``pytest.skip(reason, allow_module_level=True)`` at the module level:

.. code-block:: python

    import sys
    import pytest

    if not pytest.config.getoption("--custom-flag"):
        pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True)
    if not sys.platform.startswith("win"):
        pytest.skip("skipping windows-only tests", allow_module_level=True)

The imperative method is useful when it is not possible to evaluate the skip condition
during import time.

**Reference**: :ref:`pytest.mark.skip ref`

@@ -136,12 +138,6 @@ You can use the ``skipif`` marker (as any other marker) on classes::

If the condition is ``True``, this marker will produce a skip result for
each of the test methods of that class.

.. warning::

    The use of ``skipif`` on classes that use inheritance is strongly
    discouraged. `A Known bug <https://github.com/pytest-dev/pytest/issues/568>`_
    in pytest's markers may cause unexpected behavior in super classes.

If you want to skip all test functions of a module, you may use
the ``pytestmark`` name on the global level:

@@ -283,7 +279,7 @@ on a particular platform::

~~~~~~~~~~~~~~~~~~~~

If you want to be more specific as to why the test is failing, you can specify
a single exception, or a list of exceptions, in the ``raises`` argument.
a single exception, or a tuple of exceptions, in the ``raises`` argument.

.. code-block:: python
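
    # The original block is cut off by the hunk boundary; the lines below are
    # a sketch consistent with the surrounding text, not the elided original.
    import pytest

    @pytest.mark.xfail(raises=(IndexError, KeyError))
    def test_f():
        # the test is reported as xfailed only if it raises one of the listed types
        raise IndexError("known bug")
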
@@ -327,11 +323,13 @@ Here is a simple test file with the several usages:

.. literalinclude:: example/xfail_demo.py

Running it with the report-on-xfail option gives this output::
Running it with the report-on-xfail option gives this output:

.. code-block:: pytest

    example $ pytest -rx xfail_demo.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR/example, inifile:
    collected 7 items

@@ -14,12 +14,17 @@ Talks and Tutorials

Books
---------------------------------------------

- `pytest Quick Start Guide, by Bruno Oliveira (2018)
  <https://www.packtpub.com/web-development/pytest-quick-start-guide>`_.

- `Python Testing with pytest, by Brian Okken (2017)
  <https://pragprog.com/book/bopytest/python-testing-with-pytest>`_.

Talks and blog postings
---------------------------------------------

- pytest: recommendations, basic packages for testing in Python and Django, Andreu Vallbona, PyconES 2017 (`slides in english <http://talks.apsl.io/testing-pycones-2017/>`_, `video in spanish <https://www.youtube.com/watch?v=K20GeR-lXDk>`_)

- `Pythonic testing, Igor Starikov (Russian, PyNsk, November 2016)
  <https://www.youtube.com/watch?v=_92nfdd5nK8>`_.

@@ -5,6 +5,78 @@

Temporary directories and files
================================================

The ``tmp_path`` fixture
------------------------

.. versionadded:: 3.9


You can use the ``tmp_path`` fixture which will
provide a temporary directory unique to the test invocation,
created in the `base temporary directory`_.

``tmp_path`` is a ``pathlib/pathlib2.Path`` object. Here is an example test usage:

.. code-block:: python

    # content of test_tmp_path.py
    import os

    CONTENT = u"content"


    def test_create_file(tmp_path):
        d = tmp_path / "sub"
        d.mkdir()
        p = d / "hello.txt"
        p.write_text(CONTENT)
        assert p.read_text() == CONTENT
        assert len(list(tmp_path.iterdir())) == 1
        assert 0

Running this would result in a passed test except for the last
``assert 0`` line which we use to look at values:

.. code-block:: pytest

    $ pytest test_tmp_path.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 item

    test_tmp_path.py F [100%]

    ================================= FAILURES =================================
    _____________________________ test_create_file _____________________________

    tmp_path = PosixPath('PYTEST_TMPDIR/test_create_file0')

        def test_create_file(tmp_path):
            d = tmp_path / "sub"
            d.mkdir()
            p = d / "hello.txt"
            p.write_text(CONTENT)
            assert p.read_text() == CONTENT
            assert len(list(tmp_path.iterdir())) == 1
    >       assert 0
    E       assert 0

    test_tmp_path.py:13: AssertionError
    ========================= 1 failed in 0.12 seconds =========================

The ``tmp_path_factory`` fixture
--------------------------------

.. versionadded:: 3.9


The ``tmp_path_factory`` is a session-scoped fixture which can be used
to create arbitrary temporary directories from any other fixture or test.

It is intended to replace ``tmpdir_factory``, and returns :class:`pathlib.Path` instances.
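
A minimal sketch of using it from a session-scoped fixture; the fixture and directory names are illustrative:

.. code-block:: python

    import pytest


    @pytest.fixture(scope="session")
    def image_dir(tmp_path_factory):
        # mktemp() returns a fresh pathlib.Path under the base temporary directory
        return tmp_path_factory.mktemp("images")


    def test_dir_exists(image_dir):
        assert image_dir.is_dir()
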
The 'tmpdir' fixture
--------------------

@@ -25,11 +97,13 @@ and more. Here is an example test usage::

    assert 0

Running this would result in a passed test except for the last
``assert 0`` line which we use to look at values::
``assert 0`` line which we use to look at values:

.. code-block:: pytest

    $ pytest test_tmpdir.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 item

@@ -22,16 +22,15 @@ Almost all ``unittest`` features are supported:

* ``@unittest.skip`` style decorators;
* ``setUp/tearDown``;
* ``setUpClass/tearDownClass()``;
* ``setUpClass/tearDownClass``;
* ``setUpModule/tearDownModule``;

.. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol
.. _`setUpModule/tearDownModule`: https://docs.python.org/3/library/unittest.html#setupmodule-and-teardownmodule
.. _`subtests`: https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests

Up to this point pytest does not have support for the following features:

* `load_tests protocol`_;
* `setUpModule/tearDownModule`_;
* `subtests`_;

Benefits out of the box
@@ -123,11 +122,13 @@ fixture definition::

The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that
the pytest fixture function ``db_class`` is called once per class.
Due to the deliberately failing assert statements, we can take a look at
the ``self.db`` values in the traceback::
the ``self.db`` values in the traceback:

.. code-block:: pytest

    $ pytest test_unittest_db.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 2 items

@@ -200,7 +201,9 @@ used for all methods of the class where it is defined. This is a
shortcut for using a ``@pytest.mark.usefixtures("initdir")`` marker
on the class like in the previous example.

Running this test module ...::
Running this test module ...:

.. code-block:: pytest

    $ pytest -q test_unittest_cleandir.py
    . [100%]

@@ -140,6 +140,53 @@ will be shown (because KeyboardInterrupt is caught by pytest). By using this
option you make sure a trace is shown.


.. _`pytest.detailed_failed_tests_usage`:

Detailed summary report
-----------------------

.. versionadded:: 2.9

The ``-r`` flag can be used to display a test results summary at the end of the test session,
making it easy in large test suites to get a clear picture of all failures, skips, xfails, etc.

Example:

.. code-block:: pytest

    $ pytest -ra
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 0 items

    ======================= no tests ran in 0.12 seconds =======================

The ``-r`` option accepts a number of characters after it, with ``a`` used above meaning "all except passes".

Here is the full list of available characters that can be used:

 - ``f`` - failed
 - ``E`` - error
 - ``s`` - skipped
 - ``x`` - xfailed
 - ``X`` - xpassed
 - ``p`` - passed
 - ``P`` - passed with output
 - ``a`` - all except ``pP``

More than one character can be used, so for example to only see failed and skipped tests, you can execute:

.. code-block:: pytest

    $ pytest -rfs
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 0 items

    ======================= no tests ran in 0.12 seconds =======================

.. _pdb-option:

Dropping to PDB_ (Python Debugger) on failures
@@ -171,6 +218,18 @@ for example::

    >>> sys.last_value
    AssertionError('assert result == "ok"',)

.. _trace-option:

Dropping to PDB_ (Python Debugger) at the start of a test
----------------------------------------------------------


``pytest`` allows one to drop into the PDB_ prompt immediately at the start of each test via a command line option::

    pytest --trace

This will invoke the Python debugger at the start of every test.

.. _breakpoints:

Setting breakpoints
@@ -200,8 +259,8 @@ Pytest supports the use of ``breakpoint()`` with the following behaviours:

- When ``breakpoint()`` is called and ``PYTHONBREAKPOINT`` is set to the default value, pytest will use the custom internal PDB trace UI instead of the system default ``Pdb``.
- When tests are complete, the system will default back to the system ``Pdb`` trace UI.
- If ``--pdb`` is called on execution of pytest, the custom internal Pdb trace UI is used on ``bothbreakpoint()`` and failed tests/unhandled exceptions.
- If ``--pdbcls`` is used, the custom class debugger will be executed when a test fails (as expected within existing behaviour), but also when ``breakpoint()`` is called from within a test, the custom class debugger will be instantiated.
- With ``--pdb`` passed to pytest, the custom internal Pdb trace UI is used with both ``breakpoint()`` and failed tests/unhandled exceptions.
- ``--pdbcls`` can be used to specify a custom debugger class.
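
In practice that means a plain ``breakpoint()`` call inside a test body is enough; a sketch, where ``compute_answer`` is a hypothetical function under test:

.. code-block:: python

    def test_debugging_session():
        result = compute_answer()  # hypothetical function under test
        breakpoint()  # pytest swaps in its PDB UI unless PYTHONBREAKPOINT overrides it
        assert result == 42
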
.. _durations:

@@ -214,6 +273,7 @@ To get a list of the slowest 10 test durations::

    pytest --durations=10

By default, pytest will not show test durations that are too small (<0.01s) unless ``-vv`` is passed on the command-line.

Creating JUnitXML format files
----------------------------------------------------

@@ -18,11 +18,13 @@ and displays them at the end of the session::

    def test_one():
        assert api_v1() == 1

Running pytest now produces this output::
Running pytest now produces this output:

.. code-block:: pytest

    $ pytest test_show_warnings.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 item

@@ -33,13 +35,13 @@ Running pytest now produces this output::

    $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2
        warnings.warn(UserWarning("api v1, should use functions from v2"))

    -- Docs: http://doc.pytest.org/en/latest/warnings.html
    -- Docs: https://docs.pytest.org/en/latest/warnings.html
    =================== 1 passed, 1 warnings in 0.12 seconds ===================

Pytest by default catches all warnings except for ``DeprecationWarning`` and ``PendingDeprecationWarning``.

The ``-W`` flag can be passed to control which warnings will be displayed or even turn
them into errors::
them into errors:

.. code-block:: pytest

    $ pytest -q test_show_warnings.py -W error::UserWarning
    F [100%]
@@ -78,7 +80,6 @@ Both ``-W`` command-line option and ``filterwarnings`` ini option are based on P
`-W option`_ and `warnings.simplefilter`_, so please refer to those sections in the Python
documentation for other examples and advanced usage.


.. _`filterwarnings`:

``@pytest.mark.filterwarnings``
@@ -117,24 +118,6 @@ decorator or to all tests in a module by setting the ``pytestmark`` variable:

    pytestmark = pytest.mark.filterwarnings("error")


.. note::

    Except for these features, pytest does not change the python warning filter; it only captures
    and displays the warnings which are issued with respect to the currently configured filter,
    including changes to the filter made by test functions or by the system under test.

.. note::

    ``DeprecationWarning`` and ``PendingDeprecationWarning`` are hidden by the standard library
    by default so you have to explicitly configure them to be displayed in your ``pytest.ini``:

    .. code-block:: ini

        [pytest]
        filterwarnings =
            once::DeprecationWarning
            once::PendingDeprecationWarning


*Credits go to Florian Schulze for the reference implementation in the* `pytest-warnings`_
*plugin.*
@@ -143,18 +126,102 @@ decorator or to all tests in a module by setting the ``pytestmark`` variable:

.. _warnings.simplefilter: https://docs.python.org/3/library/warnings.html#warnings.simplefilter
.. _`pytest-warnings`: https://github.com/fschulze/pytest-warnings

Disabling warnings summary
--------------------------

Disabling warning capture
-------------------------
Although not recommended, you can use the ``--disable-warnings`` command-line option to suppress the
warning summary entirely from the test run output.

This feature is enabled by default but can be disabled entirely in your ``pytest.ini`` file with:
Disabling warning capture entirely
----------------------------------

This plugin is enabled by default but can be disabled entirely in your ``pytest.ini`` file with:

.. code-block:: ini

    [pytest]
    addopts = -p no:warnings

Or passing ``-p no:warnings`` in the command-line.
Or passing ``-p no:warnings`` in the command-line. This might be useful if your test suite handles warnings
using an external system.


.. _`deprecation-warnings`:

DeprecationWarning and PendingDeprecationWarning
------------------------------------------------

.. versionadded:: 3.8
.. versionchanged:: 3.9

By default pytest will display ``DeprecationWarning`` and ``PendingDeprecationWarning`` warnings from
user code and third-party libraries, as recommended by `PEP-0565 <https://www.python.org/dev/peps/pep-0565>`_.
This helps users keep their code modern and avoid breakages when deprecated functionality is effectively removed.

Sometimes it is useful to hide some specific deprecation warnings that happen in code that you have no control over
(such as third-party libraries), in which case you might use the warning filters options (ini or marks) to ignore
those warnings.

For example:

.. code-block:: ini

    [pytest]
    filterwarnings =
        ignore:.*U.*mode is deprecated:DeprecationWarning


This will ignore all warnings of type ``DeprecationWarning`` where the start of the message matches
the regular expression ``".*U.*mode is deprecated"``.

.. note::
    If warnings are configured at the interpreter level, using
    the `PYTHONWARNINGS <https://docs.python.org/3/using/cmdline.html#envvar-PYTHONWARNINGS>`_ environment variable or the
    ``-W`` command-line option, pytest will not configure any filters by default.

    Also pytest doesn't follow ``PEP-0565``'s suggestion of resetting all warning filters because
    it might break test suites that configure warning filters themselves
    by calling ``warnings.simplefilter`` (see issue `#2430 <https://github.com/pytest-dev/pytest/issues/2430>`_
    for an example of that).

.. _`ensuring a function triggers a deprecation warning`:

.. _ensuring_function_triggers:

Ensuring code triggers a deprecation warning
--------------------------------------------

You can also call a global helper for checking
that a certain function call triggers a ``DeprecationWarning`` or
``PendingDeprecationWarning``::

    import pytest

    def test_global():
        pytest.deprecated_call(myfunction, 17)

By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be
caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide
them. If you wish to record them in your own code, use the
command ``warnings.simplefilter('always')``::

    import warnings
    import pytest

    def test_deprecation(recwarn):
        warnings.simplefilter('always')
        warnings.warn("deprecated", DeprecationWarning)
        assert len(recwarn) == 1
        assert recwarn.pop(DeprecationWarning)

You can also use it as a contextmanager::

    def test_global():
        with pytest.deprecated_call():
            myobject.deprecated_method()


.. _`asserting warnings`:

@@ -261,38 +328,52 @@ warnings, or index into it to get a particular recorded warning.

Full API: :class:`WarningsRecorder`.

.. _`ensuring a function triggers a deprecation warning`:

.. _ensuring_function_triggers:
.. _internal-warnings:

Ensuring a function triggers a deprecation warning
-------------------------------------------------------
Internal pytest warnings
------------------------

You can also call a global helper for checking
that a certain function call triggers a ``DeprecationWarning`` or
``PendingDeprecationWarning``::
.. versionadded:: 3.8

    import pytest
pytest may generate its own warnings in some situations, such as improper usage or deprecated features.

    def test_global():
        pytest.deprecated_call(myfunction, 17)
For example, pytest will emit a warning if it encounters a class that matches :confval:`python_classes` but also
defines an ``__init__`` constructor, as this prevents the class from being instantiated:

By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be
caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide
them. If you wish to record them in your own code, use the
command ``warnings.simplefilter('always')``::
.. code-block:: python

    import warnings
    import pytest
    # content of test_pytest_warnings.py
    class Test:
        def __init__(self):
            pass

    def test_deprecation(recwarn):
        warnings.simplefilter('always')
        warnings.warn("deprecated", DeprecationWarning)
        assert len(recwarn) == 1
        assert recwarn.pop(DeprecationWarning)
        def test_foo(self):
            assert 1 == 1

You can also use it as a contextmanager::
.. code-block:: pytest

    def test_global():
        with pytest.deprecated_call():
            myobject.deprecated_method()
    $ pytest test_pytest_warnings.py -q

    ============================= warnings summary =============================
    test_pytest_warnings.py:1
    $REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestWarning: cannot collect test class 'Test' because it has a __init__ constructor
        class Test:

    -- Docs: https://docs.pytest.org/en/latest/warnings.html
    1 warnings in 0.12 seconds

These warnings might be filtered using the same builtin mechanisms used to filter other types of warnings.

Please read our :ref:`backwards-compatibility` to learn how we proceed with deprecating and eventually removing
features.

The following warning types are used by pytest and are part of the public API:

.. autoclass:: pytest.PytestWarning

.. autoclass:: pytest.PytestDeprecationWarning

.. autoclass:: pytest.RemovedInPytest4Warning

.. autoclass:: pytest.PytestExperimentalApiWarning

@@ -73,7 +73,7 @@ sub directory but not for other directories::

    a/conftest.py:
        def pytest_runtest_setup(item):
            # called for running each test in 'a' directory
            print ("setting up", item)
            print("setting up", item)

    a/test_sub.py:
        def test_sub():
@@ -386,11 +386,53 @@ return a result object, with which we can assert the tests' outcomes.

        result.assert_outcomes(passed=4)


Additionally, it is possible to copy examples from an example folder before running pytest on it:

.. code-block:: ini

    # content of pytest.ini
    [pytest]
    pytester_example_dir = .


.. code-block:: python

    # content of test_example.py


    def test_plugin(testdir):
        testdir.copy_example("test_example.py")
        testdir.runpytest("-k", "test_example")


    def test_example():
        pass

.. code-block:: pytest

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
    collected 2 items

    test_example.py .. [100%]

    ============================= warnings summary =============================
    test_example.py::test_plugin
    $REGENDOC_TMPDIR/test_example.py:4: PytestExperimentalApiWarning: testdir.copy_example is an experimental api that may change over time
        testdir.copy_example("test_example.py")

    -- Docs: https://docs.pytest.org/en/latest/warnings.html
    =================== 2 passed, 1 warnings in 0.12 seconds ===================

For more information about the result object that ``runpytest()`` returns, and
the methods that it provides please check out the :py:class:`RunResult
<_pytest.pytester.RunResult>` documentation.


.. _`writinghooks`:

Writing hook functions
@@ -1,4 +1,5 @@
import json

import py
import requests

@@ -64,9 +65,9 @@ def report(issues):
        print(title)
        # print()
        # lines = body.split("\n")
        # print ("\n".join(lines[:3]))
        # print("\n".join(lines[:3]))
        # if len(lines) > 3 or len(body) > 240:
        #     print ("...")
        #     print("...")
    print("\n\nFound %s open issues" % len(issues))

@@ -1,6 +1,7 @@
[build-system]
requires = [
    "setuptools",
    # sync with setup.py until we discard non-pep-517/518
    "setuptools>=40.0",
    "setuptools-scm",
    "wheel",
]
@@ -10,11 +11,17 @@ package = "pytest"
package_dir = "src"
filename = "CHANGELOG.rst"
directory = "changelog/"
title_format = "pytest {version} ({project_date})"
template = "changelog/_template.rst"

[[tool.towncrier.type]]
directory = "removal"
name = "Deprecations and Removals"
name = "Removals"
showcontent = true

[[tool.towncrier.type]]
directory = "deprecation"
name = "Deprecations"
showcontent = true

[[tool.towncrier.type]]

scripts/appveyor-retry.cmd (new file, 21 lines)
@@ -0,0 +1,21 @@
@echo off
rem Source: https://github.com/appveyor/ci/blob/master/scripts/appveyor-retry.cmd
rem initiate the retry number
set retryNumber=0
set maxRetries=3

:RUN
%*
set LastErrorLevel=%ERRORLEVEL%
IF %LastErrorLevel% == 0 GOTO :EOF
set /a retryNumber=%retryNumber%+1
IF %reTryNumber% == %maxRetries% (GOTO :FAILED)

:RETRY
set /a retryNumberDisp=%retryNumber%+1
@echo Command "%*" failed with exit code %LastErrorLevel%. Retrying %retryNumberDisp% of %maxRetries%
GOTO :RUN

: FAILED
@echo Sorry, we tried running command for %maxRetries% times and all attempts were unsuccessful!
EXIT /B %LastErrorLevel%
@@ -1,8 +0,0 @@
REM skip "coveralls" run in PRs or forks
if "%TOXENV%" == "coveralls" (
    if not defined COVERALLS_REPO_TOKEN (
        echo skipping coveralls run because COVERALLS_REPO_TOKEN is not defined
        exit /b 0
    )
)
C:\Python36\python -m tox

scripts/prepare-coverage.bat (new file, 10 lines)
@@ -0,0 +1,10 @@
REM scripts called by AppVeyor to setup the environment variables to enable coverage
if not defined PYTEST_NO_COVERAGE (
    set "COVERAGE_FILE=%CD%\.coverage"
    set "COVERAGE_PROCESS_START=%CD%\.coveragerc"
    set "_PYTEST_TOX_COVERAGE_RUN=coverage run -m"
    set "_PYTEST_TOX_EXTRA_DEP=coverage-enable-subprocess"
    echo Coverage setup completed
) else (
    echo Skipping coverage setup, PYTEST_NO_COVERAGE is set
)
@@ -9,11 +9,11 @@ against itself, passing on many different interpreters and platforms.
This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

http://doc.pytest.org/en/latest/changelog.html
https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

http://docs.pytest.org
https://docs.pytest.org/en/latest/

As usual, you can upgrade from pypi via:

@@ -7,7 +7,7 @@ This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

@@ -1,14 +1,17 @@
"""
Invoke development tasks.
"""
import argparse
from pathlib import Path
from subprocess import check_output, check_call
from subprocess import call
from subprocess import check_call
from subprocess import check_output

import invoke
from colorama import Fore
from colorama import init


@invoke.task(help={"version": "version being released"})
def announce(ctx, version):
def announce(version):
    """Generates a new release announcement entry in the docs."""
    # Get our list of authors
    stdout = check_output(["git", "describe", "--abbrev=0", "--tags"])
@@ -38,7 +41,7 @@ def announce(ctx, version):
        "../doc/en/announce/release-{}.rst".format(version)
    )
    target.write_text(text, encoding="UTF-8")
    print("[generate.announce] Generated {}".format(target.name))
    print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}")

    # Update index with the new release entry
    index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst")
@@ -50,69 +53,63 @@ def announce(ctx, version):
        if line != new_line:
            lines.insert(index, new_line)
            index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8")
            print("[generate.announce] Updated {}".format(index_path.name))
            print(
                f"{Fore.CYAN}[generate.announce] {Fore.RESET}Updated {index_path.name}"
            )
        else:
            print(
                "[generate.announce] Skip {} (already contains release)".format(
                    index_path.name
                )
                f"{Fore.CYAN}[generate.announce] {Fore.RESET}Skip {index_path.name} (already contains release)"
            )
            break

    check_call(["git", "add", str(target)])


@invoke.task()
def regen(ctx):
def regen():
    """Call regendoc tool to update examples and pytest output in the docs."""
    print("[generate.regen] Updating docs")
    print(f"{Fore.CYAN}[generate.regen] {Fore.RESET}Updating docs")
    check_call(["tox", "-e", "regen"])


@invoke.task()
def make_tag(ctx, version):
    """Create a new, local tag for the release, only if the repository is clean."""
    from git import Repo

    repo = Repo(".")
    if repo.is_dirty():
        print("Current repository is dirty. Please commit any changes and try again.")
        raise invoke.Exit(code=2)

    tag_names = [x.name for x in repo.tags]
    if version in tag_names:
        print("[generate.make_tag] Delete existing tag {}".format(version))
        repo.delete_tag(version)

    print("[generate.make_tag] Create tag {}".format(version))
    repo.create_tag(version)
def fix_formatting():
    """Runs pre-commit in all files to ensure they are formatted correctly"""
    print(
        f"{Fore.CYAN}[generate.fix linting] {Fore.RESET}Fixing formatting using pre-commit"
    )
    call(["pre-commit", "run", "--all-files"])


@invoke.task(help={"version": "version being released"})
def pre_release(ctx, version):
def pre_release(version):
    """Generates new docs, release announcements and creates a local tag."""
    announce(ctx, version)
    regen(ctx)
    changelog(ctx, version, write_out=True)
    announce(version)
    regen()
    changelog(version, write_out=True)
    fix_formatting()

    msg = "Preparing release version {}".format(version)
    check_call(["git", "commit", "-a", "-m", msg])

    make_tag(ctx, version)

    print()
    print("[generate.pre_release] Please push your branch and open a PR.")
    print(f"{Fore.CYAN}[generate.pre_release] {Fore.GREEN}All done!")
    print()
    print(f"Please push your branch and open a PR.")


@invoke.task(
    help={
        "version": "version being released",
        "write_out": "write changes to the actual changelog",
    }
)
def changelog(ctx, version, write_out=False):
def changelog(version, write_out=False):
    if write_out:
        addopts = []
    else:
        addopts = ["--draft"]
    check_call(["towncrier", "--yes", "--version", version] + addopts)


def main():
    init(autoreset=True)
    parser = argparse.ArgumentParser()
    parser.add_argument("version", help="Release version")
    options = parser.parse_args()
    pre_release(options.version)


if __name__ == "__main__":
    main()
scripts/upload-coverage.bat (new file, 11 lines)
@@ -0,0 +1,11 @@
REM script called by AppVeyor to combine and upload coverage information to codecov
if not defined PYTEST_NO_COVERAGE (
    echo Prepare to upload coverage information
    C:\Python36\Scripts\pip install codecov
    C:\Python36\Scripts\coverage combine
    C:\Python36\Scripts\coverage xml --ignore-errors
    C:\Python36\Scripts\coverage report -m --ignore-errors
    scripts\appveyor-retry C:\Python36\Scripts\codecov --required -X gcov pycov search -f coverage.xml --flags %TOXENV:-= % windows
) else (
    echo Skipping coverage upload, PYTEST_NO_COVERAGE is set
)
Some files were not shown because too many files have changed in this diff.