Compare commits

1317 Commits (SHA1 range: 4621638f07 … d12f46caef; the Author, Date and message columns of the commit table are not preserved here)
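A listing like the one above can be reproduced locally with git's two-dot range syntax; a sketch, assuming d12f46caef is the base of the compare and both SHAs are reachable in a clone of pytest-dev/pytest::

    $ git clone https://github.com/pytest-dev/pytest
    $ cd pytest
    $ git log --oneline d12f46caef..4621638f07          # commits reachable from 4621638f07 but not d12f46caef
    $ git log --oneline d12f46caef..4621638f07 | wc -l  # should print 1317 for this compare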
15  .coveragerc

@@ -1,9 +1,18 @@
[run]
source = pytest,_pytest,testing/
include =
    src/*
    testing/*
    */lib/python*/site-packages/_pytest/*
    */lib/python*/site-packages/pytest.py
    */pypy*/site-packages/_pytest/*
    */pypy*/site-packages/pytest.py
    *\Lib\site-packages\_pytest\*
    *\Lib\site-packages\pytest.py
parallel = 1
branch = 1

[paths]
source = src/
    .tox/*/lib/python*/site-packages/
    .tox\*\Lib\site-packages\
    */lib/python*/site-packages/
    */pypy*/site-packages/
    *\Lib\site-packages\
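The ``parallel = 1`` setting makes each measured process write its own ``.coverage.*`` data file, and the ``[paths]`` section folds the various virtualenv ``site-packages`` locations back onto ``src/`` when the files are merged. A minimal local workflow using coverage.py's standard commands (a sketch, not taken from the repository's CI scripts)::

    $ coverage run -m pytest    # writes a per-process .coverage.<host>.<pid>.<suffix> file
    $ coverage combine          # merges the per-process files, applying [paths] aliasing
    $ coverage report -m        # per-file report including missing line numbers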
2  .github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -3,7 +3,7 @@ Thanks for submitting a PR, your contribution is really appreciated!
Here's a quick checklist that should be present in PRs (you can delete this text from the final description, this is
just a guideline):

- [ ] Create a new changelog file in the `changelog` folder, with a name like `<ISSUE NUMBER>.<TYPE>.rst`. See [changelog/README.rst](/changelog/README.rst) for details.
- [ ] Create a new changelog file in the `changelog` folder, with a name like `<ISSUE NUMBER>.<TYPE>.rst`. See [changelog/README.rst](https://github.com/pytest-dev/pytest/blob/master/changelog/README.rst) for details.
- [ ] Target the `master` branch for bug fixes, documentation updates and trivial changes.
- [ ] Target the `features` branch for new features and removals/deprecations.
- [ ] Include documentation when adding new features.
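In practice the checklist maps to a small amount of setup per PR; a sketch, where the branch names and issue number 1234 are purely hypothetical::

    $ git checkout -b fix-some-bug upstream/master        # bug fix: target master
    $ git checkout -b add-some-feature upstream/features  # new feature: target features
    $ echo "Fixed the bug." > changelog/1234.bugfix.rst   # <ISSUE NUMBER>.<TYPE>.rst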
2  .github/config.yml (vendored, new file)

@@ -0,0 +1,2 @@
rtd:
  project: pytest
4  .gitignore (vendored)

@@ -44,3 +44,7 @@ coverage.xml
.pydevproject
.project
.settings
.vscode

# generated by pip
pip-wheel-metadata/
.pre-commit-config.yaml

@@ -1,32 +1,43 @@
exclude: doc/en/example/py2py3/test_py2.py
repos:
- repo: https://github.com/ambv/black
  rev: 18.6b4
  rev: 18.9b0
  hooks:
  - id: black
    args: [--safe, --quiet]
    language_version: python3
- repo: https://github.com/asottile/blacken-docs
  rev: v0.2.0
  rev: v0.3.0
  hooks:
  - id: blacken-docs
    additional_dependencies: [black==18.6b4]
    additional_dependencies: [black==18.9b0]
    language_version: python3
- repo: https://github.com/pre-commit/pre-commit-hooks
  rev: v1.3.0
  rev: v2.1.0
  hooks:
  - id: trailing-whitespace
  - id: end-of-file-fixer
  - id: check-yaml
  - id: debug-statements
    exclude: _pytest/debugging.py
  - id: flake8
- repo: https://github.com/asottile/pyupgrade
  rev: v1.2.0
  language_version: python3
- repo: https://gitlab.com/pycqa/flake8
  rev: 3.7.0
  hooks:
  - id: pyupgrade
  - id: flake8
    language_version: python3
- repo: https://github.com/asottile/reorder_python_imports
  rev: v1.3.5
  hooks:
  - id: reorder-python-imports
    args: ['--application-directories=.:src']
- repo: https://github.com/asottile/pyupgrade
  rev: v1.11.1
  hooks:
  - id: pyupgrade
    args: [--keep-percent-format]
- repo: https://github.com/pre-commit/pygrep-hooks
  rev: v1.0.0
  rev: v1.2.0
  hooks:
  - id: rst-backticks
- repo: local

@@ -40,6 +51,20 @@ repos:
  - id: changelogs-rst
    name: changelog filenames
    language: fail
    entry: 'changelog files must be named ####.(feature|bugfix|doc|removal|vendor|trivial).rst'
    exclude: changelog/(\d+\.(feature|bugfix|doc|removal|vendor|trivial).rst|README.rst|_template.rst)
    entry: 'changelog files must be named ####.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst'
    exclude: changelog/(\d+\.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst|README.rst|_template.rst)
    files: ^changelog/
  - id: py-deprecated
    name: py library is deprecated
    language: pygrep
    entry: >
        (?x)\bpy\.(
            _code\.|
            builtin\.|
            code\.|
            io\.(BytesIO|saferepr)|
            path\.local\.sysfind|
            process\.|
            std\.
        )
    types: [python]
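These hooks run through the pre-commit framework; a typical local setup using its standard commands (nothing repository-specific assumed)::

    $ pip install pre-commit
    $ pre-commit install          # run the configured hooks on every git commit
    $ pre-commit run --all-files  # or check the whole tree in one pass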
165  .travis.yml

@@ -1,71 +1,103 @@
sudo: false
language: python
dist: xenial
stages:
- baseline
- name: test
  if: repo = pytest-dev/pytest AND tag IS NOT present
- name: deploy
  if: repo = pytest-dev/pytest AND tag IS present
python:
- '3.6'
install:
- pip install --upgrade --pre tox
python: '3.7'
cache: false

env:
  matrix:
    # note: please use "tox --listenvs" to populate the build matrix below
    # please remove the linting env in all cases
    - TOXENV=py27-pexpect
    - TOXENV=py27-xdist
    - TOXENV=py27-trial
    - TOXENV=py27-numpy
    - TOXENV=py27-pluggymaster PYTEST_NO_COVERAGE=1
    - TOXENV=py36-pexpect
    - TOXENV=py36-xdist
    - TOXENV=py36-trial
    - TOXENV=py36-numpy
    - TOXENV=py36-pluggymaster PYTEST_NO_COVERAGE=1
    - TOXENV=py27-nobyte
    - TOXENV=doctesting
    - TOXENV=docs PYTEST_NO_COVERAGE=1
  global:
    - PYTEST_ADDOPTS=-vv

install:
- python -m pip install --upgrade --pre tox

jobs:
  include:
    # Coverage tracking is slow with pypy, skip it.
    - env: TOXENV=pypy PYTEST_NO_COVERAGE=1
      python: 'pypy-5.4'
    - env: TOXENV=py35
      python: '3.5'
    - env: TOXENV=py36-freeze PYTEST_NO_COVERAGE=1
      python: '3.6'
    # OSX tests - first (in test stage), since they are the slower ones.
    - &test-macos
      # NOTE: (tests with) pexpect appear to be buggy on Travis,
      # at least with coverage.
      # Log: https://travis-ci.org/pytest-dev/pytest/jobs/500358864
      os: osx
      osx_image: xcode10.1
      language: generic
      # Coverage for:
      # - py2 with symlink in test_cmdline_python_package_symlink.
      env: TOXENV=py27-xdist PYTEST_COVERAGE=1
      before_install:
        - python -V
        - test $(python -c 'import sys; print("%d%d" % sys.version_info[0:2])') = 27
    - <<: *test-macos
      env: TOXENV=py37-xdist
      before_install:
        - which python3
        - python3 -V
        - ln -sfn "$(which python3)" /usr/local/bin/python
        - python -V
        - test $(python -c 'import sys; print("%d%d" % sys.version_info[0:2])') = 37

    # Full run of latest (major) supported versions, without xdist.
    - env: TOXENV=py27
      python: '2.7'
    - env: TOXENV=py37
      python: '3.7'
      sudo: required
      dist: xenial
    - &test-macos
      language: generic
      os: osx
      osx_image: xcode9.4
      sudo: required
      install:
        - python -m pip install --pre tox
      env: TOXENV=py27
    - <<: *test-macos
      env: TOXENV=py37
      before_install:
        - brew update
        - brew upgrade python
        - brew unlink python
        - brew link python

    # Coverage tracking is slow with pypy, skip it.
    - env: TOXENV=pypy-xdist
      python: 'pypy2.7-6.0'
    - env: TOXENV=pypy3-xdist
      python: 'pypy3.5-6.0'

    - env: TOXENV=py34-xdist
      python: '3.4'
    - env: TOXENV=py35-xdist
      python: '3.5'

    # Coverage for:
    # - pytester's LsofFdLeakChecker
    # - TestArgComplete (linux only)
    # - numpy
    # Empty PYTEST_ADDOPTS to run this non-verbose.
    - env: TOXENV=py37-lsof-numpy-xdist PYTEST_COVERAGE=1 PYTEST_ADDOPTS=

    # Specialized factors for py27.
    - env: TOXENV=py27-nobyte-numpy-xdist
      python: '2.7'
    - env: TOXENV=py27-pluggymaster-xdist
      python: '2.7'

    # Specialized factors for py37.
    # Coverage for:
    # - test_sys_breakpoint_interception (via pexpect).
    - env: TOXENV=py37-pexpect,py37-twisted PYTEST_COVERAGE=1
    - env: TOXENV=py37-pluggymaster-xdist
    - env: TOXENV=py37-freeze

    # Jobs only run via Travis cron jobs (currently daily).
    - env: TOXENV=py38-xdist
      python: '3.8-dev'
      if: type = cron

    - stage: baseline
      env: TOXENV=py27
    - env: TOXENV=py34
    - env: TOXENV=py36
    - env: TOXENV=linting PYTEST_NO_COVERAGE=1
      # Coverage for:
      # - _pytest.unittest._handle_skip (via pexpect).
      env: TOXENV=py27-pexpect,py27-twisted PYTEST_COVERAGE=1
      python: '2.7'
    # Use py36 here for faster baseline.
    - env: TOXENV=py36-xdist
      python: '3.6'
    - env: TOXENV=linting,docs,doctesting PYTEST_COVERAGE=1
      cache:
        directories:
          - $HOME/.cache/pre-commit

    - stage: deploy
      python: '3.6'
      env: PYTEST_NO_COVERAGE=1
      install: pip install -U setuptools setuptools_scm
      script: skip
      deploy:

@@ -79,9 +111,19 @@ jobs:
      tags: true
      repo: pytest-dev/pytest

matrix:
  allow_failures:
    - python: '3.8-dev'
      env: TOXENV=py38-xdist

before_script:
- |
  if [[ "$PYTEST_NO_COVERAGE" != 1 ]]; then
  # Do not (re-)upload coverage with cron runs.
  if [[ "$TRAVIS_EVENT_TYPE" = cron ]]; then
    PYTEST_COVERAGE=0
  fi
- |
  if [[ "$PYTEST_COVERAGE" = 1 ]]; then
    export COVERAGE_FILE="$PWD/.coverage"
    export COVERAGE_PROCESS_START="$PWD/.coveragerc"
    export _PYTEST_TOX_COVERAGE_RUN="coverage run -m"

@@ -92,19 +134,14 @@ script: tox --recreate

after_success:
- |
  if [[ "$PYTEST_NO_COVERAGE" != 1 ]]; then
  if [[ "$PYTEST_COVERAGE" = 1 ]]; then
    set -e
    pip install coverage
    # Add last TOXENV to $PATH.
    PATH="$PWD/.tox/${TOXENV##*,}/bin:$PATH"
    coverage combine
    coverage xml --ignore-errors
    coverage report -m --ignore-errors
    bash <(curl -s https://codecov.io/bash) -Z -X gcov -X coveragepy -X search -X xcode -X gcovout -X fix -f coverage.xml -F "${TOXENV//-/,},linux"

    # Coveralls does not support merged reports.
    if [[ "$TOXENV" = py37 ]]; then
      pip install coveralls
      coveralls
    fi
    coverage xml
    coverage report -m
    bash <(curl -s https://codecov.io/bash) -Z -X gcov -X coveragepy -X search -X xcode -X gcovout -X fix -f coverage.xml -n $TOXENV-$TRAVIS_OS_NAME
  fi

notifications:

@@ -116,7 +153,3 @@ notifications:
  skip_join: true
  email:
  - pytest-commit@python.org
cache:
  directories:
    - $HOME/.cache/pip
    - $HOME/.cache/pre-commit
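Every TOXENV above names an ordinary tox environment, so a failing CI job can usually be reproduced locally under the same name; a sketch using one entry from the matrix (any of the listed names works the same way)::

    $ python -m pip install --upgrade --pre tox
    $ tox -e py37-pexpect,py37-twisted    # essentially the invocation Travis runs for that job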
24  AUTHORS

@@ -6,21 +6,29 @@ Contributors include::
Aaron Coleman
Abdeali JK
Abhijeet Kasurde
Adam Johnson
Adam Uhlir
Ahn Ki-Wook
Alan Velasco
Alexander Johnson
Alexei Kozlenok
Allan Feldman
Aly Sivji
Anatoly Bubenkoff
Anders Hovmöller
Andras Mitzki
Andras Tim
Andrea Cimatoribus
Andreas Zeidler
Andrey Paramonov
Andrzej Ostrowski
Andy Freeland
Anthon van der Neut
Anthony Shaw
Anthony Sottile
Anton Lodder
Antony Lee
Arel Cordero
Armin Rigo
Aron Coyle
Aron Curzon

@@ -44,9 +52,11 @@ Charles Cloud
Charnjit SiNGH (CCSJ)
Chris Lamb
Christian Boelsen
Christian Fetzer
Christian Theunert
Christian Tismer
Christopher Gilling
Christopher Dignam
CrazyMerlyn
Cyrus Maden
Dhiren Serai

@@ -58,6 +68,7 @@ Danielle Jenkins
Dave Hunt
David Díaz-Barquero
David Mohr
David Szotten
David Vierra
Daw-Ran Liou
Denis Kirisov

@@ -74,6 +85,7 @@ Endre Galaczi
Eric Hunsberger
Eric Siegerman
Erik M. Bray
Fabien Zarifian
Fabio Zadrozny
Feng Ma
Florian Bruhin

@@ -111,6 +123,7 @@ Jonas Obrist
Jordan Guymon
Jordan Moldow
Jordan Speicher
Joseph Hunkeler
Joshua Bronson
Jurko Gospodnetić
Justyna Janczyszyn

@@ -120,6 +133,8 @@ Katerina Koukiou
Kevin Cox
Kodi B. Arfer
Kostis Anagnostopoulos
Kristoffer Nordström
Kyle Altendorf
Lawrence Mitchell
Lee Kamentsky
Lev Maximov

@@ -154,11 +169,15 @@ Michael Droettboom
Michael Seifert
Michal Wajszczuk
Mihai Capotă
Mike Hoyle (hoylemd)
Mike Lundy
Miro Hrončok
Nathaniel Waisbrot
Ned Batchelder
Neven Mundar
Nicholas Devenish
Nicholas Murphy
Niclas Olofsson
Nicolas Delaby
Oleg Pidsadnyi
Oleg Sushchenko

@@ -166,6 +185,7 @@ Oliver Bestwalter
Omar Kohl
Omer Hadari
Ondřej Súkup
Oscar Benjamin
Patrick Hayes
Paweł Adamczak
Pedro Algarvio

@@ -200,7 +220,9 @@ Stefan Zimmermann
Stefano Taschini
Steffen Allner
Stephan Obermann
Sven-Hendrik Haase
Tadek Teleżyński
Takafumi Arakaki
Tarcisio Fischer
Tareq Alayan
Ted Xiao

@@ -209,6 +231,7 @@ Thomas Hisch
Tim Strazny
Tom Dalton
Tom Viner
Tomer Keren
Trevor Bekolay
Tyler Goodlet
Tzu-ping Chung

@@ -219,6 +242,7 @@ Vidar T. Fauske
Virgil Dupras
Vitaly Lashmanov
Vlad Dragos
Volodymyr Piskun
Wil Cooley
William Lee
Wim Glenn
1082  CHANGELOG.rst

File diff suppressed because it is too large.
CONTRIBUTING.rst

@@ -169,7 +169,7 @@ Short version
#. Follow **PEP-8** for naming and `black <https://github.com/ambv/black>`_ for formatting.
#. Tests are run using ``tox``::

    tox -e linting,py27,py36
    tox -e linting,py27,py37

   The test environments above are usually enough to cover most cases locally.

@@ -237,12 +237,12 @@ Here is a simple overview, with pytest-specific bits:

#. Run all the tests

   You need to have Python 2.7 and 3.6 available in your system. Now
   You need to have Python 2.7 and 3.7 available in your system. Now
   running tests is as simple as issuing this command::

    $ tox -e linting,py27,py36
    $ tox -e linting,py27,py37

   This command will run tests via the "tox" tool against Python 2.7 and 3.6
   This command will run tests via the "tox" tool against Python 2.7 and 3.7
   and also perform "lint" coding-style checks.

#. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming.

@@ -252,9 +252,9 @@ Here is a simple overview, with pytest-specific bits:

    $ tox -e py27 -- --pdb

   Or to only run tests in a particular test module on Python 3.6::
   Or to only run tests in a particular test module on Python 3.7::

    $ tox -e py36 -- testing/test_config.py
    $ tox -e py37 -- testing/test_config.py


   When committing, ``pre-commit`` will re-format the files if necessary.

@@ -280,6 +280,47 @@ Here is a simple overview, with pytest-specific bits:
        base: features # if it's a feature


Writing Tests
----------------------------

Writing tests for plugins or for pytest itself is often done using the `testdir fixture <https://docs.pytest.org/en/latest/reference.html#testdir>`_, as a "black-box" test.

For example, to ensure a simple test passes you can write:

.. code-block:: python

    def test_true_assertion(testdir):
        testdir.makepyfile(
            """
            def test_foo():
                assert True
            """
        )
        result = testdir.runpytest()
        result.assert_outcomes(failed=0, passed=1)


Alternatively, it is possible to make checks based on the actual output of the terminal using
*glob-like* expressions:

.. code-block:: python

    def test_true_assertion(testdir):
        testdir.makepyfile(
            """
            def test_foo():
                assert False
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*assert False*", "*1 failed*"])

When choosing a file where to write a new test, take a look at the existing files and see if there's
one file which looks like a good fit. For example, a regression test about a bug in the ``--lf`` option
should go into ``test_cacheprovider.py``, given that this option is implemented in ``cacheprovider.py``.
If in doubt, go ahead and open a PR with your best guess and we can discuss this over the code.


Joining the Development Team
----------------------------
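Following the testdir pattern above, a regression test for the ``--lf`` case mentioned there would look roughly like this (a sketch; the test name and file contents are illustrative):

.. code-block:: python

    def test_lf_reruns_only_failures(testdir):
        testdir.makepyfile(
            """
            def test_ok():
                assert True

            def test_broken():
                assert False
            """
        )
        testdir.runpytest()                  # first run records one failure in the cache
        result = testdir.runpytest("--lf")   # second run selects only the recorded failure
        result.assert_outcomes(failed=1)     # test_ok is deselected, so passed stays 0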
@@ -46,5 +46,3 @@ taking a lot of time to make a new one.
* testing-in-python@lists.idyll.org (only major/minor releases)

And announce it on `Twitter <https://twitter.com/>`_ with the ``#pytest`` hashtag.

#. After a minor/major release, merge ``release-X.Y.Z`` into ``master`` and push (or open a PR).
2  LICENSE

@@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2004-2017 Holger Krekel and others
Copyright (c) 2004-2019 Holger Krekel and others

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
README.rst

@@ -22,8 +22,8 @@
.. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master
    :target: https://travis-ci.org/pytest-dev/pytest

.. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true
    :target: https://ci.appveyor.com/project/pytestbot/pytest
.. image:: https://dev.azure.com/pytest-dev/pytest/_apis/build/status/pytest-CI?branchName=master
    :target: https://dev.azure.com/pytest-dev/pytest

.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
    :target: https://github.com/ambv/black

@@ -111,7 +111,7 @@ Consult the `Changelog <https://docs.pytest.org/en/latest/changelog.html>`__ pag
License
-------

Copyright Holger Krekel and others, 2004-2018.
Copyright Holger Krekel and others, 2004-2019.

Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
55  appveyor.yml

@@ -1,55 +0,0 @@
environment:
  matrix:
    - TOXENV: "linting"
      PYTEST_NO_COVERAGE: "1"
    - TOXENV: "py27"
    - TOXENV: "py34"
    - TOXENV: "py35"
    - TOXENV: "py36"
    - TOXENV: "py37"
    - TOXENV: "pypy"
      PYTEST_NO_COVERAGE: "1"
    - TOXENV: "py27-xdist"
    - TOXENV: "py27-trial"
    - TOXENV: "py27-numpy"
    - TOXENV: "py27-pluggymaster"
      PYTEST_NO_COVERAGE: "1"
    - TOXENV: "py36-xdist"
    - TOXENV: "py36-trial"
    - TOXENV: "py36-numpy"
    - TOXENV: "py36-pluggymaster"
      PYTEST_NO_COVERAGE: "1"
    - TOXENV: "py27-nobyte"
    - TOXENV: "doctesting"
    - TOXENV: "py36-freeze"
      PYTEST_NO_COVERAGE: "1"
    - TOXENV: "docs"
      PYTEST_NO_COVERAGE: "1"

install:
  - echo Installed Pythons
  - dir c:\Python*

  - if "%TOXENV%" == "pypy" call scripts\install-pypy.bat

  - C:\Python36\python -m pip install --upgrade pip
  - C:\Python36\python -m pip install --upgrade --pre tox

build: false  # Not a C# project, build stuff at the test step instead.

before_test:
  - call scripts\prepare-coverage.bat

test_script:
  - C:\Python36\python -m tox

on_success:
  - call scripts\upload-coverage.bat

cache:
  - '%LOCALAPPDATA%\pip\cache'
  - '%USERPROFILE%\.cache\pre-commit'

# We don't deploy anything on tags with AppVeyor, we use Travis instead, so we
# might as well save resources
skip_tags: true
135  azure-pipelines.yml (new file)

@@ -0,0 +1,135 @@
trigger:
- master
- features

variables:
  PYTEST_ADDOPTS: "--junitxml=build/test-results/$(tox.env).xml -vv"
  python.needs_vc: False
  python.exe: "python"
  COVERAGE_FILE: "$(Build.Repository.LocalPath)/.coverage"
  COVERAGE_PROCESS_START: "$(Build.Repository.LocalPath)/.coveragerc"
  PYTEST_COVERAGE: '0'

jobs:

- job: 'Test'
  pool:
    vmImage: "vs2017-win2016"
  strategy:
    matrix:
      py27:
        python.version: '2.7'
        tox.env: 'py27'
      py27-nobyte-lsof-numpy:
        python.version: '2.7'
        tox.env: 'py27-lsof-nobyte-numpy'
        # Coverage for:
        # - test_supports_breakpoint_module_global
        # - test_terminal_reporter_writer_attr (without xdist)
        # - "if write" branch in _pytest.assertion.rewrite
        # - numpy
        # - pytester's LsofFdLeakChecker (being skipped)
        PYTEST_COVERAGE: '1'
      py27-twisted:
        python.version: '2.7'
        tox.env: 'py27-twisted'
        python.needs_vc: True
      py27-pluggymaster-xdist:
        python.version: '2.7'
        tox.env: 'py27-pluggymaster-xdist'
        # Coverage for:
        # - except-IOError in _attempt_to_close_capture_file for py2.
        # Also seen with py27-nobyte (using xdist), and py27-xdist.
        # But no exception with py27-pexpect,py27-twisted,py27-numpy.
        PYTEST_COVERAGE: '1'
      pypy:
        python.version: 'pypy'
        tox.env: 'pypy'
        python.exe: 'pypy'
      # NOTE: pypy3 fails to install pip currently due to an internal error.
      # pypy3:
      #   python.version: 'pypy3'
      #   tox.env: 'pypy3'
      #   python.exe: 'pypy3'
      py34-xdist:
        python.version: '3.4'
        tox.env: 'py34-xdist'
        # Coverage for:
        # - _pytest.compat._bytes_to_ascii
        PYTEST_COVERAGE: '1'
      py35-xdist:
        python.version: '3.5'
        tox.env: 'py35-xdist'
        # Coverage for:
        # - test_supports_breakpoint_module_global
        PYTEST_COVERAGE: '1'
      py36-xdist:
        python.version: '3.6'
        tox.env: 'py36-xdist'
      py37:
        python.version: '3.7'
        tox.env: 'py37'
        # Coverage for:
        # - _py36_windowsconsoleio_workaround (with py36+)
        # - test_request_garbage (no xdist)
        PYTEST_COVERAGE: '1'
      py37-linting/docs/doctesting:
        python.version: '3.7'
        tox.env: 'linting,docs,doctesting'
      py37-twisted/numpy:
        python.version: '3.7'
        tox.env: 'py37-twisted,py37-numpy'
      py37-pluggymaster-xdist:
        python.version: '3.7'
        tox.env: 'py37-pluggymaster-xdist'
    maxParallel: 10

  steps:
  - task: UsePythonVersion@0
    condition: not(startsWith(variables['python.exe'], 'pypy'))
    inputs:
      versionSpec: '$(python.version)'
      architecture: 'x64'

  - script: choco install vcpython27
    condition: eq(variables['python.needs_vc'], True)
    displayName: 'Install VC for py27'

  - script: choco install python.pypy
    condition: eq(variables['python.exe'], 'pypy')
    displayName: 'Install pypy'

  - script: choco install pypy3
    condition: eq(variables['python.exe'], 'pypy3')
    displayName: 'Install pypy3'

  - task: PowerShell@2
    inputs:
      targetType: 'inline'
      script: |
        Invoke-WebRequest -Uri "https://bootstrap.pypa.io/get-pip.py" -OutFile "get-pip.py"
        $(python.exe) get-pip.py
    condition: startsWith(variables['python.exe'], 'pypy')
    displayName: 'Install pip'

  - script: $(python.exe) -m pip install --upgrade pip && $(python.exe) -m pip install tox
    displayName: 'Install tox'

  - script: |
      call scripts/setup-coverage-vars.bat || goto :eof
      $(python.exe) -m tox -e $(tox.env)
    displayName: 'Run tests'

  - task: PublishTestResults@2
    inputs:
      testResultsFiles: 'build/test-results/$(tox.env).xml'
      testRunTitle: '$(tox.env)'
    condition: succeededOrFailed()

  - script: call scripts\upload-coverage.bat
    displayName: 'Report and upload coverage'
    condition: eq(variables['PYTEST_COVERAGE'], '1')
    env:
      PYTHON: $(python.exe)
      CODECOV_TOKEN: $(CODECOV_TOKEN)
      PYTEST_CODECOV_NAME: $(tox.env)
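As with the Travis matrix, each ``tox.env`` entry here is a plain tox environment, so a matrix job can be rerun locally with the same commands the pipeline uses; a sketch with one of the entries above::

    $ python -m pip install --upgrade pip
    $ python -m pip install tox
    $ python -m tox -e py37-twisted,py37-numpy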
@@ -5,7 +5,7 @@ if __name__ == "__main__":
    import pytest  # NOQA
    import pstats

    script = sys.argv[1:] if len(sys.argv) > 1 else "empty.py"
    script = sys.argv[1:] if len(sys.argv) > 1 else ["empty.py"]
    stats = cProfile.run("pytest.cmdline.main(%r)" % script, "prof")
    p = pstats.Stats("prof")
    p.strip_dirs()
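The fix above is about a type difference, not a typo: with the old default, ``%r`` interpolates a quoted string, so the profiled call received ``'empty.py'`` instead of an argv-style list like ``sys.argv[1:]``. A quick illustration in plain Python:

.. code-block:: python

    # Old default, a bare string:
    "pytest.cmdline.main(%r)" % "empty.py"
    # -> "pytest.cmdline.main('empty.py')"

    # New default, a one-element list matching sys.argv[1:]
    # (tuple-wrapped, since %-formatting unpacks tuples but not lists):
    "pytest.cmdline.main(%r)" % (["empty.py"],)
    # -> "pytest.cmdline.main(['empty.py'])"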
@@ -1,10 +1,7 @@

# 10000 iterations, just for relative comparison
#                       2.7.5     3.3.2
# FilesCompleter      75.1109   69.2116
# FastFilesCompleter   0.7383    1.0760

import timeit

imports = [
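The numbers in that header come from a ``timeit`` comparison; a minimal sketch of the pattern, assuming ``FastFilesCompleter`` is importable from ``_pytest._argcomplete`` as in pytest's source tree:

.. code-block:: python

    import timeit

    # Time 10000 completions of an empty prefix, matching the table above.
    print(timeit.timeit(
        "completer('')",
        setup="from _pytest._argcomplete import FastFilesCompleter; "
              "completer = FastFilesCompleter()",
        number=10000,
    ))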
@@ -1,4 +1,3 @@

import pytest
@@ -1,6 +1,6 @@
from six.moves import range
import pytest

import pytest

SKIP = True
@@ -14,7 +14,8 @@ Each file should be named like ``<ISSUE>.<TYPE>.rst``, where
* ``feature``: new user facing features, like new command-line options and new behavior.
* ``bugfix``: fixes a reported bug.
* ``doc``: documentation improvement, like rewording an entire section or adding missing docs.
* ``removal``: feature deprecation or removal.
* ``deprecation``: feature deprecation.
* ``removal``: feature removal.
* ``vendor``: changes in packages vendored in pytest.
* ``trivial``: fixing a small typo or internal change that might be noteworthy.
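With ``deprecation`` and ``removal`` now distinct types, the two stages of retiring a feature get separate changelog files; for a hypothetical issue 1234::

    changelog/1234.deprecation.rst   # announces the upcoming removal
    changelog/1234.removal.rst       # the removal itself, in a later release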
@@ -39,7 +39,7 @@ clean:
	-rm -rf $(BUILDDIR)/*

regen:
	PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPT=-pno:hypothesis COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS}
	PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPTS=-pno:hypothesis COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS}

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
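The one-letter fix matters because pytest only honors the correctly spelled ``PYTEST_ADDOPTS`` environment variable, whose value is prepended to the command line of any pytest run; for example::

    $ PYTEST_ADDOPTS="-p no:hypothesis" pytest    # same effect as passing -p no:hypothesis directly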
@@ -1,19 +1,17 @@
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import (
    Keyword,
    Name,
    Comment,
    String,
    Error,
    Number,
    Operator,
    Generic,
    Whitespace,
    Punctuation,
    Other,
    Literal,
)
from pygments.token import Comment
from pygments.token import Error
from pygments.token import Generic
from pygments.token import Keyword
from pygments.token import Literal
from pygments.token import Name
from pygments.token import Number
from pygments.token import Operator
from pygments.token import Other
from pygments.token import Punctuation
from pygments.token import String
from pygments.token import Whitespace


class FlaskyStyle(Style):
@@ -6,6 +6,22 @@ Release announcements
   :maxdepth: 2


   release-4.4.0
   release-4.3.1
   release-4.3.0
   release-4.2.1
   release-4.2.0
   release-4.1.1
   release-4.1.0
   release-4.0.2
   release-4.0.1
   release-4.0.0
   release-3.10.1
   release-3.10.0
   release-3.9.3
   release-3.9.2
   release-3.9.1
   release-3.9.0
   release-3.8.2
   release-3.8.1
   release-3.8.0
43  doc/en/announce/release-3.10.0.rst (new file)

@@ -0,0 +1,43 @@
pytest-3.10.0
=======================================

The pytest team is proud to announce the 3.10.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Anders Hovmöller
* Andreu Vallbona Plazas
* Ankit Goel
* Anthony Sottile
* Bernardo Gomes
* Brianna Laugher
* Bruno Oliveira
* Daniel Hahler
* David Szotten
* Mick Koch
* Niclas Olofsson
* Palash Chatterjee
* Ronny Pfannschmidt
* Sven-Hendrik Haase
* Ville Skyttä
* William Jamir Silva


Happy testing,
The Pytest Development Team
24  doc/en/announce/release-3.10.1.rst (new file)

@@ -0,0 +1,24 @@
pytest-3.10.1
=======================================

pytest 3.10.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Boris Feld
* Bruno Oliveira
* Daniel Hahler
* Fabien ZARIFIAN
* Jon Dufresne
* Ronny Pfannschmidt


Happy testing,
The pytest Development Team
@@ -20,8 +20,7 @@ Thanks to all who contributed to this release, among them:
* Ondřej Súkup
* Ronny Pfannschmidt
* T.E.A de Souza
* Victor
* victor
* Victor Maryama


Happy testing,

@@ -22,10 +22,9 @@ Thanks to all who contributed to this release, among them:
* Ronny Pfannschmidt
* Sankt Petersbug
* Tyler Richard
* Victor
* Victor Maryama
* Vlad Shcherbina
* turturica
* victor
* wim glenn

doc/en/announce/release-3.9.0.rst (new file, 43 lines)
@@ -0,0 +1,43 @@
pytest-3.9.0
=======================================

The pytest team is proud to announce the 3.9.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Andrea Cimatoribus
* Ankit Goel
* Anthony Sottile
* Ben Eyal
* Bruno Oliveira
* Daniel Hahler
* Jeffrey Rackauckas
* Jose Carlos Menezes
* Kyle Altendorf
* Niklas JQ
* Palash Chatterjee
* Ronny Pfannschmidt
* Thomas Hess
* Thomas Hisch
* Tomer Keren
* Victor Maryama


Happy testing,
The Pytest Development Team

doc/en/announce/release-3.9.1.rst (new file, 20 lines)
@@ -0,0 +1,20 @@
pytest-3.9.1
=======================================

pytest 3.9.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Bruno Oliveira
* Ronny Pfannschmidt
* Thomas Hisch


Happy testing,
The pytest Development Team

doc/en/announce/release-3.9.2.rst (new file, 23 lines)
@@ -0,0 +1,23 @@
pytest-3.9.2
=======================================

pytest 3.9.2 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Ankit Goel
* Anthony Sottile
* Bruno Oliveira
* Ronny Pfannschmidt
* Vincent Barbaresi
* ykantor


Happy testing,
The pytest Development Team

doc/en/announce/release-3.9.3.rst (new file, 24 lines)
@@ -0,0 +1,24 @@
pytest-3.9.3
=======================================

pytest 3.9.3 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Andreas Profous
* Ankit Goel
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Jon Dufresne
* Ronny Pfannschmidt


Happy testing,
The pytest Development Team

doc/en/announce/release-4.0.0.rst (new file, 30 lines)
@@ -0,0 +1,30 @@
pytest-4.0.0
=======================================

The pytest team is proud to announce the 4.0.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Bruno Oliveira
* Daniel Hahler
* Ronny Pfannschmidt


Happy testing,
The Pytest Development Team

doc/en/announce/release-4.0.1.rst (new file, 23 lines)
@@ -0,0 +1,23 @@
pytest-4.0.1
=======================================

pytest 4.0.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Michael D. Hoyle
* Ronny Pfannschmidt
* Slam


Happy testing,
The pytest Development Team

doc/en/announce/release-4.0.2.rst (new file, 24 lines)
@@ -0,0 +1,24 @@
pytest-4.0.2
=======================================

pytest 4.0.2 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Pedro Algarvio
* Ronny Pfannschmidt
* Tomer Keren
* Yash Todi


Happy testing,
The pytest Development Team

doc/en/announce/release-4.1.0.rst (new file, 44 lines)
@@ -0,0 +1,44 @@
pytest-4.1.0
=======================================

The pytest team is proud to announce the 4.1.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Adam Johnson
* Aly Sivji
* Andrey Paramonov
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* David Vo
* Hyunchel Kim
* Jeffrey Rackauckas
* Kanguros
* Nicholas Devenish
* Pedro Algarvio
* Randy Barlow
* Ronny Pfannschmidt
* Tomer Keren
* feuillemorte
* wim glenn


Happy testing,
The Pytest Development Team

doc/en/announce/release-4.1.1.rst (new file, 27 lines)
@@ -0,0 +1,27 @@
pytest-4.1.1
=======================================

pytest 4.1.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Anton Lodder
* Bruno Oliveira
* Daniel Hahler
* David Vo
* Oscar Benjamin
* Ronny Pfannschmidt
* Victor Maryama
* Yoav Caspi
* dmitry.dygalo


Happy testing,
The pytest Development Team

doc/en/announce/release-4.2.0.rst (new file, 37 lines)
@@ -0,0 +1,37 @@
pytest-4.2.0
=======================================

The pytest team is proud to announce the 4.2.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Adam Uhlir
* Anthony Sottile
* Bruno Oliveira
* Christopher Dignam
* Daniel Hahler
* Joseph Hunkeler
* Kristoffer Nordstroem
* Ronny Pfannschmidt
* Thomas Hisch
* wim glenn


Happy testing,
The Pytest Development Team

doc/en/announce/release-4.2.1.rst (new file, 30 lines)
@@ -0,0 +1,30 @@
pytest-4.2.1
=======================================

pytest 4.2.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Arel Cordero
* Bruno Oliveira
* Daniel Hahler
* Holger Kohr
* Kevin J. Foley
* Nick Murphy
* Paweł Stradomski
* Raphael Pierzina
* Ronny Pfannschmidt
* Sam Brightman
* Thomas Hisch
* Zac Hatfield-Dodds


Happy testing,
The pytest Development Team

doc/en/announce/release-4.3.0.rst (new file, 36 lines)
@@ -0,0 +1,36 @@
pytest-4.3.0
=======================================

The pytest team is proud to announce the 4.3.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Andras Mitzki
* Anthony Sottile
* Bruno Oliveira
* Christian Fetzer
* Daniel Hahler
* Grygorii Iermolenko
* R. Alex Matevish
* Ronny Pfannschmidt
* cclauss


Happy testing,
The Pytest Development Team

doc/en/announce/release-4.3.1.rst (new file, 29 lines)
@@ -0,0 +1,29 @@
pytest-4.3.1
=======================================

pytest 4.3.1 has just been released to PyPI.

This is a bug-fix release, being a drop-in replacement. To upgrade::

    pip install --upgrade pytest

The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.

Thanks to all who contributed to this release, among them:

* Andras Mitzki
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Danilo Horta
* Grygorii Iermolenko
* Jeff Hale
* Kyle Altendorf
* Stephan Hoyer
* Zac Hatfield-Dodds
* Zac-HD
* songbowen


Happy testing,
The pytest Development Team

doc/en/announce/release-4.4.0.rst (new file, 39 lines)
@@ -0,0 +1,39 @@
pytest-4.4.0
=======================================

The pytest team is proud to announce the 4.4.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* ApaDoctor
* Bernhard M. Wiedemann
* Brian Skinn
* Bruno Oliveira
* Daniel Hahler
* Gary Tyler
* Jeong YunWon
* Miro Hrončok
* Takafumi Arakaki
* henrykironde
* smheidrich


Happy testing,
The Pytest Development Team

@@ -22,12 +22,15 @@ following::
    assert f() == 4

to assert that your function returns a certain value. If this assertion fails
you will see the return value of the function call::
you will see the return value of the function call:

.. code-block:: pytest

    $ pytest test_assert1.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 1 item

    test_assert1.py F                                                    [100%]

@@ -85,24 +88,30 @@ and if you need to have access to the actual exception info you may use::
the actual exception raised. The main attributes of interest are
``.type``, ``.value`` and ``.traceback``.

.. versionchanged:: 3.0
You can pass a ``match`` keyword parameter to the context-manager to test
that a regular expression matches on the string representation of an exception
(similar to the ``TestCase.assertRaisesRegexp`` method from ``unittest``)::

In the context manager form you may use the keyword argument
``message`` to specify a custom failure message::
    import pytest

    >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
    ...    pass
    ... Failed: Expecting ZeroDivisionError
    def myfunc():
        raise ValueError("Exception 123 raised")

If you want to write test code that works on Python 2.4 as well,
you may also use two other ways to test for an expected exception::
    def test_match():
        with pytest.raises(ValueError, match=r'.* 123 .*'):
            myfunc()

The regexp parameter of the ``match`` method is matched with the ``re.search``
function, so in the above example ``match='123'`` would have worked as
well.
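
As a side note to the hunk above, the unanchored ``re.search`` semantics mean both of
the following pass (a sketch, not part of the diff; ``myfunc`` as defined above):

.. code-block:: python

    import pytest


    def myfunc():
        raise ValueError("Exception 123 raised")


    def test_match_full_pattern():
        with pytest.raises(ValueError, match=r".* 123 .*"):
            myfunc()


    def test_match_substring():
        # re.search scans the whole message, so a bare substring works too
        with pytest.raises(ValueError, match="123"):
            myfunc()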

There's an alternate form of the ``pytest.raises`` function where you pass
a function that will be executed with the given ``*args`` and ``**kwargs`` and
assert that the given exception is raised::

    pytest.raises(ExpectedException, func, *args, **kwargs)
    pytest.raises(ExpectedException, "func(*args, **kwargs)")

both of which execute the specified function with args and kwargs and
asserts that the given ``ExpectedException`` is raised. The reporter will
provide you with helpful output in case of failures such as *no
The reporter will provide you with helpful output in case of failures such as *no
exception* or *wrong exception*.

Note that it is also possible to specify a "raises" argument to

@@ -119,23 +128,6 @@ exceptions your own code is deliberately raising, whereas using
like documenting unfixed bugs (where the test describes what "should" happen)
or bugs in dependencies.

Also, the context manager form accepts a ``match`` keyword parameter to test
that a regular expression matches on the string representation of an exception
(like the ``TestCase.assertRaisesRegexp`` method from ``unittest``)::

    import pytest

    def myfunc():
        raise ValueError("Exception 123 raised")

    def test_match():
        with pytest.raises(ValueError, match=r'.* 123 .*'):
            myfunc()

The regexp parameter of the ``match`` method is matched with the ``re.search``
function. So in the above example ``match='123'`` would have worked as
well.


.. _`assertwarns`:

@@ -165,12 +157,15 @@ when it encounters comparisons. For example::
    set2 = set("8035")
    assert set1 == set2

if you run this module::
if you run this module:

.. code-block:: pytest

    $ pytest test_assert2.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 1 item

    test_assert2.py F                                                    [100%]

@@ -200,8 +195,8 @@ Special comparisons are done for a number of cases:

See the :ref:`reporting demo <tbreportdemo>` for many more examples.

Defining your own assertion comparison
----------------------------------------------
Defining your own explanation for failed assertions
---------------------------------------------------

It is possible to add your own detailed explanations by implementing
the ``pytest_assertrepr_compare`` hook.

@@ -235,7 +230,9 @@ now, given this test module::
    assert f1 == f2

you can run the test module and get the custom output defined in
the conftest file::
the conftest file:

.. code-block:: pytest

    $ pytest -q test_foocompare.py
    F                                                                    [100%]

@@ -255,8 +252,8 @@ the conftest file::
.. _assert-details:
.. _`assert introspection`:

Advanced assertion introspection
----------------------------------
Assertion introspection details
-------------------------------

.. versionadded:: 2.1

@@ -269,28 +266,46 @@ supporting modules which are not themselves test modules will not be rewritten**

You can manually enable assertion rewriting for an imported module by calling
`register_assert_rewrite <https://docs.pytest.org/en/latest/writing_plugins.html#assertion-rewriting>`_
before you import it (a good place to do that is in ``conftest.py``).

.. note::

   ``pytest`` rewrites test modules on import by using an import
   hook to write new ``pyc`` files. Most of the time this works transparently.
   However, if you are messing with import yourself, the import hook may
   interfere.

   If this is the case you have two options:

   * Disable rewriting for a specific module by adding the string
     ``PYTEST_DONT_REWRITE`` to its docstring.

   * Disable rewriting for all modules by using ``--assert=plain``.

   Additionally, rewriting will fail silently if it cannot write new ``.pyc`` files,
   i.e. in a read-only filesystem or a zipfile.

before you import it (a good place to do that is in your root ``conftest.py``).
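
A minimal sketch of what such a root ``conftest.py`` might look like
(``mypkg.helpers`` is a hypothetical helper module, not something from this changeset):

.. code-block:: python

    # content of conftest.py
    import pytest

    # must happen before "mypkg.helpers" is imported by any test module
    pytest.register_assert_rewrite("mypkg.helpers")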

For further information, Benjamin Peterson wrote up `Behind the scenes of pytest's new assertion rewriting <http://pybites.blogspot.com/2011/07/behind-scenes-of-pytests-new-assertion.html>`_.

Assertion rewriting caches files on disk
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

``pytest`` will write back the rewritten modules to disk for caching. You can disable
this behavior (for example to avoid leaving stale ``.pyc`` files around in projects that
move files around a lot) by adding this to the top of your ``conftest.py`` file:

.. code-block:: python

    import sys

    sys.dont_write_bytecode = True

Note that you still get the benefits of assertion introspection, the only change is that
the ``.pyc`` files won't be cached on disk.

Additionally, rewriting will silently skip caching if it cannot write new ``.pyc`` files,
i.e. in a read-only filesystem or a zipfile.


Disabling assert rewriting
~~~~~~~~~~~~~~~~~~~~~~~~~~

``pytest`` rewrites test modules on import by using an import
hook to write new ``pyc`` files. Most of the time this works transparently.
However, if you are working with the import machinery yourself, the import hook may
interfere.

If this is the case you have two options:

* Disable rewriting for a specific module by adding the string
  ``PYTEST_DONT_REWRITE`` to its docstring (see the sketch below).

* Disable rewriting for all modules by using ``--assert=plain``.
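
For illustration, opting a single module out of rewriting might look like this
(a hypothetical helper module, not taken from the diff):

.. code-block:: python

    """Test helpers that must not be rewritten.

    PYTEST_DONT_REWRITE
    """


    def check_positive(value):
        assert value > 0  # plain assert, no pytest introspection here
        return value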

.. versionadded:: 2.1
   Add assert rewriting as an alternate introspection technique.


@@ -8,18 +8,26 @@ When using bash as your shell, ``pytest`` can use argcomplete
(https://argcomplete.readthedocs.io/) for auto-completion.
For this ``argcomplete`` needs to be installed **and** enabled.

Install argcomplete using::
Install argcomplete using:

    sudo pip install 'argcomplete>=0.5.7'
.. code-block:: bash

For global activation of all argcomplete enabled python applications run::
    sudo pip install 'argcomplete>=0.5.7'

For global activation of all argcomplete enabled python applications run:

.. code-block:: bash

    sudo activate-global-python-argcomplete

For permanent (but not global) ``pytest`` activation, use::
For permanent (but not global) ``pytest`` activation, use:

    register-python-argcomplete pytest >> ~/.bashrc
.. code-block:: bash

For one-time activation of argcomplete for ``pytest`` only, use::
    register-python-argcomplete pytest >> ~/.bashrc

    eval "$(register-python-argcomplete pytest)"
For one-time activation of argcomplete for ``pytest`` only, use:

.. code-block:: bash

    eval "$(register-python-argcomplete pytest)"

@@ -12,7 +12,9 @@ For information on plugin hooks and objects, see :ref:`plugins`.

For information on the ``pytest.mark`` mechanism, see :ref:`mark`.

For information about fixtures, see :ref:`fixtures`. To see a complete list of available fixtures (add ``-v`` to also see fixtures with leading ``_``), type ::
For information about fixtures, see :ref:`fixtures`. To see a complete list of available fixtures (add ``-v`` to also see fixtures with leading ``_``), type:

.. code-block:: pytest

    $ pytest -q --fixtures
    cache

@@ -26,25 +28,29 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
        Values can be any object handled by the json stdlib module.
    capsys
        Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make
        captured output available via ``capsys.readouterr()`` method calls
        which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text``
        objects.
        Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.

        The captured output is made available via ``capsys.readouterr()`` method
        calls, which return a ``(out, err)`` namedtuple.
        ``out`` and ``err`` will be ``text`` objects.
    capsysbinary
        Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make
        captured output available via ``capsys.readouterr()`` method calls
        which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes``
        objects.
        Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.

        The captured output is made available via ``capsysbinary.readouterr()``
        method calls, which return a ``(out, err)`` namedtuple.
        ``out`` and ``err`` will be ``bytes`` objects.
    capfd
        Enable capturing of writes to file descriptors ``1`` and ``2`` and make
        captured output available via ``capfd.readouterr()`` method calls
        which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text``
        objects.
        Enable text capturing of writes to file descriptors ``1`` and ``2``.

        The captured output is made available via ``capfd.readouterr()`` method
        calls, which return a ``(out, err)`` namedtuple.
        ``out`` and ``err`` will be ``text`` objects.
    capfdbinary
        Enable capturing of writes to file descriptors 1 and 2 and make
        captured output available via ``capfdbinary.readouterr`` method calls
        which return a ``(out, err)`` tuple. ``out`` and ``err`` will be
        ``bytes`` objects.
        Enable bytes capturing of writes to file descriptors ``1`` and ``2``.

        The captured output is made available via ``capfdbinary.readouterr()`` method
        calls, which return a ``(out, err)`` namedtuple.
        ``out`` and ``err`` will be ``bytes`` objects.
    doctest_namespace
        Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
    pytestconfig

@@ -53,7 +59,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
        Example::

            def test_foo(pytestconfig):
                if pytestconfig.getoption("verbose"):
                if pytestconfig.getoption("verbose") > 0:
                    ...
    record_property
        Add extra properties to the calling test.

@@ -66,8 +72,6 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a

            def test_function(record_property):
                record_property("example_key", 1)
    record_xml_property
        (Deprecated) use record_property.
    record_xml_attribute
        Add extra xml attributes to the tag for the calling test.
        The fixture is callable with ``(name, value)``, with value being

@@ -75,7 +79,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
    caplog
        Access and control log capturing.

        Captured logs are available through the following methods::
        Captured logs are available through the following properties/methods::

        * caplog.text    -> string containing formatted log output
        * caplog.records -> list of logging.LogRecord instances

@@ -104,7 +108,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
        See http://docs.python.org/library/warnings.html for information
        on warning categories.
    tmpdir_factory
        Return a TempdirFactory instance for the test session.
        Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.
    tmp_path_factory
        Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.
    tmpdir
        Return a temporary directory path object
        which is unique to each test function invocation,

@@ -113,6 +119,16 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
        path object.

        .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
    tmp_path
        Return a temporary directory path object
        which is unique to each test function invocation,
        created as a sub directory of the base temporary
        directory. The returned object is a :class:`pathlib.Path`
        object.

        .. note::

            in python < 3.6 this is a pathlib2.Path

    no tests ran in 0.12 seconds
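
As a usage note for the reworked ``capsys`` description above, a minimal test might
look like this (a sketch, not part of the diff):

.. code-block:: python

    def test_print_is_captured(capsys):
        print("hello")
        out, err = capsys.readouterr()  # (out, err) namedtuple of text objects
        assert out == "hello\n"
        assert err == ""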

doc/en/cache.rst (156 lines changed)
@@ -43,7 +43,9 @@ First, let's create 50 test invocations of which only 2 fail::
    if i in (17, 25):
        pytest.fail("bad luck")

If you run this for the first time you will see two failures::
If you run this for the first time you will see two failures:

.. code-block:: pytest

    $ pytest -q
    .................F.......F........................                  [100%]

@@ -72,13 +74,16 @@ If you run this for the first time you will see two failures::
    test_50.py:6: Failed
    2 failed, 48 passed in 0.12 seconds

If you then run it with ``--lf``::
If you then run it with ``--lf``:

.. code-block:: pytest

    $ pytest --lf
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 50 items / 48 deselected
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 50 items / 48 deselected / 2 selected
    run-last-failure: rerun previous 2 failures

    test_50.py FF                                                        [100%]

@@ -113,12 +118,15 @@ not been run ("deselected").

Now, if you run with the ``--ff`` option, all tests will be run but the first
previous failures will be executed first (as can be seen from the series
of ``FF`` and dots)::
of ``FF`` and dots):

.. code-block:: pytest

    $ pytest --ff
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 50 items
    run-last-failure: rerun previous 2 failures first

@@ -160,7 +168,9 @@ Behavior when no tests failed in the last run

When no tests failed in the last run, or when no cached ``lastfailed`` data was
found, ``pytest`` can be configured either to run all of the tests or no tests,
using the ``--last-failed-no-failures`` option, which takes one of the following values::
using the ``--last-failed-no-failures`` option, which takes one of the following values:

.. code-block:: bash

    pytest --last-failed --last-failed-no-failures all    # run all tests (default behavior)
    pytest --last-failed --last-failed-no-failures none   # run no tests and exit

@@ -179,11 +189,14 @@ across pytest invocations::
    import pytest
    import time

    def expensive_computation():
        print("running expensive computation...")

    @pytest.fixture
    def mydata(request):
        val = request.config.cache.get("example/value", None)
        if val is None:
            time.sleep(9*0.6) # expensive computation :)
            expensive_computation()
            val = 42
            request.config.cache.set("example/value", val)
        return val

@@ -191,8 +204,9 @@ across pytest invocations::
    def test_function(mydata):
        assert mydata == 23

If you run this command once, it will take a while because
of the sleep::
If you run this command for the first time, you can see the print statement:

.. code-block:: pytest

    $ pytest -q
    F                                                                    [100%]

@@ -205,11 +219,13 @@ of the sleep::
    >       assert mydata == 23
    E       assert 42 == 23

    test_caching.py:14: AssertionError
    test_caching.py:17: AssertionError
    1 failed in 0.12 seconds

If you run it a second time the value will be retrieved from
the cache and this will be quick::
the cache and nothing will be printed:

.. code-block:: pytest

    $ pytest -q
    F                                                                    [100%]

@@ -222,7 +238,7 @@ the cache and this will be quick::
    >       assert mydata == 23
    E       assert 42 == 23

    test_caching.py:14: AssertionError
    test_caching.py:17: AssertionError
    1 failed in 0.12 seconds

See the :ref:`cache-api` for more details.

@@ -232,18 +248,108 @@ Inspecting Cache content
-------------------------------

You can always peek at the content of the cache using the
``--cache-show`` command line option::
``--cache-show`` command line option:

.. code-block:: pytest

    $ pytest --cache-show
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    cachedir: $REGENDOC_TMPDIR/.pytest_cache
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    cachedir: $PYTHON_PREFIX/.pytest_cache
    ------------------------------- cache values -------------------------------
    cache/lastfailed contains:
      {'test_caching.py::test_function': True}
      {'a/test_db.py::test_a1': True,
       'a/test_db2.py::test_a2': True,
       'b/test_error.py::test_root': True,
       'failure_demo.py::TestCustomAssertMsg::test_custom_repr': True,
       'failure_demo.py::TestCustomAssertMsg::test_multiline': True,
       'failure_demo.py::TestCustomAssertMsg::test_single_line': True,
       'failure_demo.py::TestFailing::test_not': True,
       'failure_demo.py::TestFailing::test_simple': True,
       'failure_demo.py::TestFailing::test_simple_multiline': True,
       'failure_demo.py::TestMoreErrors::test_compare': True,
       'failure_demo.py::TestMoreErrors::test_complex_error': True,
       'failure_demo.py::TestMoreErrors::test_global_func': True,
       'failure_demo.py::TestMoreErrors::test_instance': True,
       'failure_demo.py::TestMoreErrors::test_startswith': True,
       'failure_demo.py::TestMoreErrors::test_startswith_nested': True,
       'failure_demo.py::TestMoreErrors::test_try_finally': True,
       'failure_demo.py::TestMoreErrors::test_z1_unpack_error': True,
       'failure_demo.py::TestMoreErrors::test_z2_type_error': True,
       'failure_demo.py::TestRaises::test_raise': True,
       'failure_demo.py::TestRaises::test_raises': True,
       'failure_demo.py::TestRaises::test_raises_doesnt': True,
       'failure_demo.py::TestRaises::test_reinterpret_fails_with_print_for_the_fun_of_it': True,
       'failure_demo.py::TestRaises::test_some_error': True,
       'failure_demo.py::TestRaises::test_tupleerror': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_attrs': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_dataclass': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_dict': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_list': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_list_long': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_long_text': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_long_text_multiline': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_longer_list': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_multiline_text': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_set': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_similar_text': True,
       'failure_demo.py::TestSpecialisedExplanations::test_eq_text': True,
       'failure_demo.py::TestSpecialisedExplanations::test_in_list': True,
       'failure_demo.py::TestSpecialisedExplanations::test_not_in_text_multiline': True,
       'failure_demo.py::TestSpecialisedExplanations::test_not_in_text_single': True,
       'failure_demo.py::TestSpecialisedExplanations::test_not_in_text_single_long': True,
       'failure_demo.py::TestSpecialisedExplanations::test_not_in_text_single_long_term': True,
       'failure_demo.py::test_attribute': True,
       'failure_demo.py::test_attribute_failure': True,
       'failure_demo.py::test_attribute_instance': True,
       'failure_demo.py::test_attribute_multiple': True,
       'failure_demo.py::test_dynamic_compile_shows_nicely': True,
       'failure_demo.py::test_generative[3-6]': True,
       'test_50.py::test_num[17]': True,
       'test_50.py::test_num[25]': True,
       'test_anothersmtp.py::test_showhelo': True,
       'test_assert1.py::test_function': True,
       'test_assert2.py::test_set_comparison': True,
       'test_backends.py::test_db_initialized[d2]': True,
       'test_caching.py::test_function': True,
       'test_checkconfig.py::test_something': True,
       'test_class.py::TestClass::test_two': True,
       'test_compute.py::test_compute[4]': True,
       'test_example.py::test_error': True,
       'test_example.py::test_fail': True,
       'test_foocompare.py::test_compare': True,
       'test_module.py::test_call_fails': True,
       'test_module.py::test_ehlo': True,
       'test_module.py::test_ehlo[mail.python.org]': True,
       'test_module.py::test_ehlo[smtp.gmail.com]': True,
       'test_module.py::test_event_simple': True,
       'test_module.py::test_fail1': True,
       'test_module.py::test_fail2': True,
       'test_module.py::test_func2': True,
       'test_module.py::test_interface_complex': True,
       'test_module.py::test_interface_simple': True,
       'test_module.py::test_noop': True,
       'test_module.py::test_noop[mail.python.org]': True,
       'test_module.py::test_noop[smtp.gmail.com]': True,
       'test_module.py::test_setup_fails': True,
       'test_parametrize.py::TestClass::test_equals[1-2]': True,
       'test_sample.py::test_answer': True,
       'test_show_warnings.py::test_one': True,
       'test_simple.yml::hello': True,
       'test_smtpsimple.py::test_ehlo': True,
       'test_step.py::TestUserHandling::test_modification': True,
       'test_strings.py::test_valid_string[!]': True,
       'test_tmp_path.py::test_create_file': True,
       'test_tmpdir.py::test_create_file': True,
       'test_tmpdir.py::test_needsfiles': True,
       'test_unittest_db.py::MyTest::test_method1': True,
       'test_unittest_db.py::MyTest::test_method2': True}
    cache/nodeids contains:
      ['test_caching.py::test_function']
    cache/stepwise contains:
      []
    example/value contains:
      42

@@ -253,10 +359,18 @@ Clearing Cache content
-------------------------------

You can instruct pytest to clear all cache files and values
by adding the ``--cache-clear`` option like this::
by adding the ``--cache-clear`` option like this:

.. code-block:: bash

    pytest --cache-clear

This is recommended for invocations from Continuous Integration
servers where isolation and correctness is more important
than speed.


Stepwise
--------

As an alternative to ``--lf -x``, especially for cases where you expect a large part of the test suite will fail, ``--sw``, ``--stepwise`` allows you to fix them one at a time. The test suite will run until the first failure and then stop. At the next invocation, tests will continue from the last failing test and then run until the next failing test. You may use the ``--stepwise-skip`` option to ignore one failing test and stop the test execution on the second failing test instead. This is useful if you get stuck on a failing test and just want to ignore it until later.

@@ -35,7 +35,9 @@ There are two ways in which ``pytest`` can perform capturing:

.. _`disable capturing`:

You can influence output capturing mechanisms from the command line::
You can influence output capturing mechanisms from the command line:

.. code-block:: bash

    pytest -s                  # disable all capturing
    pytest --capture=sys       # replace sys.stdout/stderr with in-mem files

@@ -52,7 +54,7 @@ is that you can use print statements for debugging::
    # content of test_module.py

    def setup_function(function):
        print ("setting up %s" % function)
        print("setting up %s" % function)

    def test_func1():
        assert True

@@ -61,12 +63,15 @@ is that you can use print statements for debugging::
    assert False

and running this module will show you precisely the output
of the failing function and hide the other one::
of the failing function and hide the other one:

.. code-block:: pytest

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 2 items

    test_module.py .F                                                    [100%]

@@ -10,17 +10,15 @@
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.

import datetime
import os
import sys
import datetime

from _pytest import __version__ as version

@@ -42,11 +40,13 @@ todo_include_todos = 1
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "pygments_pytest",
    "sphinx.ext.autodoc",
    "sphinx.ext.todo",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "sphinx_removed_in",
    "sphinxcontrib_trio",
]

@@ -65,7 +65,7 @@ master_doc = "contents"
# General information about the project.
project = u"pytest"
year = datetime.datetime.utcnow().year
copyright = u"2015–2018 , holger krekel and pytest-dev team"
copyright = u"2015–2019 , holger krekel and pytest-dev team"


# The language for content autogenerated by Sphinx. Refer to documentation

@@ -335,7 +335,7 @@ intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
def setup(app):
    # from sphinx.ext.autodoc import cut_lines
    # app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
    app.add_description_unit(
    app.add_object_type(
        "confval",
        "confval",
        objname="configuration value",

@@ -33,6 +33,7 @@ Full pytest documentation
   reference

   goodpractices
   flaky
   pythonpath
   customize
   example/index

@@ -40,6 +41,7 @@ Full pytest documentation

   backwards-compatibility
   deprecations
   py27-py34-deprecation
   historical-notes
   license
   contributing

@@ -5,7 +5,9 @@ Command line options and configuration file settings
-----------------------------------------------------------------

You can get help on command line options and values in INI-style
configurations files by using the general help option::
configurations files by using the general help option:

.. code-block:: bash

    pytest -h   # prints options _and_ config file settings

@@ -92,12 +94,16 @@ The rootdir is used as a reference directory for constructing test
addresses ("nodeids") and can be used also by plugins for storing
per-testrun information.

Example::
Example:

.. code-block:: bash

    pytest path/to/testdir path/other/

will determine the common ancestor as ``path`` and then
check for ini-files as follows::
check for ini-files as follows:

.. code-block:: text

    # first look for pytest.ini files
    path/pytest.ini

@@ -127,25 +133,33 @@ progress output, you can write it into a configuration file:

.. code-block:: ini

    # content of pytest.ini
    # (or tox.ini or setup.cfg)
    # content of pytest.ini or tox.ini
    # setup.cfg files should use [tool:pytest] section instead
    [pytest]
    addopts = -ra -q

Alternatively, you can set a ``PYTEST_ADDOPTS`` environment variable to add command
line options while the environment is in use::
line options while the environment is in use:

.. code-block:: bash

    export PYTEST_ADDOPTS="-v"

Here's how the command-line is built in the presence of ``addopts`` or the environment variable::
Here's how the command-line is built in the presence of ``addopts`` or the environment variable:

.. code-block:: text

    <pytest.ini:addopts> $PYTEST_ADDOPTS <extra command-line arguments>

So if the user executes in the command-line::
So if the user executes in the command-line:

.. code-block:: bash

    pytest -m slow

The actual command line executed is::
The actual command line executed is:

.. code-block:: bash

    pytest -ra -q -v -m slow

@@ -7,6 +7,11 @@ This page lists all pytest features that are currently deprecated or have been removed.
The objective is to give users a clear rationale why a certain feature has been removed, and what alternatives
should be used instead.

.. contents::
   :depth: 3
   :local:


Deprecated Features
-------------------

@@ -14,10 +19,268 @@ Below is a complete list of all pytest features which are considered deprecated.
:class:`_pytest.warning_types.PytestWarning` or subclasses, which can be filtered using
:ref:`standard warning filters <warnings>`.

``"message"`` parameter of ``pytest.raises``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 4.1

It is a common mistake to think this parameter will match the exception message, while in fact
it only serves to provide a custom message in case the ``pytest.raises`` check fails. To prevent
users from making this mistake, and because it is believed to be little used, pytest is
deprecating it without providing an alternative for the moment.

If you have a valid use case for this parameter, consider that to obtain the same results
you can just call ``pytest.fail`` manually at the end of the ``with`` statement.

For example:

.. code-block:: python

    with pytest.raises(TimeoutError, message="Client got unexpected message"):
        wait_for(websocket.recv(), 0.5)


Becomes:

.. code-block:: python

    with pytest.raises(TimeoutError):
        wait_for(websocket.recv(), 0.5)
        pytest.fail("Client got unexpected message")


If you still have concerns about this deprecation and future removal, please comment on
`issue #3974 <https://github.com/pytest-dev/pytest/issues/3974>`__.


``pytest.config`` global
~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 4.1

The ``pytest.config`` global object is deprecated. Instead use
``request.config`` (via the ``request`` fixture) or if you are a plugin author
use the ``pytest_configure(config)`` hook.
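
For instance, code relying on the removed global could be updated along these lines
(a sketch, not taken from the changeset):

.. code-block:: python

    # in a test: use the built-in "request" fixture
    def test_knows_verbosity(request):
        assert request.config.getoption("verbose") >= 0


    # in a plugin: receive the config via the hook instead
    def pytest_configure(config):
        verbose = config.getoption("verbose")  # use it to set up the plugin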

.. _raises-warns-exec:

``raises`` / ``warns`` with a string as the second argument
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 4.1

Use the context manager form of these instead. When necessary, invoke ``exec``
directly.

Example:

.. code-block:: python

    pytest.raises(ZeroDivisionError, "1 / 0")
    pytest.raises(SyntaxError, "a $ b")

    pytest.warns(DeprecationWarning, "my_function()")
    pytest.warns(SyntaxWarning, "assert(1, 2)")

Becomes:

.. code-block:: python

    with pytest.raises(ZeroDivisionError):
        1 / 0
    with pytest.raises(SyntaxError):
        exec("a $ b")  # exec is required for invalid syntax

    with pytest.warns(DeprecationWarning):
        my_function()
    with pytest.warns(SyntaxWarning):
        exec("assert(1, 2)")  # exec is used to avoid a top-level warning


Result log (``--result-log``)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.0

The ``--resultlog`` command line option has been deprecated: it is little used
and there are more modern and better alternatives, for example `pytest-tap <https://tappy.readthedocs.io/en/latest/>`_.

This feature will be effectively removed in pytest 4.0 as the team intends to include a better alternative in the core.

If you have any concerns, please don't hesitate to `open an issue <https://github.com/pytest-dev/pytest/issues>`__.

Removed Features
----------------

As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after
an appropriate period of deprecation has passed.

Using ``Class`` in custom Collectors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 4.0

Using objects named ``"Class"`` as a way to customize the type of nodes that are collected in ``Collector``
subclasses has been deprecated. Users instead should use ``pytest_pycollect_makeitem`` to customize node types during
collection.

This issue should affect only advanced plugins who create new collection types, so if you see this warning
message please contact the authors so they can change the code.
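
A rough sketch of the replacement hook (the ``scenario_`` convention is invented for
illustration; the hook name and signature are real):

.. code-block:: python

    # conftest.py of a hypothetical plugin
    def pytest_pycollect_makeitem(collector, name, obj):
        if name.startswith("scenario_"):
            # build and return a custom item/collector for "obj" here
            ...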

marks in ``pytest.mark.parametrize``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 4.0

Applying marks to values of a ``pytest.mark.parametrize`` call is now deprecated. For example:

.. code-block:: python

    @pytest.mark.parametrize(
        "a, b",
        [
            (3, 9),
            pytest.mark.xfail(reason="flaky")(6, 36),
            (10, 100),
            (20, 200),
            (40, 400),
            (50, 500),
        ],
    )
    def test_foo(a, b):
        ...

This code applies the ``pytest.mark.xfail(reason="flaky")`` mark to the ``(6, 36)`` value of the above parametrization
call.

This was considered hard to read and understand, and also its implementation presented problems to the code preventing
further internal improvements in the marks architecture.

To update the code, use ``pytest.param``:

.. code-block:: python

    @pytest.mark.parametrize(
        "a, b",
        [
            (3, 9),
            pytest.param(6, 36, marks=pytest.mark.xfail(reason="flaky")),
            (10, 100),
            (20, 200),
            (40, 400),
            (50, 500),
        ],
    )
    def test_foo(a, b):
        ...


``pytest_funcarg__`` prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 4.0

In very early pytest versions fixtures could be defined using the ``pytest_funcarg__`` prefix:

.. code-block:: python

    def pytest_funcarg__data():
        return SomeData()

Switch over to the ``@pytest.fixture`` decorator:

.. code-block:: python

    @pytest.fixture
    def data():
        return SomeData()


[pytest] section in setup.cfg files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 4.0

``[pytest]`` sections in ``setup.cfg`` files should now be named ``[tool:pytest]``
to avoid conflicts with other distutils commands.


Metafunc.addcall
~~~~~~~~~~~~~~~~

.. versionremoved:: 4.0

:meth:`_pytest.python.Metafunc.addcall` was a precursor to the current parametrized mechanism. Users should use
:meth:`_pytest.python.Metafunc.parametrize` instead.

Example:

.. code-block:: python

    def pytest_generate_tests(metafunc):
        metafunc.addcall({"i": 1}, id="1")
        metafunc.addcall({"i": 2}, id="2")

Becomes:

.. code-block:: python

    def pytest_generate_tests(metafunc):
        metafunc.parametrize("i", [1, 2], ids=["1", "2"])


``cached_setup``
~~~~~~~~~~~~~~~~

.. versionremoved:: 4.0

``request.cached_setup`` was the precursor of the setup/teardown mechanism available to fixtures.

Example:

.. code-block:: python

    @pytest.fixture
    def db_session():
        return request.cached_setup(
            setup=Session.create, teardown=lambda session: session.close(), scope="module"
        )

This should be updated to make use of standard fixture mechanisms:

.. code-block:: python

    @pytest.fixture(scope="module")
    def db_session():
        session = Session.create()
        yield session
        session.close()


You can consult the `funcarg comparison section in the docs <https://docs.pytest.org/en/latest/funcarg_compare.html>`_ for
more information.


pytest_plugins in non-top-level conftest files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 4.0

Defining ``pytest_plugins`` is now deprecated in non-top-level conftest.py
files because they will activate referenced plugins *globally*, which is surprising because for all other pytest
features ``conftest.py`` files are only *active* for tests at or below it.
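
For example, the plugin reference simply moves up to the root ``conftest.py``
(``myproject.fixtures`` is a placeholder name):

.. code-block:: python

    # content of the top-level conftest.py
    pytest_plugins = ["myproject.fixtures"]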
|
||||
|
||||
|
||||
``Config.warn`` and ``Node.warn``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.8
.. versionremoved:: 4.0

Those methods were part of the internal pytest warnings system. Since ``3.8`` pytest uses the builtin warning
system for its own warnings, and those two functions have been removed.

* ``node.warn(PytestWarning("some message"))``: is now the **recommended** way to call this function.
  The warning instance must be a ``PytestWarning`` or subclass.

* ``node.warn("CI", "some message")``: this code/message form has been **removed** and should be converted to the warning instance form above.

``pytest_namespace``
~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.7
.. versionremoved:: 4.0

This hook was removed because it greatly complicates the pytest internals regarding configuration and initialization, making some
bug fixes and refactorings impossible.

Example of usage:

.. code-block:: python

    class MySymbol:
        ...


    def pytest_namespace():
        return {"my_symbol": MySymbol()}

Plugin authors relying on this hook should instead require that users now import the plugin modules directly (with an appropriate public API).

As a stopgap measure, plugin authors may still inject their names into pytest's namespace, usually during ``pytest_configure``:

.. code-block:: python

    import pytest


    def pytest_configure():
        pytest.my_symbol = MySymbol()

Passing command-line string to ``pytest.main()``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.0
.. versionremoved:: 4.0

Passing a command-line string to ``pytest.main()`` is no longer supported:

.. code-block:: python

    import pytest

    pytest.main("-v -s")

Pass a list instead:

.. code-block:: python

    pytest.main(["-v", "-s"])

By passing a string, users expect that pytest will interpret that command-line using the shell rules they are working
with (for example ``bash`` or ``Powershell``), but this is very hard/impossible to do in a portable way.

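A small usage sketch; ``pytest.main()`` returns the exit status as an integer, which scripts can propagate:

.. code-block:: python

    import sys

    import pytest

    # same as running ``pytest -v -s`` from the command line
    exit_code = pytest.main(["-v", "-s"])
    sys.exit(exit_code)
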
Calling fixtures directly
~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.7
.. versionremoved:: 4.0

Calling a fixture function directly, as opposed to requesting it in a test function, is no longer supported.

In those cases just request the function directly in the dependent fixture:

.. code-block:: python

    @pytest.fixture
    def cell():
        return ...


    @pytest.fixture
    def full_cell(cell):
        cell.make_full()
        return cell

Alternatively if the fixture function is called multiple times inside a test (making it hard to apply the above pattern) or
if you would like to make minimal changes to the code, you can create a fixture which calls the original function together
with the ``name`` parameter:

.. code-block:: python

    def cell():
        return ...


    @pytest.fixture(name="cell")
    def cell_fixture():
        return cell()

``Node.get_marker``
~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.6
.. versionremoved:: 4.0

As part of a large :ref:`marker-revamp`, :meth:`_pytest.nodes.Node.get_marker` was removed. See
:ref:`the documentation <update marker code>` for tips on how to update your code.

record_xml_property
~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.5

The ``record_xml_property`` fixture is now deprecated in favor of the more generic ``record_property``, which
can be used by other consumers (for example ``pytest-html``) to obtain custom information about the test run.

This is just a matter of renaming the fixture as the API is the same:

.. code-block:: python

    def test_foo(record_xml_property):
        ...

Change to:

.. code-block:: python

    def test_foo(record_property):
        ...

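A minimal sketch of what the renamed fixture does (key and value are illustrative):

.. code-block:: python

    def test_function(record_property):
        # each (name, value) pair becomes an extra property of the test report,
        # e.g. a <property> entry in the JUnit XML output
        record_property("example_key", 1)
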
``yield`` tests
~~~~~~~~~~~~~~~

.. deprecated:: 3.0
.. versionremoved:: 4.0

pytest supported ``yield``-style tests, where a test function would ``yield`` functions and values
that are then turned into proper test methods.

This form of test function doesn't support fixtures properly, and users should switch to ``pytest.mark.parametrize``:

.. code-block:: python

    @pytest.mark.parametrize("x, y", [(2, 4), (3, 9)])
    def test_squared(x, y):
        assert x ** 2 == y

Internal classes accessed through ``Node``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 4.0

Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances now issues
this warning::

    usage of Function.Module is deprecated, please use pytest.Module instead

Users should just ``import pytest`` and access those objects using the ``pytest`` module.

This has been documented as deprecated for years, but only now are we actually emitting deprecation warnings.

``somefunction.markname``
~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 4.0

As part of a large :ref:`marker-revamp` we already deprecated using ``MarkInfo``;
the only correct way to get markers of an element is via ``node.iter_markers(name)``.

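A minimal sketch of the replacement API, assuming a marker named ``slow``:

.. code-block:: python

    def pytest_runtest_setup(item):
        # ``iter_markers`` yields Mark objects, closest to the test first
        for mark in item.iter_markers(name="slow"):
            print(mark.name, mark.args, mark.kwargs)
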
Result log (``--result-log``)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. deprecated:: 3.0

The ``--resultlog`` command line option has been deprecated: it is little used
and there are more modern and better alternatives, for example `pytest-tap <https://tappy.readthedocs.io/en/latest/>`_.

Removed Features
----------------

As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after
an appropriate period of deprecation has passed.

Reinterpretation mode (``--assert=reinterp``)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 3.0

Reinterpretation mode has now been removed and only plain and rewrite
mode are available, consequently the ``--assert=reinterp`` option is
no longer available.

Removed command-line options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 3.0

The following deprecated commandline options were removed:

py.test-X* entry points
~~~~~~~~~~~~~~~~~~~~~~~

.. versionremoved:: 3.0

Removed all ``py.test-X*`` entry points. The versioned, suffixed entry points
were never documented and a leftover from a pre-virtualenv era. These entry

Doctest integration for modules and test files

By default all files matching the ``test*.txt`` pattern will
be run through the python standard ``doctest`` module. You
can change the pattern by issuing:

.. code-block:: bash

    pytest --doctest-glob='*.rst'

You can also trigger running of doctests
from docstrings in all python modules (including regular
python test modules):

.. code-block:: bash

    pytest --doctest-modules

putting them into a pytest.ini file like this:

.. code-block:: ini

    [pytest]
    addopts = --doctest-modules

If you then have a text file like this:

.. code-block:: text

    # content of example.rst

then you can just invoke ``pytest`` without command line options:

.. code-block:: pytest

    $ pytest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
    collected 1 item

    ========================= 1 passed in 0.12 seconds =========================

It is possible to use fixtures using the ``getfixture`` helper:

.. code-block:: text

    # content of example.rst
    >>> tmp = getfixture('tmpdir')

Alternatively, it can be enabled by an inline comment in the doc test
itself:

.. code-block:: rst

    # content of example.rst
    >>> get_unicode_greeting()  # doctest: +ALLOW_UNICODE
    'Hello'

By default, pytest would report only the first failure for a given doctest. If
you want to continue the test even when you have failures, do:

.. code-block:: bash

    pytest --doctest-modules --doctest-continue-on-failure

Note that like the normal ``conftest.py``, the fixtures are discovered in the directory tree the conftest is in.
This means that if you put your doctest with your source code, the relevant conftest.py needs to be in the same directory tree.
Fixtures will not be discovered in a sibling directory tree!

Output format
-------------

You can change the diff output format on failure for your doctests
by using one of the standard doctest module formats
(see :data:`python:doctest.REPORT_UDIFF`, :data:`python:doctest.REPORT_CDIFF`,
:data:`python:doctest.REPORT_NDIFF`, :data:`python:doctest.REPORT_ONLY_FIRST_FAILURE`):

.. code-block:: bash

    pytest --doctest-modules --doctest-report none
    pytest --doctest-modules --doctest-report udiff

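For instance, with ``--doctest-modules`` any docstring example in your modules is collected as a test; a hypothetical module:

.. code-block:: python

    # content of example.py (hypothetical)
    def add(a, b):
        """Return the sum of ``a`` and ``b``.

        >>> add(2, 3)
        5
        """
        return a + b
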
import _pytest._code
import pytest
from pytest import raises


def otherfunc(a, b):
    assert a == b


def otherfunc_multi(a, b):
    assert a == b


@pytest.mark.parametrize("param1, param2", [(3, 6)])
def test_generative(param1, param2):
    assert param1 * 2 < param2


class TestSpecialisedExplanations(object):
    # ...

    def test_eq_dataclass(self):
        from dataclasses import dataclass

        @dataclass
        class Foo(object):
            a: int
            b: str

        left = Foo(1, "b")
        right = Foo(1, "c")
        assert left == right

    def test_eq_attrs(self):
        import attr

        @attr.s
        class Foo(object):
            a = attr.ib()
            b = attr.ib()

        left = Foo(1, "b")
        right = Foo(1, "c")
        assert left == right


class TestRaises(object):
    def test_raises(self):
        s = "qwe"
        raises(TypeError, int, s)

    def test_raises_doesnt(self):
        raises(IOError, int, "3")

    def test_raise(self):
        raise ValueError("demo error")


class TestCustomAssertMsg(object):
    # ...
        b = 2
        assert (
            A.a == b
        ), "A.a appears not to be b\nor does not appear to be b\none of those"

import py

import pytest

mydir = py.path.local(__file__).dirpath()

hello = "world"

import py

failure_demo = py.path.local(__file__).dirpath("failure_demo.py")


def test_failure_demo_fails_properly(testdir):
    failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
    result = testdir.runpytest(target, syspathinsert=True)
    result.stdout.fnmatch_lines(["*44 failed*"])
    assert result.ret != 0

example: specifying and selecting acceptance tests

        pytest.skip("specify -A to run acceptance tests")
        self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True)

    def run(self, *cmd):
        """ called by test code to execute an acceptance test. """
        self.tmpdir.chdir()
        return subprocess.check_output(cmd).decode()

and the actual test function example:

    def test_some_acceptance_aspect(accept):
        accept.tmpdir.mkdir("somesub")
        result = accept.run("ls", "-la")
        assert "somesub" in result

If you run this test without specifying a command line option

import pytest

You can "mark" a test function with custom metadata like this::

.. versionadded:: 2.2

You can then restrict a test run to only run tests marked with ``webtest``:

.. code-block:: pytest

    $ pytest -v -m webtest
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 4 items / 3 deselected / 1 selected

    test_server.py::test_send_http PASSED [100%]

    ================== 1 passed, 3 deselected in 0.12 seconds ==================

Or the inverse, running all tests except the webtest ones:

.. code-block:: pytest

    $ pytest -v -m "not webtest"
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 4 items / 1 deselected / 3 selected

    test_server.py::test_something_quick PASSED [ 33%]
    test_server.py::test_another PASSED [ 66%]

Selecting tests based on their node ID

You can provide one or more :ref:`node IDs <node-id>` as positional
arguments to select only specified tests. This makes it easy to select
tests based on their module, class, method, or function name:

.. code-block:: pytest

    $ pytest -v test_server.py::TestClass::test_method
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 1 item

    test_server.py::TestClass::test_method PASSED [100%]

    ========================= 1 passed in 0.12 seconds =========================

You can also select on the class:

.. code-block:: pytest

    $ pytest -v test_server.py::TestClass
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 1 item

    test_server.py::TestClass::test_method PASSED [100%]

    ========================= 1 passed in 0.12 seconds =========================

Or select multiple nodes:

.. code-block:: pytest

    $ pytest -v test_server.py::TestClass test_server.py::test_send_http
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 2 items

    test_server.py::TestClass::test_method PASSED [ 50%]
    test_server.py::test_send_http PASSED [100%]

    ========================= 2 passed in 0.12 seconds =========================

.. _node-id:

Using ``-k expr`` to select tests based on their name

You can use the ``-k`` command line option to specify an expression
which implements a substring match on the test names instead of the
exact match on markers that ``-m`` provides. This makes it easy to
select tests based on their names:

.. code-block:: pytest

    $ pytest -v -k http # running with the above defined example module
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 4 items / 3 deselected / 1 selected

    test_server.py::test_send_http PASSED [100%]

    ================== 1 passed, 3 deselected in 0.12 seconds ==================

And you can also run all tests except the ones that match the keyword:

.. code-block:: pytest

    $ pytest -k "not send_http" -v
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 4 items / 1 deselected / 3 selected

    test_server.py::test_something_quick PASSED [ 33%]
    test_server.py::test_another PASSED [ 66%]

    ================== 3 passed, 1 deselected in 0.12 seconds ==================

Or to select "http" and "quick" tests:

.. code-block:: pytest

    $ pytest -k "http or quick" -v
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 4 items / 2 deselected / 2 selected

    test_server.py::test_send_http PASSED [ 50%]
    test_server.py::test_something_quick PASSED [100%]

Registering markers

.. ini-syntax for custom markers:

Registering markers for your test suite is simple:

.. code-block:: ini

    # content of pytest.ini
    [pytest]
    markers =
        webtest: mark a test as a webtest.

You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers:

.. code-block:: pytest

    $ pytest --markers
    @pytest.mark.webtest: mark a test as a webtest.

You can also set a module level marker::

    import pytest
    pytestmark = pytest.mark.webtest

or multiple markers::

    pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]

in which case markers will be applied (in left-to-right order) to
all functions and methods defined in the module.

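A sketch of a module using the list form (module and test names are illustrative):

.. code-block:: python

    # content of test_module.py (hypothetical)
    import pytest

    # applied to every test below, webtest first, then slowtest
    pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]


    def test_homepage():
        pass
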
.. _`marking individual tests when using parametrize`:
You may use ``pytest.param`` to apply a marker to an individual test instance::

    @pytest.mark.foo
    @pytest.mark.parametrize(("n", "expected"), [
        (1, 2),
        pytest.param((1, 3), marks=pytest.mark.bar),
        (2, 3),
    ])
    def test_increment(n, expected):
        assert n + 1 == expected

In this example the mark "foo" will apply to each of the three
tests, whereas the "bar" mark is only applied to the second test.
Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with parametrize`.

.. _`adding a custom marker from a plugin`:

Custom marker and command line option to control test runs

and an example invocation specifying a different environment than what
the test needs:

.. code-block:: pytest

    $ pytest -E stage2
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 1 item

    test_someenv.py s [100%]

    ======================== 1 skipped in 0.12 seconds =========================

and here is one that specifies exactly the environment needed:

.. code-block:: pytest

    $ pytest -E stage1
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 1 item

    test_someenv.py . [100%]

    ========================= 1 passed in 0.12 seconds =========================

The ``--markers`` option always gives you a list of available markers:

.. code-block:: pytest

    $ pytest --markers
    @pytest.mark.env(name): mark test to run only on named environment

However, if there is a callable as the single positional argument with no keyword arguments::

    def test_with_args():
        pass

The output is as follows:

.. code-block:: pytest

    $ pytest -q -s
    Mark(name='my_marker', args=(<function hello_world at 0xdeadbeef>,), kwargs={})

test function. From a conftest file we can read it like this::

    def pytest_runtest_setup(item):
        for mark in item.iter_markers(name='glob'):
            print("glob args=%s kwargs=%s" % (mark.args, mark.kwargs))
            sys.stdout.flush()

Let's run this without capturing output and see what we get:

.. code-block:: pytest

    $ pytest -q -s
    glob args=('function',) kwargs={'x': 3}

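For context, a sketch of the kind of test the hook above reports on; the ``glob`` marker arguments match the printed output:

.. code-block:: python

    import pytest


    @pytest.mark.glob("function", x=3)
    def test_something():
        pass
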
Let's do a little test file to show how this looks::

    def test_runs_everywhere():
        pass

then you will see two tests skipped and two executed tests as expected:

.. code-block:: pytest

    $ pytest -rs # this option reports skip reasons
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 4 items

    test_plat.py s.s. [100%]
    ========================= short test summary info ==========================
    SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux

    =================== 2 passed, 2 skipped in 0.12 seconds ====================

Note that if you specify a platform via the marker-command line option like this:

.. code-block:: pytest

    $ pytest -m linux
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 4 items / 3 deselected / 1 selected

    test_plat.py . [100%]

We want to dynamically define two markers and can do it in a
``conftest.py`` plugin::

        elif "event" in item.nodeid:
            item.add_marker(pytest.mark.event)

We can now use the ``-m option`` to select one set:

.. code-block:: pytest

    $ pytest -m interface --tb=short
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 4 items / 2 deselected / 2 selected

    test_module.py FF [100%]

    ================================= FAILURES =================================
    __________________________ test_interface_simple ___________________________
    test_module.py:3: in test_interface_simple
        assert 0
    E   assert 0
    __________________________ test_interface_complex __________________________
    test_module.py:6: in test_interface_complex
        assert 0
    E   assert 0
    ================== 2 failed, 2 deselected in 0.12 seconds ==================

or to select both "event" and "interface" tests:

.. code-block:: pytest

    $ pytest -m "interface or event" --tb=short
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 4 items / 1 deselected / 3 selected

    test_module.py FFF [100%]

    ================================= FAILURES =================================
    __________________________ test_interface_simple ___________________________
    test_module.py:3: in test_interface_simple
        assert 0
    E   assert 0
    __________________________ test_interface_complex __________________________
    test_module.py:6: in test_interface_complex
        assert 0
    E   assert 0
    ____________________________ test_event_simple _____________________________
    test_module.py:9: in test_event_simple
        assert 0
    E   assert 0
    ================== 3 failed, 1 deselected in 0.12 seconds ==================

"""
module containing parametrized tests testing cross-python
serialization via the pickle module.
"""
import distutils.spawn
import subprocess
import textwrap

import pytest

pythonlist = ["python2.7", "python3.4", "python3.5"]


class Python(object):
    def __init__(self, version, picklefile):
        self.pythonpath = distutils.spawn.find_executable(version)
        if not self.pythonpath:
            pytest.skip("{!r} not found".format(version))
        self.picklefile = picklefile

    # ...
        dumpfile = self.picklefile.dirpath("dump.py")
        dumpfile.write(
            textwrap.dedent(
                r"""
                import pickle
                f = open({!r}, 'wb')
                s = pickle.dump({!r}, f, protocol=2)
                """
            )
        )
        subprocess.check_call((self.pythonpath, str(dumpfile)))

    def load_and_is_true(self, expression):
        loadfile = self.picklefile.dirpath("load.py")
        loadfile.write(
            textwrap.dedent(
                r"""
                import pickle
                f = open({!r}, 'rb')
                obj = pickle.load(f)
                """
            )
        )
        print(loadfile)
        subprocess.check_call((self.pythonpath, str(loadfile)))


@pytest.mark.parametrize("obj", [42, {}, {1: 3}])

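The migration pattern shown in this diff, in isolation (``python`` and ``script`` are illustrative placeholders): instead of formatting a shell-style command string for ``py.process.cmdexec``, pass an argument list:

.. code-block:: python

    import subprocess

    # before: py.process.cmdexec("{} {}".format(python, script))
    # after: an argv list avoids shell quoting issues entirely
    subprocess.check_call([str(python), str(script)])
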
You can create a simple example file:

:literal:

and if you installed `PyYAML`_ or a compatible YAML-parser you can
now execute the test specification:

.. code-block:: pytest

    nonpython $ pytest test_simple.yml
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR/nonpython
    collected 2 items

    test_simple.yml F. [100%]

will be reported as a (red) string.

``reportinfo()`` is used for representing the test location and is also
consulted when reporting in ``verbose`` mode:

.. code-block:: pytest

    nonpython $ pytest -v
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR/nonpython
    collecting ... collected 2 items

    test_simple.yml::hello FAILED [ 50%]

.. regendoc:wipe

While developing your custom test collection and execution it's also
interesting to just look at the collection tree:

.. code-block:: pytest

    nonpython $ pytest --collect-only
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR/nonpython
    collected 2 items
    <Package $REGENDOC_TMPDIR/nonpython>
      <YamlFile test_simple.yml>
        <YamlItem hello>
        <YamlItem ok>

    ======================= no tests ran in 0.12 seconds =======================

# content of conftest.py
import pytest

Now we add a test configuration like this::

        end = 2
    metafunc.parametrize("param1", range(end))

This means that we only run 2 tests if we do not pass ``--all``:

.. code-block:: pytest

    $ pytest -q test_compute.py
    ..                                                                   [100%]
    2 passed in 0.12 seconds

We run only two computations, so we see two dots.
Let's run the full monty:

.. code-block:: pytest

    $ pytest -q --all
    ....F                                                                [100%]

used as the test IDs. These are succinct, but can be a pain to maintain.

In ``test_timedistance_v2``, we specified ``ids`` as a function that can generate a
string representation to make part of the test ID. So our ``datetime`` values use the
label generated by ``idfn``, but because we didn't generate a label for ``timedelta``
objects, they are still using the default pytest representation:

.. code-block:: pytest

    $ pytest test_time.py --collect-only
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 8 items
    <Module test_time.py>
      <Function test_timedistance_v0[a0-b0-expected0]>
      <Function test_timedistance_v0[a1-b1-expected1]>
      <Function test_timedistance_v1[forward]>
      <Function test_timedistance_v1[backward]>
      <Function test_timedistance_v2[20011212-20011211-expected0]>
      <Function test_timedistance_v2[20011211-20011212-expected1]>
      <Function test_timedistance_v3[forward]>
      <Function test_timedistance_v3[backward]>

    ======================= no tests ran in 0.12 seconds =======================

only have to work a bit to construct the correct arguments for pytest's

    def test_demo2(self, attribute):
        assert isinstance(attribute, str)

this is a fully self-contained example which you can run with:

.. code-block:: pytest

    $ pytest test_scenarios.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 4 items

    test_scenarios.py .... [100%]

    ========================= 4 passed in 0.12 seconds =========================

If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:

.. code-block:: pytest

    $ pytest --collect-only test_scenarios.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 4 items
    <Module test_scenarios.py>
      <Class TestSampleWithScenarios>
        <Function test_demo1[basic]>
        <Function test_demo2[basic]>
        <Function test_demo1[advanced]>
        <Function test_demo2[advanced]>

    ======================= no tests ran in 0.12 seconds =======================

creates a database object for the actual test invocations::

        else:
            raise ValueError("invalid internal test config")

Let's first see how it looks at collection time:

.. code-block:: pytest

    $ pytest test_backends.py --collect-only
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 2 items
    <Module test_backends.py>
      <Function test_db_initialized[d1]>
      <Function test_db_initialized[d2]>

    ======================= no tests ran in 0.12 seconds =======================

And then when we run the test:

.. code-block:: pytest

    $ pytest -q test_backends.py
    .F                                                                   [100%]

will be passed to respective fixture function::

        assert x == 'aaa'
        assert y == 'b'

The result of this test will be successful:

.. code-block:: pytest

    $ pytest test_indirect_list.py --collect-only
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 1 item
    <Module test_indirect_list.py>
      <Function test_indirect[a-b]>

    ======================= no tests ran in 0.12 seconds =======================

parametrizer`_ but in a lot less code::

        assert a == b

    def test_zerodivision(self, a, b):
        with pytest.raises(ZeroDivisionError):
            a / b

Our test generator looks up a class-level definition which specifies which
argument sets to use for each test function. Let's run it:

.. code-block:: pytest

    $ pytest -q
    F..                                                                  [100%]

is to be run with different sets of arguments for its three arguments:

.. literalinclude:: multipython.py

Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize):

.. code-block:: pytest

    . $ pytest -rs -q multipython.py
    ...sss...sssssssss...sss...                                          [100%]
    ========================= short test summary info ==========================
    SKIPPED [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.4' not found
    12 passed, 15 skipped in 0.12 seconds

Indirect parametrization of optional implementations/imports

And finally a little test module::

        assert round(basemod.func1(), 3) == round(optmod.func1(), 3)

If you run this with reporting for skips enabled:

.. code-block:: pytest

    $ pytest -rs test_module.py
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collected 2 items

    test_module.py .s [100%]
    ========================= short test summary info ==========================
    SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2'

    =================== 1 passed, 1 skipped in 0.12 seconds ====================

we mark the remaining three parametrized tests with the custom marker ``basic``,
and for the fourth test we also use the built-in mark ``xfail`` to indicate this
test is expected to fail. For explicitness, we set test ids for some tests.

Then run ``pytest`` with verbose mode and with only the ``basic`` marker:

.. code-block:: pytest

    $ pytest -v -m basic
    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
    cachedir: $PYTHON_PREFIX/.pytest_cache
    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 17 items / 14 deselected / 3 selected

    test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%]
    test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%]
    test_pytest_param_example.py::test_eval[basic_6*9] XFAIL [100%]

    ============ 2 passed, 14 deselected, 1 xfailed in 0.12 seconds ============

As the result:

- The test ``test_eval[1+7-8]`` passed, but the name is autogenerated and confusing.
- The test ``test_eval[basic_2+4]`` passed.
- The test ``test_eval[basic_6*9]`` was expected to fail and did fail.

.. _`parametrizing_conditional_raising`:
|
||||
|
||||
Parametrizing conditional raising
|
||||
--------------------------------------------------------------------
|
||||
|
||||
Use :func:`pytest.raises` with the
|
||||
:ref:`pytest.mark.parametrize ref` decorator to write parametrized tests
|
||||
in which some tests raise exceptions and others do not.
|
||||
|
||||
It is helpful to define a no-op context manager ``does_not_raise`` to serve
|
||||
as a complement to ``raises``. For example::
|
||||
|
||||
from contextlib import contextmanager
|
||||
import pytest
|
||||
|
||||
@contextmanager
|
||||
def does_not_raise():
|
||||
yield
|
||||
|
||||
|
||||
@pytest.mark.parametrize('example_input,expectation', [
|
||||
(3, does_not_raise()),
|
||||
(2, does_not_raise()),
|
||||
(1, does_not_raise()),
|
||||
(0, pytest.raises(ZeroDivisionError)),
|
||||
])
|
||||
def test_division(example_input, expectation):
|
||||
"""Test how much I know division."""
|
||||
with expectation:
|
||||
assert (6 / example_input) is not None
|
||||
|
||||
In the example above, the first three test cases should run unexceptionally,
|
||||
while the fourth should raise ``ZeroDivisionError``.
|
||||
|
||||
If you're only supporting Python 3.7+, you can simply use ``nullcontext``
|
||||
to define ``does_not_raise``::
|
||||
|
||||
from contextlib import nullcontext as does_not_raise
|
||||
|
||||
Or, if you're supporting Python 3.3+ you can use::
|
||||
|
||||
from contextlib import ExitStack as does_not_raise
|
||||
|
||||
Or, if desired, you can ``pip install contextlib2`` and use::
|
||||
|
||||
from contextlib2 import ExitStack as does_not_raise
|
||||
|
||||
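Putting the pieces together under the Python 3.7+ assumption, a minimal
self-contained version of the example might look like this (a sketch, not
part of the diff above; behaviour matches the ``does_not_raise`` variant)::

    # sketch: conditional raising with contextlib.nullcontext (Python 3.7+)
    from contextlib import nullcontext as does_not_raise

    import pytest

    @pytest.mark.parametrize('example_input,expectation', [
        (1, does_not_raise()),
        (0, pytest.raises(ZeroDivisionError)),
    ])
    def test_division(example_input, expectation):
        # the context either expects no exception or asserts the raise
        with expectation:
            assert (6 / example_input) is not None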
@@ -1,4 +1,5 @@
import sys

import pytest

py3 = sys.version_info[0] >= 3

@@ -1,6 +1,5 @@

def test_exception_syntax():
    try:
        0/0
        0 / 0
    except ZeroDivisionError, e:
        pass
    assert e

@@ -2,4 +2,4 @@ def test_exception_syntax():
    try:
        0 / 0
    except ZeroDivisionError as e:
        pass
    assert e

@@ -6,7 +6,9 @@ Ignore paths during test collection

You can easily ignore certain test directories and modules during collection
by passing the ``--ignore=path`` option on the cli. ``pytest`` allows multiple
``--ignore`` options. Example::
``--ignore`` options. Example:

.. code-block:: text

tests/
|-- example
@@ -24,20 +26,25 @@ by passing the ``--ignore=path`` option on the cli. ``pytest`` allows multiple
'-- test_world_03.py

Now if you invoke ``pytest`` with ``--ignore=tests/foobar/test_foobar_03.py --ignore=tests/hello/``,
you will see that ``pytest`` only collects test-modules, which do not match the patterns specified::
you will see that ``pytest`` only collects test modules that do not match the patterns specified:

========= test session starts ==========
platform darwin -- Python 2.7.10, pytest-2.8.2, py-1.4.30, pluggy-0.3.1
.. code-block:: pytest

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 5 items

tests/example/test_example_01.py .
tests/example/test_example_02.py .
tests/example/test_example_03.py .
tests/foobar/test_foobar_01.py .
tests/foobar/test_foobar_02.py .
tests/example/test_example_01.py . [ 20%]
tests/example/test_example_02.py . [ 40%]
tests/example/test_example_03.py . [ 60%]
tests/foobar/test_foobar_01.py . [ 80%]
tests/foobar/test_foobar_02.py . [100%]

======= 5 passed in 0.02 seconds =======
========================= 5 passed in 0.02 seconds =========================

The ``--ignore-glob`` option allows ignoring test file paths based on Unix shell-style wildcards.
If you want to exclude test-modules that end with ``_01.py``, execute ``pytest`` with ``--ignore-glob='*_01.py'``.
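A combined invocation would look something like this (illustrative only, not
output captured from the docs build):

.. code-block:: bash

    pytest --ignore=tests/hello/ --ignore-glob='*_01.py'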
Deselect tests during test collection
-------------------------------------
@@ -52,7 +59,9 @@ Keeping duplicate paths specified from command line
----------------------------------------------------

Default behavior of ``pytest`` is to ignore duplicate paths specified from the command line.
Example::
Example:

.. code-block:: pytest

pytest path_a path_a

@@ -63,7 +72,9 @@ Example::

Just collect tests once.

To collect duplicate tests, use the ``--keep-duplicates`` option on the cli.
Example::
Example:

.. code-block:: pytest

pytest --keep-duplicates path_a path_a

@@ -73,7 +84,9 @@ Example::

As the collector just works on directories, if you specify a single test file twice, ``pytest`` will
still collect it twice, even if ``--keep-duplicates`` is not specified.
Example::
Example:

.. code-block:: pytest

pytest test_a.py test_a.py

@@ -85,7 +98,9 @@ Example::

Changing directory recursion
-----------------------------------------------------

You can set the :confval:`norecursedirs` option in an ini-file, for example your ``pytest.ini`` in the project root directory::
You can set the :confval:`norecursedirs` option in an ini-file, for example your ``pytest.ini`` in the project root directory:

.. code-block:: ini

# content of pytest.ini
[pytest]
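The setting that the hunk cuts off is plausibly along these lines (a sketch
using the pattern style from the pytest docs; adjust for your own tree):

.. code-block:: ini

    # content of pytest.ini (sketch)
    [pytest]
    norecursedirs = .svn _build tmp*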
@@ -101,7 +116,9 @@ Changing naming conventions

You can configure different naming conventions by setting
the :confval:`python_files`, :confval:`python_classes` and
:confval:`python_functions` configuration options.
Here is an example::
Here is an example:

.. code-block:: ini

# content of pytest.ini
# Example 1: have pytest look for "check" instead of "test"
@@ -123,22 +140,26 @@ that match ``*_check``. For example, if we have::

    def complex_check(self):
        pass

The test collection would look like this::
The test collection would look like this:

.. code-block:: pytest

$ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 2 items
<Module 'check_myapp.py'>
<Class 'CheckMyApp'>
<Instance '()'>
<Function 'simple_check'>
<Function 'complex_check'>
<Module check_myapp.py>
<Class CheckMyApp>
<Function simple_check>
<Function complex_check>

======================= no tests ran in 0.12 seconds =======================

You can check for multiple glob patterns by adding a space between the patterns::
You can check for multiple glob patterns by adding a space between the patterns:

.. code-block:: ini

# Example 2: have pytest look for files with "test" and "example"
# content of pytest.ini, tox.ini, or setup.cfg file (replace "pytest"
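The truncated "Example 2" plausibly continues along these lines (a sketch;
the exact globs live in the commit, not here):

.. code-block:: ini

    [pytest]
    python_files = test_*.py example_*.py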
@@ -158,13 +179,17 @@ Interpreting cmdline arguments as Python packages

You can use the ``--pyargs`` option to make ``pytest`` try
interpreting arguments as python package names, deriving
their file system path and then running the test. For
example if you have unittest2 installed you can type::
example if you have unittest2 installed you can type:

.. code-block:: bash

pytest --pyargs unittest2.test.test_skipping -q

which would run the respective test module. Like with
other options, through an ini-file and the :confval:`addopts` option you
can make this change more permanently::
can make this change more permanently:

.. code-block:: ini

# content of pytest.ini
[pytest]
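The value cut off by the hunk is presumably an ``addopts`` entry such as this
sketch:

.. code-block:: ini

    [pytest]
    addopts = --pyargs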
@@ -177,19 +202,21 @@ treat it as a filesystem path.

Finding out what is collected
-----------------------------------------------

You can always peek at the collection tree without running tests like this::
You can always peek at the collection tree without running tests like this:

.. code-block:: pytest

. $ pytest --collect-only pythoncollection.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 3 items
<Module 'CWD/pythoncollection.py'>
<Function 'test_function'>
<Class 'TestClass'>
<Instance '()'>
<Function 'test_method'>
<Function 'test_anothermethod'>
<Module CWD/pythoncollection.py>
<Function test_function>
<Class TestClass>
<Function test_method>
<Function test_anothermethod>

======================= no tests ran in 0.12 seconds =======================

@@ -200,7 +227,9 @@ Customizing test collection

.. regendoc:wipe

You can easily instruct ``pytest`` to discover tests from every Python file::
You can easily instruct ``pytest`` to discover tests from every Python file:

.. code-block:: ini

# content of pytest.ini
[pytest]

@@ -233,7 +262,9 @@ and a ``setup.py`` dummy file like this::

0/0 # will raise exception if imported

If you run with a Python 2 interpreter then you will find the one test and will
leave out the ``setup.py`` file::
leave out the ``setup.py`` file:

.. code-block:: pytest

#$ pytest --collect-only
====== test session starts ======
@@ -246,12 +277,29 @@ leave out the ``setup.py`` file::
====== no tests ran in 0.04 seconds ======

If you run with a Python 3 interpreter both the one test and the ``setup.py``
file will be left out::
file will be left out:

.. code-block:: pytest

$ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 0 items

======================= no tests ran in 0.12 seconds =======================

It's also possible to ignore files based on Unix shell-style wildcards by adding
patterns to ``collect_ignore_glob``.

The following example ``conftest.py`` ignores the file ``setup.py`` and in
addition all files that end with ``*_py2.py`` when executed with a Python 3
interpreter::

    # content of conftest.py
    import sys

    collect_ignore = ["setup.py"]
    if sys.version_info[0] > 2:
        collect_ignore_glob = ["*_py2.py"]
@@ -7,26 +7,30 @@ Demo of Python failure reports with pytest

Here is a nice run of several tens of failures
and how ``pytest`` presents things (unfortunately
not showing the nice colors here in the HTML that you
get on the terminal - we are working on that)::
get on the terminal - we are working on that):

.. code-block:: pytest

assertion $ pytest failure_demo.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/assertion, inifile:
collected 42 items
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/assertion
collected 44 items

failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%]
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%]

================================= FAILURES =================================
____________________________ test_generative[0] ____________________________
___________________________ test_generative[3-6] ___________________________

param1 = 3, param2 = 6

@pytest.mark.parametrize("param1, param2", [(3, 6)])
def test_generative(param1, param2):
> assert param1 * 2 < param2
E assert (3 * 2) < 6

failure_demo.py:19: AssertionError
failure_demo.py:22: AssertionError
_________________________ TestFailing.test_simple __________________________

self = <failure_demo.TestFailing object at 0xdeadbeef>

@@ -43,7 +47,7 @@ get on the terminal - we are working on that)::
E + where 42 = <function TestFailing.test_simple.<locals>.f at 0xdeadbeef>()
E + and 43 = <function TestFailing.test_simple.<locals>.g at 0xdeadbeef>()

failure_demo.py:35: AssertionError
failure_demo.py:33: AssertionError
____________________ TestFailing.test_simple_multiline _____________________

self = <failure_demo.TestFailing object at 0xdeadbeef>

@@ -51,7 +55,7 @@ get on the terminal - we are working on that)::
def test_simple_multiline(self):
> otherfunc_multi(42, 6 * 9)

failure_demo.py:38:
failure_demo.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

a = 42, b = 54

@@ -60,7 +64,7 @@ get on the terminal - we are working on that)::
> assert a == b
E assert 42 == 54

failure_demo.py:15: AssertionError
failure_demo.py:17: AssertionError
___________________________ TestFailing.test_not ___________________________

self = <failure_demo.TestFailing object at 0xdeadbeef>

@@ -73,7 +77,7 @@ get on the terminal - we are working on that)::
E assert not 42
E + where 42 = <function TestFailing.test_not.<locals>.f at 0xdeadbeef>()

failure_demo.py:44: AssertionError
failure_demo.py:42: AssertionError
_________________ TestSpecialisedExplanations.test_eq_text _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -84,7 +88,7 @@ get on the terminal - we are working on that)::
E - spam
E + eggs

failure_demo.py:49: AssertionError
failure_demo.py:47: AssertionError
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -97,7 +101,7 @@ get on the terminal - we are working on that)::
E + foo 2 bar
E ? ^

failure_demo.py:52: AssertionError
failure_demo.py:50: AssertionError
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -110,7 +114,7 @@ get on the terminal - we are working on that)::
E + eggs
E bar

failure_demo.py:55: AssertionError
failure_demo.py:53: AssertionError
______________ TestSpecialisedExplanations.test_eq_long_text _______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -127,7 +131,7 @@ get on the terminal - we are working on that)::
E + 1111111111b222222222
E ? ^

failure_demo.py:60: AssertionError
failure_demo.py:58: AssertionError
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -147,7 +151,7 @@ get on the terminal - we are working on that)::
E
E ...Full output truncated (7 lines hidden), use '-vv' to show

failure_demo.py:65: AssertionError
failure_demo.py:63: AssertionError
_________________ TestSpecialisedExplanations.test_eq_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -158,7 +162,7 @@ get on the terminal - we are working on that)::
E At index 2 diff: 2 != 3
E Use -v to get the full diff

failure_demo.py:68: AssertionError
failure_demo.py:66: AssertionError
______________ TestSpecialisedExplanations.test_eq_list_long _______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -171,7 +175,7 @@ get on the terminal - we are working on that)::
E At index 100 diff: 1 != 2
E Use -v to get the full diff

failure_demo.py:73: AssertionError
failure_demo.py:71: AssertionError
_________________ TestSpecialisedExplanations.test_eq_dict _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -189,7 +193,7 @@ get on the terminal - we are working on that)::
E
E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:76: AssertionError
failure_demo.py:74: AssertionError
_________________ TestSpecialisedExplanations.test_eq_set __________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -207,7 +211,7 @@ get on the terminal - we are working on that)::
E
E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:79: AssertionError
failure_demo.py:77: AssertionError
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -218,7 +222,7 @@ get on the terminal - we are working on that)::
E Right contains more items, first extra item: 3
E Use -v to get the full diff

failure_demo.py:82: AssertionError
failure_demo.py:80: AssertionError
_________________ TestSpecialisedExplanations.test_in_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -227,7 +231,7 @@ get on the terminal - we are working on that)::
> assert 1 in [0, 2, 3, 4, 5]
E assert 1 in [0, 2, 3, 4, 5]

failure_demo.py:85: AssertionError
failure_demo.py:83: AssertionError
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -246,7 +250,7 @@ get on the terminal - we are working on that)::
E
E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:89: AssertionError
failure_demo.py:87: AssertionError
___________ TestSpecialisedExplanations.test_not_in_text_single ____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -259,7 +263,7 @@ get on the terminal - we are working on that)::
E single foo line
E ? +++

failure_demo.py:93: AssertionError
failure_demo.py:91: AssertionError
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -272,7 +276,7 @@ get on the terminal - we are working on that)::
E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? +++

failure_demo.py:97: AssertionError
failure_demo.py:95: AssertionError
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@@ -285,7 +289,49 @@ get on the terminal - we are working on that)::
E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

failure_demo.py:101: AssertionError
failure_demo.py:99: AssertionError
______________ TestSpecialisedExplanations.test_eq_dataclass _______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

def test_eq_dataclass(self):
    from dataclasses import dataclass

    @dataclass
    class Foo(object):
        a: int
        b: str

    left = Foo(1, "b")
    right = Foo(1, "c")
> assert left == right
E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialise...oo(a=1, b='c')
E Omitting 1 identical items, use -vv to show
E Differing attributes:
E b: 'b' != 'c'

failure_demo.py:111: AssertionError
________________ TestSpecialisedExplanations.test_eq_attrs _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

def test_eq_attrs(self):
    import attr

    @attr.s
    class Foo(object):
        a = attr.ib()
        b = attr.ib()

    left = Foo(1, "b")
    right = Foo(1, "c")
> assert left == right
E AssertionError: assert Foo(a=1, b='b') == Foo(a=1, b='c')
E Omitting 1 identical items, use -vv to show
E Differing attributes:
E b: 'b' != 'c'

failure_demo.py:123: AssertionError
______________________________ test_attribute ______________________________

def test_attribute():

@@ -297,7 +343,7 @@ get on the terminal - we are working on that)::
E assert 1 == 2
E + where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0xdeadbeef>.b

failure_demo.py:109: AssertionError
failure_demo.py:131: AssertionError
_________________________ test_attribute_instance __________________________

def test_attribute_instance():

@@ -309,7 +355,7 @@ get on the terminal - we are working on that)::
E + where 1 = <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef>.b
E + where <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()

failure_demo.py:116: AssertionError
failure_demo.py:138: AssertionError
__________________________ test_attribute_failure __________________________

def test_attribute_failure():

@@ -322,7 +368,7 @@ get on the terminal - we are working on that)::
i = Foo()
> assert i.b == 2

failure_demo.py:127:
failure_demo.py:149:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <failure_demo.test_attribute_failure.<locals>.Foo object at 0xdeadbeef>

@@ -331,7 +377,7 @@ get on the terminal - we are working on that)::
> raise Exception("Failed to get attrib")
E Exception: Failed to get attrib

failure_demo.py:122: Exception
failure_demo.py:144: Exception
_________________________ test_attribute_multiple __________________________

def test_attribute_multiple():

@@ -348,31 +394,26 @@ get on the terminal - we are working on that)::
E + and 2 = <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef>.b
E + where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()

failure_demo.py:137: AssertionError
failure_demo.py:159: AssertionError
__________________________ TestRaises.test_raises __________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

def test_raises(self):
s = "qwe" # NOQA
> raises(TypeError, "int(s)")
s = "qwe"
> raises(TypeError, int, s)
E ValueError: invalid literal for int() with base 10: 'qwe'

failure_demo.py:147:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

> int(s)
E ValueError: invalid literal for int() with base 10: 'qwe'

<0-codegen $PYTHON_PREFIX/lib/python3.6/site-packages/_pytest/python_api.py:682>:1: ValueError
failure_demo.py:169: ValueError
______________________ TestRaises.test_raises_doesnt _______________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

def test_raises_doesnt(self):
> raises(IOError, "int('3')")
> raises(IOError, int, "3")
E Failed: DID NOT RAISE <class 'OSError'>

failure_demo.py:150: Failed
failure_demo.py:172: Failed
__________________________ TestRaises.test_raise ___________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

@@ -381,7 +422,7 @@ get on the terminal - we are working on that)::
> raise ValueError("demo error")
E ValueError: demo error

failure_demo.py:153: ValueError
failure_demo.py:175: ValueError
________________________ TestRaises.test_tupleerror ________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

@@ -390,7 +431,7 @@ get on the terminal - we are working on that)::
> a, b = [1] # NOQA
E ValueError: not enough values to unpack (expected 2, got 1)

failure_demo.py:156: ValueError
failure_demo.py:178: ValueError
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______

self = <failure_demo.TestRaises object at 0xdeadbeef>

@@ -401,7 +442,7 @@ get on the terminal - we are working on that)::
> a, b = items.pop()
E TypeError: 'int' object is not iterable

failure_demo.py:161: TypeError
failure_demo.py:183: TypeError
--------------------------- Captured stdout call ---------------------------
items is [1, 2, 3]
________________________ TestRaises.test_some_error ________________________

@@ -412,7 +453,7 @@ get on the terminal - we are working on that)::
> if namenotexi: # NOQA
E NameError: name 'namenotexi' is not defined

failure_demo.py:164: NameError
failure_demo.py:186: NameError
____________________ test_dynamic_compile_shows_nicely _____________________

def test_dynamic_compile_shows_nicely():

@@ -427,14 +468,14 @@ get on the terminal - we are working on that)::
sys.modules[name] = module
> module.foo()

failure_demo.py:182:
failure_demo.py:204:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

def foo():
> assert 1 == 0
E AssertionError

<2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:179>:2: AssertionError
<0-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:201>:2: AssertionError
____________________ TestMoreErrors.test_complex_error _____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -448,9 +489,9 @@ get on the terminal - we are working on that)::

> somefunc(f(), g())

failure_demo.py:193:
failure_demo.py:215:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
failure_demo.py:11: in somefunc
failure_demo.py:13: in somefunc
otherfunc(x, y)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

@@ -460,7 +501,7 @@ get on the terminal - we are working on that)::
> assert a == b
E assert 44 == 43

failure_demo.py:7: AssertionError
failure_demo.py:9: AssertionError
___________________ TestMoreErrors.test_z1_unpack_error ____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -470,7 +511,7 @@ get on the terminal - we are working on that)::
> a, b = items
E ValueError: not enough values to unpack (expected 2, got 0)

failure_demo.py:197: ValueError
failure_demo.py:219: ValueError
____________________ TestMoreErrors.test_z2_type_error _____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -480,7 +521,7 @@ get on the terminal - we are working on that)::
> a, b = items
E TypeError: 'int' object is not iterable

failure_demo.py:201: TypeError
failure_demo.py:223: TypeError
______________________ TestMoreErrors.test_startswith ______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -493,7 +534,7 @@ get on the terminal - we are working on that)::
E + where False = <built-in method startswith of str object at 0xdeadbeef>('456')
E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith

failure_demo.py:206: AssertionError
failure_demo.py:228: AssertionError
__________________ TestMoreErrors.test_startswith_nested ___________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -512,7 +553,7 @@ get on the terminal - we are working on that)::
E + where '123' = <function TestMoreErrors.test_startswith_nested.<locals>.f at 0xdeadbeef>()
E + and '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0xdeadbeef>()

failure_demo.py:215: AssertionError
failure_demo.py:237: AssertionError
_____________________ TestMoreErrors.test_global_func ______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -523,7 +564,7 @@ get on the terminal - we are working on that)::
E + where False = isinstance(43, float)
E + where 43 = globf(42)

failure_demo.py:218: AssertionError
failure_demo.py:240: AssertionError
_______________________ TestMoreErrors.test_instance _______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -534,7 +575,7 @@ get on the terminal - we are working on that)::
E assert 42 != 42
E + where 42 = <failure_demo.TestMoreErrors object at 0xdeadbeef>.x

failure_demo.py:222: AssertionError
failure_demo.py:244: AssertionError
_______________________ TestMoreErrors.test_compare ________________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -544,7 +585,7 @@ get on the terminal - we are working on that)::
E assert 11 < 5
E + where 11 = globf(10)

failure_demo.py:225: AssertionError
failure_demo.py:247: AssertionError
_____________________ TestMoreErrors.test_try_finally ______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@@ -555,7 +596,7 @@ get on the terminal - we are working on that)::
> assert x == 0
E assert 1 == 0

failure_demo.py:230: AssertionError
failure_demo.py:252: AssertionError
___________________ TestCustomAssertMsg.test_single_line ___________________

self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>

@@ -570,7 +611,7 @@ get on the terminal - we are working on that)::
E assert 1 == 2
E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_single_line.<locals>.A'>.a

failure_demo.py:241: AssertionError
failure_demo.py:263: AssertionError
____________________ TestCustomAssertMsg.test_multiline ____________________

self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>

@@ -582,14 +623,14 @@ get on the terminal - we are working on that)::
b = 2
> assert (
A.a == b
), "A.a appears not to be b\n" "or does not appear to be b\none of those"
), "A.a appears not to be b\nor does not appear to be b\none of those"
E AssertionError: A.a appears not to be b
E or does not appear to be b
E one of those
E assert 1 == 2
E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_multiline.<locals>.A'>.a

failure_demo.py:248: AssertionError
failure_demo.py:270: AssertionError
___________________ TestCustomAssertMsg.test_custom_repr ___________________

self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>

@@ -611,11 +652,5 @@ get on the terminal - we are working on that)::
E assert 1 == 2
E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a

failure_demo.py:261: AssertionError
============================= warnings summary =============================
$REGENDOC_TMPDIR/assertion/failure_demo.py:24: RemovedInPytest4Warning: Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.
Please use Metafunc.parametrize instead.
metafunc.addcall(funcargs=dict(param1=3, param2=6))

-- Docs: https://docs.pytest.org/en/latest/warnings.html
================== 42 failed, 1 warnings in 0.12 seconds ===================
failure_demo.py:283: AssertionError
======================== 44 failed in 0.12 seconds =========================
@@ -43,7 +43,9 @@ provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`:

def cmdopt(request):
    return request.config.getoption("--cmdopt")
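For context, the docs pair this fixture with a ``pytest_addoption`` hook in
the same ``conftest.py``; a minimal sketch:

.. code-block:: python

    # content of conftest.py (sketch)
    import pytest

    def pytest_addoption(parser):
        # register the custom --cmdopt flag with a default value
        parser.addoption(
            "--cmdopt", action="store", default="type1", help="my option: type1 or type2"
        )

    @pytest.fixture
    def cmdopt(request):
        return request.config.getoption("--cmdopt")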
Let's run this without supplying our new option::
Let's run this without supplying our new option:

.. code-block:: pytest

$ pytest -q test_sample.py
F [100%]
@@ -65,7 +67,9 @@ Let's run this without supplying our new option::
first
1 failed in 0.12 seconds

And now with supplying a command line option::
And now with supplying a command line option:

.. code-block:: pytest

$ pytest -q --cmdopt=type2
F [100%]
@@ -117,12 +121,15 @@ the command line arguments before they get processed:
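The conftest.py referenced below is elided by the hunk; the docs' version is
roughly this sketch (the hook receives the raw argument list):

.. code-block:: python

    # content of conftest.py (sketch)
    import sys

    def pytest_cmdline_preparse(args):
        if "xdist" in sys.modules:  # pytest-xdist plugin is loaded
            import multiprocessing

            # spread tests over roughly half the available cores
            num = max(multiprocessing.cpu_count() // 2, 1)
            args[:] = ["-n", str(num)] + args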
If you have the `xdist plugin <https://pypi.org/project/pytest-xdist/>`_ installed
you will now always perform test runs using a number
of subprocesses close to your CPU. Running in an empty
directory with the above conftest.py::
directory with the above conftest.py:

.. code-block:: pytest

$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 0 items

======================= no tests ran in 0.12 seconds =======================
@@ -175,26 +182,32 @@ We can now write a test module like this:

def test_func_slow():
    pass

and when running it will see a skipped "slow" test::
and when running it will see a skipped "slow" test:

.. code-block:: pytest

$ pytest -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py .s [100%]
========================= short test summary info ==========================
SKIP [1] test_module.py:8: need --runslow option to run
SKIPPED [1] test_module.py:8: need --runslow option to run

=================== 1 passed, 1 skipped in 0.12 seconds ====================

Or run it including the ``slow`` marked test::
Or run it including the ``slow`` marked test:

.. code-block:: pytest

$ pytest --runslow
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py .. [100%]
@@ -230,7 +243,9 @@ Example:

The ``__tracebackhide__`` setting influences ``pytest`` showing
of tracebacks: the ``checkconfig`` function will not be shown
unless the ``--full-trace`` command line option is specified.
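The function under discussion is elided above; a sketch of the docs' example:

.. code-block:: python

    # content of test_checkconfig.py (sketch)
    import pytest

    def checkconfig(x):
        __tracebackhide__ = True  # hide this helper frame in tracebacks
        if not hasattr(x, "config"):
            pytest.fail("not configured: %s" % (x,))

    def test_something():
        checkconfig(42)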
Let's run our little function::
Let's run our little function:

.. code-block:: pytest

$ pytest -q test_checkconfig.py
F [100%]
@@ -327,13 +342,16 @@ It's easy to present extra information in a ``pytest`` run:

def pytest_report_header(config):
    return "project deps: mylib-1.1"

which will add the string to the test header accordingly::
which will add the string to the test header accordingly:

.. code-block:: pytest

$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
project deps: mylib-1.1
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 0 items

======================= no tests ran in 0.12 seconds =======================
@@ -353,25 +371,30 @@ display more information if applicable:

if config.getoption("verbose") > 0:
    return ["info1: did you know that ...", "did you?"]

which will add info only when run with "--v"::
which will add info only when run with "--v":

.. code-block:: pytest

$ pytest -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
info1: did you know that ...
did you?
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 0 items

======================= no tests ran in 0.12 seconds =======================

and nothing when run plainly::
and nothing when run plainly:

.. code-block:: pytest

$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 0 items

======================= no tests ran in 0.12 seconds =======================
@@ -403,12 +426,15 @@ out which tests are the slowest. Let's make an artificial test suite:

def test_funcslow2():
    time.sleep(0.3)
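For reference, the full artificial suite is roughly this sketch, reconstructed
from the visible tail of the module:

.. code-block:: python

    # content of test_some_are_slow.py (sketch)
    import time

    def test_funcfast():
        time.sleep(0.1)

    def test_funcslow1():
        time.sleep(0.2)

    def test_funcslow2():
        time.sleep(0.3)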
Now we can profile which test functions execute the slowest::
Now we can profile which test functions execute the slowest:

.. code-block:: pytest

$ pytest --durations=3
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 3 items

test_some_are_slow.py ... [100%]
@@ -475,12 +501,15 @@ tests in a class. Here is a test module example:

def test_normal():
    pass
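The ``incremental`` behaviour shown below comes from conftest hooks that the
hunk elides; the docs implement them roughly like this sketch:

.. code-block:: python

    # content of conftest.py (sketch)
    import pytest

    def pytest_runtest_makereport(item, call):
        if "incremental" in item.keywords:
            if call.excinfo is not None:
                # remember the failing step on the parent class
                item.parent._previousfailed = item

    def pytest_runtest_setup(item):
        if "incremental" in item.keywords:
            previousfailed = getattr(item.parent, "_previousfailed", None)
            if previousfailed is not None:
                pytest.xfail("previous test failed (%s)" % previousfailed.name)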
If we run this::
If we run this:

.. code-block:: pytest

$ pytest -rx
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 4 items

test_step.py .Fx. [100%]
@@ -496,7 +525,7 @@ If we run this::

test_step.py:11: AssertionError
========================= short test summary info ==========================
XFAIL test_step.py::TestUserHandling::()::test_deletion
XFAIL test_step.py::TestUserHandling::test_deletion
reason: previous test failed (test_modification)
============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds ===============

@@ -556,12 +585,15 @@ the ``db`` fixture:

def test_root(db): # no db here, will error out
    pass

We can run this::
We can run this:

.. code-block:: pytest

$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 7 items

test_step.py .Fx. [ 57%]
@@ -574,7 +606,7 @@ We can run this::
file $REGENDOC_TMPDIR/b/test_error.py, line 1
def test_root(db): # no db here, will error out
E fixture 'db' not found
> available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmpdir, tmpdir_factory
> available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory
> use 'pytest --fixtures [testpath]' for help on them.

$REGENDOC_TMPDIR/b/test_error.py:1
@@ -667,12 +699,15 @@ if you then have failing tests:

def test_fail2():
    assert 0

and run them::
and run them:

.. code-block:: pytest

$ pytest test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py FF [100%]
@@ -696,7 +731,9 @@ and run them::
test_module.py:6: AssertionError
========================= 2 failed in 0.12 seconds =========================

you will have a "failures" file which contains the failing test ids::
you will have a "failures" file which contains the failing test ids:

.. code-block:: bash

$ cat failures
test_module.py::test_fail1 (PYTEST_TMPDIR/test_fail10)
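The "failures" file is written by a conftest hook that the diff does not show;
the docs' version is approximately this sketch:

.. code-block:: python

    # content of conftest.py (sketch)
    import os

    import pytest

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        # execute all other hooks to obtain the report object
        outcome = yield
        rep = outcome.get_result()
        if rep.when == "call" and rep.failed:
            mode = "a" if os.path.exists("failures") else "w"
            with open("failures", mode) as f:
                # also record the tmpdir, when the test used that fixture
                if "tmpdir" in item.fixturenames:
                    extra = " (%s)" % item.funcargs["tmpdir"]
                else:
                    extra = ""
                f.write(rep.nodeid + extra + "\n")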
@@ -766,12 +803,15 @@ if you then have failing tests:

def test_fail2():
    assert 0

and run it::
and run it:

.. code-block:: pytest

$ pytest -s test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 3 items

test_module.py Esetting up a test failed! test_module.py::test_setup_fails
@@ -852,6 +892,8 @@ In that order.
can be changed between releases (even bug fixes) so it shouldn't be relied on for scripting
or automation.

.. _freezing-pytest:

Freezing pytest
---------------

@@ -895,6 +937,8 @@ like ``pytest-timeout`` they must be imported explicitly and passed on to pytest

This allows you to execute tests using the frozen
application with standard ``pytest`` command-line options::
application with standard ``pytest`` command-line options:

.. code-block:: bash

./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/

@@ -13,7 +13,7 @@ calls it::

@pytest.fixture(scope="session", autouse=True)
def callattr_ahead_of_alltests(request):
    print ("callattr_ahead_of_alltests called")
    print("callattr_ahead_of_alltests called")
    seen = set([None])
    session = request.node
    for item in session.items:
@@ -31,20 +31,20 @@ will be called ahead of running any tests::

class TestHello(object):
    @classmethod
    def callme(cls):
        print ("callme called!")
        print("callme called!")

    def test_method1(self):
        print ("test_method1 called")
        print("test_method1 called")

    def test_method2(self):
        print ("test_method1 called")
        print("test_method1 called")

class TestOther(object):
    @classmethod
    def callme(cls):
        print ("callme other called")
        print("callme other called")
    def test_other(self):
        print ("test other")
        print("test other")

# works with unittest as well ...
import unittest
@@ -52,12 +52,14 @@ will be called ahead of running any tests::
class SomeTest(unittest.TestCase):
    @classmethod
    def callme(self):
        print ("SomeTest callme called")
        print("SomeTest callme called")

    def test_unit1(self):
        print ("test_unit1 method called")
        print("test_unit1 method called")

If you run this without output capturing::
If you run this without output capturing:

.. code-block:: pytest

$ pytest -q -s test_module.py
callattr_ahead_of_alltests called

@@ -66,12 +66,15 @@ using it::

Here, the ``test_ehlo`` needs the ``smtp_connection`` fixture value. pytest
will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>`
marked ``smtp_connection`` fixture function. Running the test looks like this::
marked ``smtp_connection`` fixture function. Running the test looks like this:

.. code-block:: pytest

$ pytest test_smtpsimple.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_smtpsimple.py F [100%]
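The module being run is elided above; the docs' ``test_smtpsimple.py`` is
roughly this sketch (the failure in the run above comes from a deliberate
failing assertion at the end):

.. code-block:: python

    # content of test_smtpsimple.py (sketch)
    import smtplib

    import pytest

    @pytest.fixture
    def smtp_connection():
        return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)

    def test_ehlo(smtp_connection):
        response, msg = smtp_connection.ehlo()
        assert response == 250
        assert 0  # for demo purposes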
@@ -111,7 +114,9 @@ with a list of available function arguments.

.. note::

You can always issue ::
You can always issue:

.. code-block:: bash

pytest --fixtures test_simplefactory.py

@@ -153,7 +158,7 @@ This makes use of the automatic caching mechanisms of pytest.

Another good approach is by adding the data files in the ``tests`` folder.
There are also community plugins available to help managing this aspect of
testing, e.g. `pytest-datadir <https://github.com/gabrielcnr/pytest-datadir>`__
testing, e.g. `pytest-datadir <https://pypi.org/project/pytest-datadir/>`__
and `pytest-datafiles <https://pypi.org/project/pytest-datafiles/>`__.

.. _smtpshared:

@@ -204,12 +209,15 @@ located)::

assert 0 # for demo purposes

We deliberately insert failing ``assert 0`` statements in order to
inspect what is going on and can now run the tests::
inspect what is going on and can now run the tests:

.. code-block:: pytest

$ pytest test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py FF [100%]
@@ -259,6 +267,11 @@ instance, you can simply declare it:

Finally, the ``class`` scope will invoke the fixture once per test *class*.

.. note::

    Pytest will only cache one instance of a fixture at a time.
    This means that when using a parametrized fixture, pytest may invoke a fixture more than once in the given scope.


``package`` scope (experimental)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -351,7 +364,9 @@ The ``print`` and ``smtp.close()`` statements will execute when the last test in
the module has finished execution, regardless of the exception status of the
tests.

Let's execute it::
Let's execute it:

.. code-block:: pytest

$ pytest -s -q --tb=no
FFteardown smtp

@@ -455,12 +470,14 @@ read an optional server URL from the test module which uses our fixture::

server = getattr(request.module, "smtpserver", "smtp.gmail.com")
smtp_connection = smtplib.SMTP(server, 587, timeout=5)
yield smtp_connection
print ("finalizing %s (%s)" % (smtp_connection, server))
print("finalizing %s (%s)" % (smtp_connection, server))
smtp_connection.close()

We use the ``request.module`` attribute to optionally obtain an
``smtpserver`` attribute from the test module. If we just execute
again, nothing much has changed::
again, nothing much has changed:

.. code-block:: pytest

$ pytest -s -q --tb=no
FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)
@@ -477,7 +494,9 @@ server URL in its module namespace::

def test_showhelo(smtp_connection):
    assert 0, smtp_connection.helo()

Running it::
Running it:

.. code-block:: pytest

$ pytest -qq --tb=short test_anothersmtp.py
F [100%]
@@ -579,7 +598,9 @@ The main change is the declaration of ``params`` with
:py:func:`@pytest.fixture <_pytest.python.fixture>`, a list of values
for each of which the fixture function will execute and can access
a value via ``request.param``. No test function code needs to change.
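Concretely, a parametrized fixture differs from the unparametrized one only in
the decorator; a sketch using the two server names from the run below:

.. code-block:: python

    # content of conftest.py (sketch)
    import smtplib

    import pytest

    @pytest.fixture(scope="module", params=["smtp.gmail.com", "mail.python.org"])
    def smtp_connection(request):
        # one connection per parameter, torn down at module scope
        smtp_connection = smtplib.SMTP(request.param, 587, timeout=5)
        yield smtp_connection
        print("finalizing %s" % smtp_connection)
        smtp_connection.close()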
So let's just do another run::
So let's just do another run:

.. code-block:: pytest

$ pytest -q test_module.py
FFFF [100%]
@@ -615,7 +636,7 @@ So let's just do another run::
response, msg = smtp_connection.ehlo()
assert response == 250
> assert b"smtp.gmail.com" in msg
E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8'
E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING'

test_module.py:5: AssertionError
-------------------------- Captured stdout setup ---------------------------
@@ -681,26 +702,29 @@ a function which will be called with the fixture value and then
has to return a string to use. In the latter case if the function
returns ``None`` then pytest's auto-generated ID will be used.
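The two flavours side by side might look like this sketch (names match the
``test_ids.py`` entries in the collection output below):

.. code-block:: python

    # content of test_ids.py (sketch)
    import pytest

    @pytest.fixture(params=[0, 1], ids=["spam", "ham"])
    def a(request):
        return request.param

    def test_a(a):
        pass

    def idfn(fixture_value):
        if fixture_value == 0:
            return "eggs"
        return None  # fall back to pytest's auto-generated ID

    @pytest.fixture(params=[0, 1], ids=idfn)
    def b(request):
        return request.param

    def test_b(b):
        pass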
Running the above tests results in the following test IDs being used::
Running the above tests results in the following test IDs being used:

.. code-block:: pytest

$ pytest --collect-only
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 10 items
<Module 'test_anothersmtp.py'>
<Function 'test_showhelo[smtp.gmail.com]'>
<Function 'test_showhelo[mail.python.org]'>
<Module 'test_ids.py'>
<Function 'test_a[spam]'>
<Function 'test_a[ham]'>
<Function 'test_b[eggs]'>
<Function 'test_b[1]'>
<Module 'test_module.py'>
<Function 'test_ehlo[smtp.gmail.com]'>
<Function 'test_noop[smtp.gmail.com]'>
<Function 'test_ehlo[mail.python.org]'>
<Function 'test_noop[mail.python.org]'>
<Module test_anothersmtp.py>
<Function test_showhelo[smtp.gmail.com]>
<Function test_showhelo[mail.python.org]>
<Module test_ids.py>
<Function test_a[spam]>
<Function test_a[ham]>
<Function test_b[eggs]>
<Function test_b[1]>
<Module test_module.py>
<Function test_ehlo[smtp.gmail.com]>
<Function test_noop[smtp.gmail.com]>
<Function test_ehlo[mail.python.org]>
<Function test_noop[mail.python.org]>

======================= no tests ran in 0.12 seconds =======================

@@ -723,13 +747,15 @@ Example::

def test_data(data_set):
    pass
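The fixture that produces the skip below is cut off by the hunk; the docs
define it roughly like this sketch:

.. code-block:: python

    # content of test_fixture_marks.py (sketch)
    import pytest

    @pytest.fixture(params=[0, 1, pytest.param(2, marks=pytest.mark.skip)])
    def data_set(request):
        return request.param

    def test_data(data_set):
        pass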
Running this test will *skip* the invocation of ``data_set`` with value ``2``::
Running this test will *skip* the invocation of ``data_set`` with value ``2``:

.. code-block:: pytest

$ pytest test_fixture_marks.py -v
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collecting ... collected 3 items

test_fixture_marks.py::test_data[0] PASSED [ 33%]
@@ -766,13 +792,15 @@ and instantiate an object ``app`` where we stick the already defined

assert app.smtp_connection
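The elided part of ``test_appsetup.py`` is roughly this sketch:

.. code-block:: python

    # content of test_appsetup.py (sketch)
    import pytest

    class App:
        def __init__(self, smtp_connection):
            self.smtp_connection = smtp_connection

    @pytest.fixture(scope="module")
    def app(smtp_connection):
        # builds on the parametrized smtp_connection fixture
        return App(smtp_connection)

    def test_smtp_connection_exists(app):
        assert app.smtp_connection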
Here we declare an ``app`` fixture which receives the previously defined
|
||||
``smtp_connection`` fixture and instantiates an ``App`` object with it. Let's run it::
|
||||
``smtp_connection`` fixture and instantiates an ``App`` object with it. Let's run it:
|
||||
|
||||
.. code-block:: pytest
|
||||
|
||||
$ pytest -v test_appsetup.py
|
||||
=========================== test session starts ============================
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
|
||||
cachedir: .pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
|
||||
cachedir: $PYTHON_PREFIX/.pytest_cache
|
||||
rootdir: $REGENDOC_TMPDIR
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%]
|
||||
@@ -785,7 +813,7 @@ different ``App`` instances and respective smtp servers. There is no
|
||||
need for the ``app`` fixture to be aware of the ``smtp_connection``
|
||||
parametrization because pytest will fully analyse the fixture dependency graph.
|
||||
|
||||
Note, that the ``app`` fixture has a scope of ``module`` and uses a
|
||||
Note that the ``app`` fixture has a scope of ``module`` and uses a
|
||||
module-scoped ``smtp_connection`` fixture. The example would still work if
|
||||
``smtp_connection`` was cached on a ``session`` scope: it is fine for fixtures to use
|
||||
"broader" scoped fixtures but not the other way round:
|
||||

@@ -816,32 +844,34 @@ to show the setup/teardown flow::

@pytest.fixture(scope="module", params=["mod1", "mod2"])
def modarg(request):
    param = request.param
    print (" SETUP modarg %s" % param)
    print(" SETUP modarg %s" % param)
    yield param
    print (" TEARDOWN modarg %s" % param)
    print(" TEARDOWN modarg %s" % param)

@pytest.fixture(scope="function", params=[1,2])
def otherarg(request):
    param = request.param
    print (" SETUP otherarg %s" % param)
    print(" SETUP otherarg %s" % param)
    yield param
    print (" TEARDOWN otherarg %s" % param)
    print(" TEARDOWN otherarg %s" % param)

def test_0(otherarg):
    print (" RUN test0 with otherarg %s" % otherarg)
    print(" RUN test0 with otherarg %s" % otherarg)
def test_1(modarg):
    print (" RUN test1 with modarg %s" % modarg)
    print(" RUN test1 with modarg %s" % modarg)
def test_2(otherarg, modarg):
    print (" RUN test2 with otherarg %s and modarg %s" % (otherarg, modarg))
    print(" RUN test2 with otherarg %s and modarg %s" % (otherarg, modarg))


Let's run the tests in verbose mode and with looking at the print-output::
Let's run the tests in verbose mode and with looking at the print-output:

.. code-block:: pytest

$ pytest -v -s test_module.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: .pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collecting ... collected 8 items

test_module.py::test_0[1] SETUP otherarg 1

@@ -937,7 +967,9 @@ and declare its use in a test module via a ``usefixtures`` marker::

Due to the ``usefixtures`` marker, the ``cleandir`` fixture
will be required for the execution of each test method, just as if
you specified a "cleandir" function argument to each of them. Let's run it
to verify our fixture is activated and the tests pass::
to verify our fixture is activated and the tests pass:

.. code-block:: pytest

$ pytest -q
.. [100%]

@@ -1036,7 +1068,9 @@ which implies that all test methods in the class will use this fixture

without a need to state it in the test function signature or with a
class-level ``usefixtures`` decorator.

If we run it, we get two passing tests::
If we run it, we get two passing tests:

.. code-block:: pytest

$ pytest -q
.. [100%]

125 doc/en/flaky.rst Normal file
@@ -0,0 +1,125 @@

Flaky tests
-----------

A "flaky" test is one that exhibits intermittent or sporadic failures and seems to behave non-deterministically: sometimes it passes, sometimes it fails, and it is not clear why. This page discusses pytest features that can help and other general strategies for identifying, fixing or mitigating them.

Why flaky tests are a problem
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Flaky tests are particularly troublesome when a continuous integration (CI) server is being used, so that all tests must pass before a new code change can be merged. If the test result is not a reliable signal -- that a test failure means the code change broke the test -- developers can become mistrustful of the test results, which can lead to overlooking genuine failures. It is also a source of wasted time as developers must re-run test suites and investigate spurious failures.


Potential root causes
^^^^^^^^^^^^^^^^^^^^^

System state
~~~~~~~~~~~~

Broadly speaking, a flaky test indicates that the test relies on some system state that is not being appropriately controlled - the test environment is not sufficiently isolated. Higher level tests are more likely to be flaky as they rely on more state.

Flaky tests sometimes appear when a test suite is run in parallel (such as use of pytest-xdist). This can indicate a test is reliant on test ordering:

- Perhaps a different test is failing to clean up after itself and leaving behind data which causes the flaky test to fail.
- The flaky test may rely on data from a previous test that doesn't clean up after itself, and in parallel runs that previous test is not always present.
- Tests that modify global state typically cannot be run in parallel at all (see the isolation sketch below).
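
One common mitigation, sketched below under the assumption that the shared state is the filesystem, is an ``autouse`` fixture that gives every test a private working directory (the fixture name is hypothetical):

.. code-block:: python

    import pytest


    @pytest.fixture(autouse=True)
    def clean_scratch_dir(tmp_path, monkeypatch):
        # Run each test from its own empty directory so that no test
        # can observe files left behind by another.
        monkeypatch.chdir(tmp_path)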

Overly strict assertion
~~~~~~~~~~~~~~~~~~~~~~~

Overly strict assertions can cause problems with floating point comparison as well as timing issues. `pytest.approx <https://docs.pytest.org/en/latest/reference.html#pytest-approx>`_ is useful here.
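
For instance, an exact floating point comparison can fail depending on rounding; ``pytest.approx`` makes the tolerance explicit. A minimal sketch:

.. code-block:: python

    import pytest


    def test_total():
        # 0.1 + 0.2 is not exactly 0.3 in binary floating point;
        # approx compares within a sensible default tolerance instead.
        assert 0.1 + 0.2 == pytest.approx(0.3)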

Pytest features
^^^^^^^^^^^^^^^

Xfail strict
~~~~~~~~~~~~

:ref:`pytest.mark.xfail ref` with ``strict=False`` can be used to mark a test so that its failure does not cause the whole build to break. This could be considered like a manual quarantine, and is rather dangerous to use permanently.
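
A minimal sketch of such a quarantine (the test name and reason text are hypothetical):

.. code-block:: python

    import pytest


    @pytest.mark.xfail(strict=False, reason="flaky, quarantined until fixed")
    def test_sometimes_fails():
        # A failure here is reported as xfail and does not break the build;
        # an unexpected pass is reported as xpass.
        ...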

PYTEST_CURRENT_TEST
~~~~~~~~~~~~~~~~~~~

:ref:`pytest current test env` may be useful for figuring out "which test got stuck".
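
pytest exposes the currently running test through the ``PYTEST_CURRENT_TEST`` environment variable, so a watchdog thread or signal handler can report it when a run hangs. A small sketch:

.. code-block:: python

    import os


    def report_current_test():
        # The value looks like "path/to/test_x.py::test_name (call)".
        print(os.environ.get("PYTEST_CURRENT_TEST", "<no test running>"))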

Plugins
~~~~~~~

Rerunning any failed tests can mitigate the negative effects of flaky tests by giving them additional chances to pass, so that the overall build does not fail. Several pytest plugins support this:

* `flaky <https://github.com/box/flaky>`_
* `pytest-flakefinder <https://github.com/dropbox/pytest-flakefinder>`_ - `blog post <https://blogs.dropbox.com/tech/2016/03/open-sourcing-pytest-tools/>`_
* `pytest-rerunfailures <https://github.com/pytest-dev/pytest-rerunfailures>`_ (usage sketch after this list)
* `pytest-replay <https://github.com/ESSS/pytest-replay>`_: helps to reproduce locally crashes or flaky tests observed during CI runs.
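
For example, with pytest-rerunfailures installed, every failing test can be retried a fixed number of times (a sketch assuming the plugin's ``--reruns`` option):

.. code-block:: bash

    pip install pytest-rerunfailures
    pytest --reruns 3    # retry each failing test up to 3 times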

Plugins to deliberately randomize tests can help expose tests with state problems:

* `pytest-random-order <https://github.com/jbasko/pytest-random-order>`_
* `pytest-randomly <https://github.com/pytest-dev/pytest-randomly>`_


Other general strategies
^^^^^^^^^^^^^^^^^^^^^^^^

Split up test suites
~~~~~~~~~~~~~~~~~~~~

It can be common to split a single test suite into two, such as unit vs integration, and only use the unit test suite as a CI gate. This also helps keep build times manageable as high level tests tend to be slower. However, it does mean that code which breaks the build can be merged, so extra vigilance is needed for monitoring the integration test results.


Video/screenshot on failure
~~~~~~~~~~~~~~~~~~~~~~~~~~~

For UI tests these are important for understanding what the state of the UI was when the test failed. pytest-splinter can be used with plugins like pytest-bdd and can `save a screenshot on test failure <https://pytest-splinter.readthedocs.io/en/latest/#automatic-screenshots-on-test-failure>`_, which can help to isolate the cause.


Delete or rewrite the test
~~~~~~~~~~~~~~~~~~~~~~~~~~

If the functionality is covered by other tests, perhaps the test can be removed. If not, perhaps it can be rewritten at a lower level, which will remove the flakiness or make its source more apparent.


Quarantine
~~~~~~~~~~

Mark Lapierre discusses the `Pros and Cons of Quarantined Tests <https://dev.to/mlapierre/pros-and-cons-of-quarantined-tests-2emj>`_ in a post from 2018.


CI tools that rerun on failure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Azure Pipelines (the Azure cloud CI/CD tool, formerly Visual Studio Team Services or VSTS) has a feature to `identify flaky tests <https://docs.microsoft.com/en-us/azure/devops/release-notes/2017/dec-11-vsts#identify-flaky-tests>`_ and rerun failed tests.


Research
^^^^^^^^

This is a limited list; please submit an issue or pull request to expand it!

* Gao, Zebao, Yalan Liang, Myra B. Cohen, Atif M. Memon, and Zhen Wang. "Making system user interactive tests repeatable: When and what should we control?" In *Software Engineering (ICSE), 2015 IEEE/ACM 37th IEEE International Conference on*, vol. 1, pp. 55-65. IEEE, 2015. `PDF <http://www.cs.umd.edu/~atif/pubs/gao-icse15.pdf>`__
* Palomba, Fabio, and Andy Zaidman. "Does refactoring of test smells induce fixing flaky tests?" In *Software Maintenance and Evolution (ICSME), 2017 IEEE International Conference on*, pp. 1-12. IEEE, 2017. `PDF in Google Drive <https://drive.google.com/file/d/10HdcCQiuQVgW3yYUJD-TSTq1NbYEprl0/view>`__
* Bell, Jonathan, Owolabi Legunsen, Michael Hilton, Lamyaa Eloussi, Tifany Yung, and Darko Marinov. "DeFlaker: Automatically detecting flaky tests." In *Proceedings of the 2018 International Conference on Software Engineering*. 2018. `PDF <https://www.jonbell.net/icse18-deflaker.pdf>`__


Resources
^^^^^^^^^

* `Eradicating Non-Determinism in Tests <https://martinfowler.com/articles/nonDeterminism.html>`_ by Martin Fowler, 2011
* `No more flaky tests on the Go team <https://www.thoughtworks.com/insights/blog/no-more-flaky-tests-go-team>`_ by Pavan Sudarshan, 2012
* `The Build That Cried Broken: Building Trust in your Continuous Integration Tests <https://www.youtube.com/embed/VotJqV4n8ig>`_ talk (video) by `Angie Jones <http://angiejones.tech/>`_ at SeleniumConf Austin 2017
* `Test and Code Podcast: Flaky Tests and How to Deal with Them <https://testandcode.com/50>`_ by Brian Okken and Anthony Shaw, 2018
* Microsoft:

  * `How we approach testing VSTS to enable continuous delivery <https://blogs.msdn.microsoft.com/bharry/2017/06/28/testing-in-a-cloud-delivery-cadence/>`_ by Brian Harry MS, 2017
  * `Eliminating Flaky Tests <https://docs.microsoft.com/en-us/azure/devops/learn/devops-at-microsoft/eliminating-flaky-tests>`_ blog and talk (video) by Munil Shah, 2017

* Google:

  * `Flaky Tests at Google and How We Mitigate Them <https://testing.googleblog.com/2016/05/flaky-tests-at-google-and-how-we.html>`_ by John Micco, 2016
  * `Where do Google's flaky tests come from? <https://docs.google.com/document/d/1mZ0-Kc97DI_F3tf_GBW_NB_aqka-P1jVOsFfufxqUUM/edit#heading=h.ec0r4fypsleh>`_ by Jeff Listfield, 2017

@@ -26,9 +26,9 @@ a per-session Database object::

# content of conftest.py
class Database(object):
    def __init__(self):
        print ("database instance created")
        print("database instance created")
    def destroy(self):
        print ("database instance destroyed")
        print("database instance destroyed")

def pytest_funcarg__db(request):
    return request.cached_setup(setup=DataBase,

@@ -7,9 +7,6 @@ Installation and Getting Started

**PyPI package name**: `pytest <https://pypi.org/project/pytest/>`_

**Dependencies**: `py <https://pypi.org/project/py/>`_,
`colorama (Windows) <https://pypi.org/project/colorama/>`_,

**Documentation as PDF**: `download latest <https://media.readthedocs.org/pdf/pytest/latest/pytest.pdf>`_

``pytest`` is a framework that makes building simple and scalable tests easy. Tests are expressive and readable—no boilerplate code required. Get started in minutes with a small unit test or complex functional test for your application or library.

@@ -20,14 +17,18 @@ Installation and Getting Started

Install ``pytest``
----------------------------------------

1. Run the following command in your command line::
1. Run the following command in your command line:

.. code-block:: bash

pip install -U pytest

2. Check that you installed the correct version::
2. Check that you installed the correct version:

.. code-block:: bash

$ pytest --version
This is pytest version 3.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py
This is pytest version 4.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py

.. _`simpletest`:

@@ -43,12 +44,15 @@ Create a simple test function with just four lines of code::

def test_answer():
    assert func(3) == 5

That’s it. You can now execute the test function::
That’s it. You can now execute the test function:

.. code-block:: pytest

$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_sample.py F [100%]

@@ -90,7 +94,9 @@ Use the ``raises`` helper to assert that some code raises an exception::

with pytest.raises(SystemExit):
    f()

Execute the test function with “quiet” reporting mode::
Execute the test function with “quiet” reporting mode:

.. code-block:: pytest

$ pytest -q test_sysexit.py
. [100%]

@@ -111,7 +117,9 @@ Once you develop multiple tests, you may want to group them into a class. pytest

x = "hello"
assert hasattr(x, 'check')

``pytest`` discovers all tests following its :ref:`Conventions for Python test discovery <test discovery>`, so it finds both ``test_`` prefixed functions. There is no need to subclass anything. We can simply run the module by passing its filename::
``pytest`` discovers all tests following its :ref:`Conventions for Python test discovery <test discovery>`, so it finds both ``test_`` prefixed functions. There is no need to subclass anything. We can simply run the module by passing its filename:

.. code-block:: pytest

$ pytest -q test_class.py
.F [100%]

@@ -138,10 +146,12 @@ Request a unique temporary directory for functional tests

# content of test_tmpdir.py
def test_needsfiles(tmpdir):
    print (tmpdir)
    print(tmpdir)
    assert 0

List the name ``tmpdir`` in the test function signature and ``pytest`` will lookup and call a fixture factory to create the resource before performing the test function call. Before the test runs, ``pytest`` creates a unique-per-test-invocation temporary directory::
List the name ``tmpdir`` in the test function signature and ``pytest`` will look up and call a fixture factory to create the resource before performing the test function call. Before the test runs, ``pytest`` creates a unique-per-test-invocation temporary directory:

.. code-block:: pytest

$ pytest -q test_tmpdir.py
F [100%]

@@ -151,7 +161,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look

tmpdir = local('PYTEST_TMPDIR/test_needsfiles0')

    def test_needsfiles(tmpdir):
        print (tmpdir)
        print(tmpdir)
>       assert 0
E       assert 0

@@ -162,7 +172,9 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look

More info on tmpdir handling is available at :ref:`Temporary directories and files <tmpdir handling>`.

Find out what kind of builtin :ref:`pytest fixtures <fixtures>` exist with the command::
Find out what kind of builtin :ref:`pytest fixtures <fixtures>` exist with the command:

.. code-block:: bash

pytest --fixtures # shows builtin and custom fixtures

@@ -7,12 +7,13 @@ Good Integration Practices

Install package with pip
-------------------------------------------------

For development, we recommend to use virtualenv_ environments and pip_
for installing your application and any dependencies
as well as the ``pytest`` package itself. This ensures your code and
dependencies are isolated from the system Python installation.
For development, we recommend you use venv_ for virtual environments
(or virtualenv_ for Python 2.7) and
pip_ for installing your application and any dependencies,
as well as the ``pytest`` package itself.
This ensures your code and dependencies are isolated from your system Python installation.

First you need to place a ``setup.py`` file in the root of your package with the following minimum content::
Next, place a ``setup.py`` file in the root of your package with the following minimum content::

from setuptools import setup, find_packages

@@ -41,8 +42,8 @@ Conventions for Python test discovery

* In those directories, search for ``test_*.py`` or ``*_test.py`` files, imported by their `test package name`_.
* From those files, collect test items:

  * ``test_`` prefixed test functions or methods outside of class
  * ``test_`` prefixed test functions or methods inside ``Test`` prefixed test classes (without an ``__init__`` method)
  * ``test`` prefixed test functions or methods outside of class
  * ``test`` prefixed test functions or methods inside ``Test`` prefixed test classes (without an ``__init__`` method)

For examples of how to customize your test discovery see :doc:`example/pythoncollection`.

@@ -72,8 +73,18 @@ to keep tests separate from actual application code (often a good idea)::

test_view.py
...

This way your tests can run easily against an installed version
of ``mypkg``.
This has the following benefits:

* Your tests can run against an installed version after executing ``pip install .``.
* Your tests can run against the local copy with an editable install after executing ``pip install --editable .``.
* If you don't have a ``setup.py`` file and are relying on the fact that Python by default puts the current
  directory in ``sys.path`` to import your package, you can execute ``python -m pytest`` to execute the tests against the
  local copy directly, without using ``pip``.

.. note::

    See :ref:`pythonpath` for more information about the difference between calling ``pytest`` and
    ``python -m pytest``.

Note that using this scheme your test files must have **unique names**, because
``pytest`` will import them as *top-level* modules since there are no packages

@@ -175,3 +175,13 @@ Previous to version 2.4 to set a break point in code one needed to use ``pytest.

This is no longer needed and one can use the native ``import pdb;pdb.set_trace()`` call directly.

For more details see :ref:`breakpoints`.

"compat" properties
-------------------

.. deprecated:: 3.9

Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances has long
been documented as deprecated, but started to emit warnings from pytest ``3.9`` onward.

Users should just ``import pytest`` and access those objects using the ``pytest`` module.
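
For example, a ``conftest.py`` hook that previously reached a class through a node (e.g. ``item.Function``) can use the ``pytest`` namespace instead; a minimal sketch with a hypothetical marker:

.. code-block:: python

    import pytest


    def pytest_collection_modifyitems(items):
        for item in items:
            # Use pytest.Function rather than the deprecated item.Function.
            if isinstance(item, pytest.Function):
                item.add_marker(pytest.mark.slow)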

@@ -22,12 +22,15 @@ An example of a simple test:

assert inc(3) == 5


To execute it::
To execute it:

.. code-block:: pytest

$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_sample.py F [100%]

@@ -5,7 +5,7 @@ License

Distributed under the terms of the `MIT`_ license, pytest is free and open source software.

::
.. code-block:: text

The MIT License (MIT)


@@ -14,6 +14,7 @@

.. _`distribute docs`:
.. _`distribute`: https://pypi.org/project/distribute/
.. _`pip`: https://pypi.org/project/pip/
.. _`venv`: https://docs.python.org/3/library/venv.html/
.. _`virtualenv`: https://pypi.org/project/virtualenv/
.. _hudson: http://hudson-ci.org/
.. _jenkins: http://jenkins-ci.org/

@@ -9,11 +9,15 @@ Logging

pytest captures log messages of level ``WARNING`` or above automatically and displays them in their own section
for each failed test in the same manner as captured stdout and stderr.

Running without options::
Running without options:

.. code-block:: bash

pytest

Shows failed tests like so::
Shows failed tests like so:

.. code-block:: pytest

----------------------- Captured stdlog call ----------------------
test_reporting.py 26 WARNING text going to logger

@@ -27,12 +31,16 @@ By default each captured log message shows the module, line number, log level

and message.

If desired the log and date format can be specified to
anything that the logging module supports by passing specific formatting options::
anything that the logging module supports by passing specific formatting options:

.. code-block:: bash

pytest --log-format="%(asctime)s %(levelname)s %(message)s" \
       --log-date-format="%Y-%m-%d %H:%M:%S"

Shows failed tests like so::
Shows failed tests like so:

.. code-block:: pytest

----------------------- Captured stdlog call ----------------------
2010-04-10 14:48:44 WARNING text going to logger

@@ -51,7 +59,9 @@ These options can also be customized through ``pytest.ini`` file:

log_date_format = %Y-%m-%d %H:%M:%S

Further it is possible to disable reporting of captured content (stdout,
stderr and logs) on failed tests completely with::
stderr and logs) on failed tests completely with:

.. code-block:: bash

pytest --show-capture=no

@@ -133,7 +143,6 @@ the records for the ``setup`` and ``call`` stages during teardown like so:

.. code-block:: python

@pytest.fixture
def window(caplog):
    window = create_window()

@@ -198,6 +207,9 @@ option names are:

* ``log_file_format``
* ``log_file_date_format``

You can call ``set_log_path()`` to customize the log_file path dynamically. This functionality
is considered **experimental**.
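
A sketch of calling it from a ``conftest.py`` hook, assuming the plugin is looked up under its internal ``logging-plugin`` name (experimental, as noted above):

.. code-block:: python

    import os

    import pytest


    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_runtest_setup(item):
        # Route each test's log records to a per-test file under logs/.
        logging_plugin = item.config.pluginmanager.get_plugin("logging-plugin")
        logging_plugin.set_log_path(
            os.path.join("logs", item.nodeid.replace("::", "__") + ".log")
        )
        yield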

.. _log_release_notes:

Release notes

@@ -29,9 +29,11 @@ which also serve as documentation.

Raising errors on unknown marks: --strict
-----------------------------------------

When the ``--strict`` command-line flag is passed, any marks not registered in the ``pytest.ini`` file will trigger an error.
When the ``--strict`` command-line flag is passed, any unknown marks applied
with the ``@pytest.mark.name_of_the_mark`` decorator will trigger an error.
Marks defined or added by pytest or by a plugin will not trigger an error.

Marks can be registered like this:
Marks can be registered in ``pytest.ini`` like this:

.. code-block:: ini

@@ -156,4 +158,4 @@ More details can be found in the `original PR <https://github.com/pytest-dev/pyt

.. note::

    in a future major release of pytest we will introduce class based markers,
    at which points markers will no longer be limited to instances of :py:class:`Mark`
    at which point markers will no longer be limited to instances of :py:class:`Mark`

@@ -12,7 +12,9 @@ Running tests written for nose

Usage
-------------

After :ref:`installation` type::
After :ref:`installation` type:

.. code-block:: bash

python setup.py develop # make sure tests can import our package
pytest # instead of 'nosetests'

@@ -50,12 +50,15 @@ to an expected output::

Here, the ``@parametrize`` decorator defines three different ``(test_input,expected)``
tuples so that the ``test_eval`` function will run three times using
them in turn::
them in turn:

.. code-block:: pytest

$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 3 items

test_expectation.py ..F [100%]

@@ -78,6 +81,21 @@ them in turn::

test_expectation.py:8: AssertionError
==================== 1 failed, 2 passed in 0.12 seconds ====================

.. note::

    pytest by default escapes any non-ascii characters used in unicode strings
    for the parametrization because it has several downsides.
    If however you would like to use unicode strings in parametrization and see them in the terminal as is (non-escaped), use this option in your ``pytest.ini``:

    .. code-block:: ini

        [pytest]
        disable_test_id_escaping_and_forfeit_all_rights_to_community_support = True

    Keep in mind however that this might cause unwanted side effects and
    even bugs depending on the OS used and plugins currently installed, so use it at your own risk.


As designed in this example, only one pair of input/output values fails
the simple test function. And as usual with test function arguments,
you can see the ``input`` and ``output`` values in the traceback.

@@ -99,12 +117,15 @@ for example with the builtin ``mark.xfail``::

def test_eval(test_input, expected):
    assert eval(test_input) == expected

Let's run this::
Let's run this:

.. code-block:: pytest

$ pytest
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 3 items

test_expectation.py ..x [100%]

@@ -114,6 +135,10 @@ Let's run this::

The one parameter set which caused a failure previously now
shows up as an "xfailed (expected to fail)" test.

In case the values provided to ``parametrize`` result in an empty list - for
example, if they're dynamically generated by some function - the behaviour of
pytest is defined by the :confval:`empty_parameter_set_mark` option.

To get all combinations of multiple parametrized arguments you can stack
``parametrize`` decorators:
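
The original listing is unchanged by this diff; a minimal sketch of what stacking looks like, with hypothetical argument names:

.. code-block:: python

    import pytest


    @pytest.mark.parametrize("x", [0, 1])
    @pytest.mark.parametrize("y", [2, 3])
    def test_foo(x, y):
        # Runs once per combination: (0, 2), (1, 2), (0, 3), (1, 3).
        pass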

@@ -162,13 +187,17 @@ command line option and the parametrization of our test function::

metafunc.parametrize("stringinput",
                     metafunc.config.getoption('stringinput'))

If we now pass two stringinput values, our test will run twice::
If we now pass two stringinput values, our test will run twice:

.. code-block:: pytest

$ pytest -q --stringinput="hello" --stringinput="world" test_strings.py
.. [100%]
2 passed in 0.12 seconds

Let's also run with a stringinput that will lead to a failing test::
Let's also run with a stringinput that will lead to a failing test:

.. code-block:: pytest

$ pytest -q --stringinput="!" test_strings.py
F [100%]

@@ -190,12 +219,14 @@ As expected our test function fails.

If you don't specify a stringinput it will be skipped because
``metafunc.parametrize()`` will be called with an empty parameter
list::
list:

.. code-block:: pytest

$ pytest -q -rs test_strings.py
s [100%]
========================= short test summary info ==========================
SKIP [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1
SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1
1 skipped in 0.12 seconds

Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across

@@ -27,7 +27,7 @@ Here is a little annotated list for some popular plugins:

for `twisted <http://twistedmatrix.com>`_ apps, starting a reactor and
processing deferreds from test functions.

* `pytest-cov <https://pypi.org/project/pytest-cov/>`_:
* `pytest-cov <https://pypi.org/project/pytest-cov/>`__:
  coverage reporting, compatible with distributed testing

* `pytest-xdist <https://pypi.org/project/pytest-xdist/>`_:

@@ -59,9 +59,9 @@ To see a complete list of all plugins with their latest testing

status against different pytest and Python versions, please visit
`plugincompat <http://plugincompat.herokuapp.com/>`_.

You may also discover more plugins through a `pytest- pypi.python.org search`_.
You may also discover more plugins through a `pytest- pypi.org search`_.

.. _`pytest- pypi.python.org search`: https://pypi.org/search/?q=pytest-
.. _`pytest- pypi.org search`: https://pypi.org/search/?q=pytest-


.. _`available installable plugins`:

@@ -69,23 +69,26 @@ You may also discover more plugins through a `pytest- pypi.python.org search`_.

Requiring/Loading plugins in a test module or conftest file
-----------------------------------------------------------

You can require plugins in a test module or a conftest file like this::
You can require plugins in a test module or a conftest file like this:

pytest_plugins = "myapp.testsupport.myplugin",
.. code-block:: python

    pytest_plugins = ("myapp.testsupport.myplugin",)

When the test module or conftest plugin is loaded the specified plugins
will be loaded as well.

pytest_plugins = "myapp.testsupport.myplugin"

which will import the specified module as a ``pytest`` plugin.

.. note::
    Requiring plugins using a ``pytest_plugins`` variable in non-root
    ``conftest.py`` files is deprecated. See
    :ref:`full explanation <requiring plugins in non-root conftests>`
    in the Writing plugins section.

.. note::
    The name ``pytest_plugins`` is reserved and should not be used as a
    name for a custom plugin module.


.. _`findpluginname`:

Finding out which plugins are active

@@ -75,7 +75,7 @@ Issues

------

* By using ``request.getfuncargvalue()`` we rely on actual fixture function
  execution to know what fixtures are involved, due to it's dynamic nature
  execution to know what fixtures are involved, due to its dynamic nature
* More importantly, ``request.getfuncargvalue()`` cannot be combined with
  parametrized fixtures, such as ``extra_context``
* This is very inconvenient if you wish to extend an existing test suite by

22 doc/en/py27-py34-deprecation.rst Normal file
@@ -0,0 +1,22 @@

Python 2.7 and 3.4 support plan
===============================

Python 2.7 EOL is fast approaching, with
upstream support `ending in 2020 <https://legacy.python.org/dev/peps/pep-0373/#id4>`__.
Python 3.4's last release is scheduled for
`March 2019 <https://www.python.org/dev/peps/pep-0429/#release-schedule>`__. pytest is one of
the participating projects of https://python3statement.org.

We plan to drop support for Python 2.7 and 3.4 at the same time with the release of **pytest 5.0**,
scheduled to be released by **mid-2019**. Thanks to the `python_requires <https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires>`__ ``setuptools`` option,
Python 2.7 and Python 3.4 users using a modern ``pip`` version
will install the last compatible pytest ``4.X`` version automatically even if ``5.0`` or later
are available on PyPI.

During the period **from mid-2019 to 2020**, the pytest core team plans to make
bug-fix releases of the pytest ``4.X`` series by back-porting patches to the ``4.x-maintenance``
branch.

**After 2020**, the core team will no longer actively back-port patches, but the ``4.x-maintenance``
branch will continue to exist so the community itself can contribute patches. The
core team will be happy to accept those patches and make new ``4.X`` releases **until mid-2020**.

@@ -1,4 +1,3 @@

Reference
=========

@@ -49,7 +48,7 @@ pytest.main

.. autofunction:: _pytest.config.main

pytest.param
~~~~~~~~~~~~~
~~~~~~~~~~~~

.. autofunction:: pytest.param(*values, [id], [marks])

@@ -84,6 +83,12 @@ pytest.warns

.. autofunction:: pytest.warns(expected_warning: Exception, [match])
   :with:

pytest.freeze_includes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

**Tutorial**: :ref:`freezing-pytest`.

.. autofunction:: pytest.freeze_includes

.. _`marks ref`:

@@ -111,6 +116,7 @@ Add warning filters to marked test items.

A *warning specification string*, which is composed of contents of the tuple ``(action, message, category, module, lineno)``
as specified in `The Warnings filter <https://docs.python.org/3/library/warnings.html#warning-filter>`_ section of
the Python documentation, separated by ``":"``. Optional fields can be omitted.
Module names passed for filtering are not regex-escaped.

For example:

@@ -172,7 +178,7 @@ Mark a test function as using the given fixture names.

.. warning::

    This mark can be used with *test functions* only, having no affect when applied
    This mark has no effect when applied
    to a **fixture** function.

.. py:function:: pytest.mark.usefixtures(*names)

@@ -192,16 +198,18 @@ Marks a test function as *expected to fail*.

.. py:function:: pytest.mark.xfail(condition=None, *, reason=None, raises=None, run=True, strict=False)

:type condition: bool or str
:param condition: ``True/False`` if the condition should be marked as xfail or a :ref:`condition string <string conditions>`.
:param condition:
    Condition for marking the test function as xfail (``True/False`` or a
    :ref:`condition string <string conditions>`).
:keyword str reason: Reason why the test function is marked as xfail.
:keyword Exception raises: Exception subclass expected to be raised by the test function; other exceptions will fail the test.
:keyword bool run:
    If the test function should actually be executed. If ``False``, the function will always xfail and will
    not be executed (useful a function is segfaulting).
    not be executed (useful if a function is segfaulting).
:keyword bool strict:
    * If ``False`` (the default) the function will be shown in the terminal output as ``xfailed`` if it fails
      and as ``xpass`` if it passes. In both cases this will not cause the test suite to fail as a whole. This
      is particularly useful to mark *flaky* tests (tests that random at fail) to be tackled later.
      is particularly useful to mark *flaky* tests (tests that fail at random) to be tackled later.
    * If ``True``, the function will be shown in the terminal output as ``xfailed`` if it fails, but if it
      unexpectedly passes then it will **fail** the test suite. This is particularly useful to mark functions
      that are always failing and there should be a clear indication if they unexpectedly start to pass (for example

@@ -492,6 +500,32 @@ Each recorded warning is an instance of :class:`warnings.WarningMessage`.

differently; see :ref:`ensuring_function_triggers`.


tmp_path
~~~~~~~~

**Tutorial**: :doc:`tmpdir`

.. currentmodule:: _pytest.tmpdir

.. autofunction:: tmp_path()
   :no-auto-options:


tmp_path_factory
~~~~~~~~~~~~~~~~

**Tutorial**: :ref:`tmp_path_factory example`

.. _`tmp_path_factory factory api`:

``tmp_path_factory`` instances have the following methods:

.. currentmodule:: _pytest.tmpdir

.. automethod:: TempPathFactory.mktemp
.. automethod:: TempPathFactory.getbasetemp


tmpdir
~~~~~~

@@ -551,6 +585,8 @@ Initialization hooks called for plugins and ``conftest.py`` files.

.. autofunction:: pytest_sessionstart
.. autofunction:: pytest_sessionfinish

.. autofunction:: pytest_plugin_registered

Test running hooks
~~~~~~~~~~~~~~~~~~

@@ -574,6 +610,8 @@ into interactive debugging when a test failure occurs.

The :py:mod:`_pytest.terminal` reporter specifically uses
the reporting hook to print information about a test run.

.. autofunction:: pytest_pyfunc_call

Collection hooks
~~~~~~~~~~~~~~~~

@@ -583,6 +621,7 @@ Collection hooks

.. autofunction:: pytest_ignore_collect
.. autofunction:: pytest_collect_directory
.. autofunction:: pytest_collect_file
.. autofunction:: pytest_pycollect_makemodule

For influencing the collection of objects in Python modules
you can use the following hook:

@@ -596,12 +635,15 @@ items, delete or otherwise amend the test items:

.. autofunction:: pytest_collection_modifyitems

.. autofunction:: pytest_collection_finish

Reporting hooks
~~~~~~~~~~~~~~~

Session related reporting hooks:

.. autofunction:: pytest_collectstart
.. autofunction:: pytest_make_collect_report
.. autofunction:: pytest_itemcollected
.. autofunction:: pytest_collectreport
.. autofunction:: pytest_deselected

@@ -611,7 +653,6 @@ Session related reporting hooks:

.. autofunction:: pytest_terminal_summary
.. autofunction:: pytest_fixture_setup
.. autofunction:: pytest_fixture_post_finalizer
.. autofunction:: pytest_logwarning
.. autofunction:: pytest_warning_captured

And here is the central hook for reporting about

@@ -718,13 +759,6 @@ MarkGenerator

:members:


MarkInfo
~~~~~~~~

.. autoclass:: _pytest.mark.MarkInfo
   :members:


Mark
~~~~

@@ -798,6 +832,33 @@ Special Variables

pytest treats some global variables in a special manner when defined in a test module.


collect_ignore
~~~~~~~~~~~~~~

**Tutorial**: :ref:`customizing-test-collection`

Can be declared in *conftest.py files* to exclude test directories or modules.
Needs to be ``list[str]``.

.. code-block:: python

    collect_ignore = ["setup.py"]


collect_ignore_glob
~~~~~~~~~~~~~~~~~~~

**Tutorial**: :ref:`customizing-test-collection`

Can be declared in *conftest.py files* to exclude test directories or modules
with Unix shell-style wildcards. Needs to be ``list[str]`` where ``str`` can
contain glob patterns.

.. code-block:: python

    collect_ignore_glob = ["*_ignore.py"]


pytest_plugins
~~~~~~~~~~~~~~

@@ -821,7 +882,7 @@ pytest_mark

**Tutorial**: :ref:`scoped-marking`

Can be declared at the **global** level in *test modules* to apply one or more :ref:`marks <marks ref>` to all
test functions and methods. Can be either a single mark or a sequence of marks.
test functions and methods. Can be either a single mark or a list of marks.

.. code-block:: python

@@ -834,7 +895,7 @@ test functions and methods. Can be either a single mark or a sequence of marks.

import pytest

pytestmark = (pytest.mark.integration, pytest.mark.slow)
pytestmark = [pytest.mark.integration, pytest.mark.slow]

PYTEST_DONT_REWRITE (module docstring)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -890,6 +951,12 @@ Here is a list of builtin configuration options that may be written in a ``pytes

file, usually located at the root of your repository. All options must be under a ``[pytest]`` section
(``[tool:pytest]`` for ``setup.cfg`` files).

.. warning::
    Usage of ``setup.cfg`` is not recommended except for very simple use cases. ``.cfg``
    files use a different parser than ``pytest.ini`` and ``tox.ini`` which might cause hard to track
    down problems.
    When possible, it is recommended to use the latter files to hold your pytest configuration.

Configuration file options may be overwritten in the command-line by using ``-o/--override``, which can also be
passed multiple times. The expected format is ``name=value``. For example::

@@ -976,6 +1043,7 @@ passed multiple times. The expected format is ``name=value``. For example::

* ``skip`` skips tests with an empty parameterset (default)
* ``xfail`` marks tests with an empty parameterset as xfail(run=False)
* ``fail_at_collect`` raises an exception if parametrize collects an empty parameter set

.. code-block:: ini

@@ -1009,6 +1077,20 @@ passed multiple times. The expected format is ``name=value``. For example::

This tells pytest to ignore deprecation warnings and turn all other warnings
into errors. For more information please refer to :ref:`warnings`.

.. confval:: junit_family

    .. versionadded:: 4.2

    Configures the format of the generated JUnit XML file. The possible options are:

    * ``xunit1`` (or ``legacy``): produces old style output, compatible with the xunit 1.0 format. **This is the default**.
    * ``xunit2``: produces `xunit 2.0 style output <https://github.com/jenkinsci/xunit-plugin/blob/xunit-2.3.2/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd>`__,
      which should be more compatible with latest Jenkins versions.

    .. code-block:: ini

        [pytest]
        junit_family = xunit2

.. confval:: junit_suite_name

@@ -1179,8 +1261,11 @@ passed multiple times. The expected format is ``name=value``. For example::

.. confval:: markers

    List of markers that are allowed in test functions, enforced when ``--strict`` command-line argument is used.
    You can use a marker name per line, indented from the option name.
    When the ``--strict`` command-line argument is used, only known markers -
    defined in code by core pytest or some plugin - are allowed.
    You can list additional markers in this setting to add them to the whitelist.

    You can list one marker name per line, indented from the option name.

    .. code-block:: ini

@@ -1,4 +1,4 @@

# pinning sphinx to 1.4.* due to search issues with rtd:
# https://github.com/rtfd/readthedocs-sphinx-ext/issues/25
sphinx ==1.4.*
pygments-pytest>=1.1.0
sphinx>=1.8.2,<2.1
sphinxcontrib-trio
sphinx-removed-in>=0.2.0

@@ -22,7 +22,9 @@ it's an **xpass** and will be reported in the test summary.

``pytest`` counts and lists *skip* and *xfail* tests separately. Detailed
information about skipped/xfailed tests is not shown by default to avoid
cluttering the output. You can use the ``-r`` option to see details
corresponding to the "short" letters shown in the test progress::
corresponding to the "short" letters shown in the test progress:

.. code-block:: bash

pytest -rxXs # show extra info on xfailed, xpassed, and skipped tests

@@ -58,18 +60,20 @@ by calling the ``pytest.skip(reason)`` function:

if not valid_config():
    pytest.skip("unsupported configuration")

The imperative method is useful when it is not possible to evaluate the skip condition
during import time.

It is also possible to skip the whole module using
``pytest.skip(reason, allow_module_level=True)`` at the module level:

.. code-block:: python

    import sys
    import pytest

    if not pytest.config.getoption("--custom-flag"):
        pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True)
    if not sys.platform.startswith("win"):
        pytest.skip("skipping windows-only tests", allow_module_level=True)

The imperative method is useful when it is not possible to evaluate the skip condition
during import time.

**Reference**: :ref:`pytest.mark.skip ref`

@@ -80,7 +84,7 @@ during import time.

If you wish to skip something conditionally then you can use ``skipif`` instead.
Here is an example of marking a test function to be skipped
when run on an interpreter earlier than Python3.6 ::
when run on an interpreter earlier than Python3.6::

import sys
@pytest.mark.skipif(sys.version_info < (3,6),

@@ -307,7 +311,9 @@ investigated later.

Ignoring xfail
~~~~~~~~~~~~~~

By specifying on the commandline::
By specifying on the commandline:

.. code-block:: bash

pytest --runxfail

@@ -321,12 +327,15 @@ Here is a simple test file with the several usages:

.. literalinclude:: example/xfail_demo.py

Running it with the report-on-xfail option gives this output::
Running it with the report-on-xfail option gives this output:

.. code-block:: pytest

example $ pytest -rx xfail_demo.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/example, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/example
collected 7 items

xfail_demo.py xxxxxxx [100%]

@@ -23,6 +23,11 @@ Books

Talks and blog postings
---------------------------------------------

- pytest: recommendations, basic packages for testing in Python and Django, Andreu Vallbona, PyconES 2017 (`slides in english <http://talks.apsl.io/testing-pycones-2017/>`_, `video in spanish <https://www.youtube.com/watch?v=K20GeR-lXDk>`_)

- `pytest advanced, Andrew Svetlov (Russian, PyCon Russia, 2016)
  <https://www.youtube.com/watch?v=7KgihdKTWY4>`_.

- `Pythonic testing, Igor Starikov (Russian, PyNsk, November 2016)
  <https://www.youtube.com/watch?v=_92nfdd5nK8>`_.


@@ -5,6 +5,83 @@

Temporary directories and files
================================================

The ``tmp_path`` fixture
------------------------

.. versionadded:: 3.9


You can use the ``tmp_path`` fixture which will
provide a temporary directory unique to the test invocation,
created in the `base temporary directory`_.

``tmp_path`` is a ``pathlib/pathlib2.Path`` object. Here is an example test usage:

.. code-block:: python

    # content of test_tmp_path.py
    import os

    CONTENT = u"content"


    def test_create_file(tmp_path):
        d = tmp_path / "sub"
        d.mkdir()
        p = d / "hello.txt"
        p.write_text(CONTENT)
        assert p.read_text() == CONTENT
        assert len(list(tmp_path.iterdir())) == 1
        assert 0

Running this would result in a passed test except for the last
``assert 0`` line which we use to look at values:

.. code-block:: pytest

$ pytest test_tmp_path.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_tmp_path.py F [100%]

================================= FAILURES =================================
_____________________________ test_create_file _____________________________

tmp_path = PosixPath('PYTEST_TMPDIR/test_create_file0')

    def test_create_file(tmp_path):
        d = tmp_path / "sub"
        d.mkdir()
        p = d / "hello.txt"
        p.write_text(CONTENT)
        assert p.read_text() == CONTENT
        assert len(list(tmp_path.iterdir())) == 1
>       assert 0
E       assert 0

test_tmp_path.py:13: AssertionError
========================= 1 failed in 0.12 seconds =========================

.. _`tmp_path_factory example`:

The ``tmp_path_factory`` fixture
--------------------------------

.. versionadded:: 3.9


The ``tmp_path_factory`` is a session-scoped fixture which can be used
to create arbitrary temporary directories from any other fixture or test.

It is intended to replace ``tmpdir_factory``, and returns :class:`pathlib.Path` instances.

See :ref:`tmp_path_factory API <tmp_path_factory factory api>` for details.
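
A minimal sketch of session-scoped use (the fixture name and file contents are hypothetical):

.. code-block:: python

    import pytest


    @pytest.fixture(scope="session")
    def shared_datadir(tmp_path_factory):
        # mktemp() returns a fresh pathlib.Path under the base temp dir.
        d = tmp_path_factory.mktemp("data")
        (d / "settings.json").write_text(u"{}")
        return d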

The 'tmpdir' fixture
--------------------

@@ -25,12 +102,15 @@ and more. Here is an example test usage::

assert 0

Running this would result in a passed test except for the last
``assert 0`` line which we use to look at values::
``assert 0`` line which we use to look at values:

.. code-block:: pytest

$ pytest test_tmpdir.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_tmpdir.py F [100%]

@@ -98,7 +178,9 @@ the system temporary directory. The base name will be ``pytest-NUM`` where

``NUM`` will be incremented with each test run. Moreover, entries older
than 3 temporary directories will be removed.

You can override the default temporary directory setting like this::
You can override the default temporary directory setting like this:

.. code-block:: bash

pytest --basetemp=mydir


@@ -22,16 +22,15 @@ Almost all ``unittest`` features are supported:

* ``@unittest.skip`` style decorators;
* ``setUp/tearDown``;
* ``setUpClass/tearDownClass()``;
* ``setUpClass/tearDownClass``;
* ``setUpModule/tearDownModule``;

.. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol
.. _`setUpModule/tearDownModule`: https://docs.python.org/3/library/unittest.html#setupmodule-and-teardownmodule
.. _`subtests`: https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests

Up to this point pytest does not have support for the following features:

* `load_tests protocol`_;
* `setUpModule/tearDownModule`_;
* `subtests`_;

Benefits out of the box

@@ -123,12 +122,15 @@ fixture definition::

The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that
the pytest fixture function ``db_class`` is called once per class.
Due to the deliberately failing assert statements, we can take a look at
the ``self.db`` values in the traceback::
the ``self.db`` values in the traceback:

.. code-block:: pytest

$ pytest test_unittest_db.py
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_unittest_db.py FF [100%]

@@ -200,7 +202,9 @@ used for all methods of the class where it is defined. This is a

shortcut for using a ``@pytest.mark.usefixtures("initdir")`` marker
on the class like in the previous example.

Running this test module ...::
Running this test module ...:

.. code-block:: pytest

$ pytest -q test_unittest_cleandir.py
. [100%]
291
doc/en/usage.rst
291
doc/en/usage.rst
@@ -12,7 +12,9 @@ Calling pytest through ``python -m pytest``

 .. versionadded:: 2.0

-You can invoke testing through the Python interpreter from the command line::
+You can invoke testing through the Python interpreter from the command line:
+
+.. code-block:: text

     python -m pytest [...]

@@ -34,7 +36,7 @@ Running ``pytest`` can result in six different exit codes:
 Getting help on version, option names, environment variables
 --------------------------------------------------------------

-::
+.. code-block:: bash

     pytest --version   # shows where pytest was imported from
     pytest --fixtures  # show available builtin function arguments

@@ -46,7 +48,9 @@ Getting help on version, option names, environment variables
 Stopping after the first (or N) failures
 ---------------------------------------------------

-To stop the testing process after the first (N) failures::
+To stop the testing process after the first (N) failures:
+
+.. code-block:: bash

     pytest -x           # stop after first failure
     pytest --maxfail=2  # stop after two failures

@@ -60,19 +64,19 @@ Pytest supports several ways to run and select tests from the command-line.

 **Run tests in a module**

-::
+.. code-block:: bash

     pytest test_mod.py

 **Run tests in a directory**

-::
+.. code-block:: bash

     pytest testing/

 **Run tests by keyword expressions**

-::
+.. code-block:: bash

     pytest -k "MyClass and not method"
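A hedged sketch of a module against which the keyword expression above selects one test and deselects the other; the names mirror the ``TestMyClass`` example referenced in the next hunk:

.. code-block:: python

    # sketch: `pytest -k "MyClass and not method"` runs test_something below
    # and deselects test_method_simple (its name contains "method")
    class TestMyClass:
        def test_something(self):
            assert True

        def test_method_simple(self):
            assert True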
@@ -87,18 +91,22 @@ The example above will run ``TestMyClass.test_something`` but not ``TestMyClass
 Each collected test is assigned a unique ``nodeid`` which consists of the module filename followed
 by specifiers like class names, function names and parameters from parametrization, separated by ``::`` characters.

-To run a specific test within a module::
+To run a specific test within a module:
+
+.. code-block:: bash

     pytest test_mod.py::test_func


-Another example specifying a test method in the command line::
+Another example specifying a test method in the command line:
+
+.. code-block:: bash

     pytest test_mod.py::TestClass::test_method

 **Run tests by marker expressions**

-::
+.. code-block:: bash

     pytest -m slow

@@ -108,7 +116,7 @@ For more information see :ref:`marks <mark>`.

 **Run tests from packages**

-::
+.. code-block:: bash

     pytest --pyargs pkg.testing

@@ -118,7 +126,9 @@ This will import ``pkg.testing`` and use its filesystem location to find and run
 Modifying Python traceback printing
 ----------------------------------------------

-Examples for modifying traceback printing::
+Examples for modifying traceback printing:
+
+.. code-block:: bash

     pytest --showlocals  # show local variables in tracebacks
     pytest -l            # show local variables (shortcut)
@@ -147,18 +157,83 @@ Detailed summary report

 .. versionadded:: 2.9

-The ``-r`` flag can be used to display test results summary at the end of the test session,
+The ``-r`` flag can be used to display a "short test summary info" at the end of the test session,
 making it easy in large test suites to get a clear picture of all failures, skips, xfails, etc.

-Example::
+Example:
+
+.. code-block:: python
+
+    # content of test_example.py
+    import pytest
+
+
+    @pytest.fixture
+    def error_fixture():
+        assert 0
+
+
+    def test_ok():
+        print("ok")
+
+
+    def test_fail():
+        assert 0
+
+
+    def test_error(error_fixture):
+        pass
+
+
+    def test_skip():
+        pytest.skip("skipping this test")
+
+
+    def test_xfail():
+        pytest.xfail("xfailing this test")
+
+
+    @pytest.mark.xfail(reason="always xfail")
+    def test_xpass():
+        pass
+
+
+.. code-block:: pytest

     $ pytest -ra
     =========================== test session starts ============================
-    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
-    rootdir: $REGENDOC_TMPDIR, inifile:
-    collected 0 items
+    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+    cachedir: $PYTHON_PREFIX/.pytest_cache
+    rootdir: $REGENDOC_TMPDIR
+    collected 6 items

-    ======================= no tests ran in 0.12 seconds =======================
+    test_example.py .FEsxX                                               [100%]
+
+    ================================== ERRORS ==================================
+    _______________________ ERROR at setup of test_error _______________________
+
+    @pytest.fixture
+    def error_fixture():
+    >       assert 0
+    E       assert 0
+
+    test_example.py:6: AssertionError
+    ================================= FAILURES =================================
+    ________________________________ test_fail _________________________________
+
+    def test_fail():
+    >       assert 0
+    E       assert 0
+
+    test_example.py:14: AssertionError
+    ========================= short test summary info ==========================
+    SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test
+    XFAIL test_example.py::test_xfail
+      reason: xfailing this test
+    XPASS test_example.py::test_xpass always xfail
+    ERROR test_example.py::test_error
+    FAILED test_example.py::test_fail
+    1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds
 The ``-r`` option accepts a number of characters after it, with ``a`` used above meaning "all except passes".

@@ -173,15 +248,79 @@ Here is the full list of available characters that can be used:

 - ``P`` - passed with output
 - ``a`` - all except ``pP``

-More than one character can be used, so for example to only see failed and skipped tests, you can execute::
+More than one character can be used, so for example to only see failed and skipped tests, you can execute:
+
+.. code-block:: pytest

     $ pytest -rfs
     =========================== test session starts ============================
-    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
-    rootdir: $REGENDOC_TMPDIR, inifile:
-    collected 0 items
+    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+    cachedir: $PYTHON_PREFIX/.pytest_cache
+    rootdir: $REGENDOC_TMPDIR
+    collected 6 items

-    ======================= no tests ran in 0.12 seconds =======================
+    test_example.py .FEsxX                                               [100%]
+
+    ================================== ERRORS ==================================
+    _______________________ ERROR at setup of test_error _______________________
+
+    @pytest.fixture
+    def error_fixture():
+    >       assert 0
+    E       assert 0
+
+    test_example.py:6: AssertionError
+    ================================= FAILURES =================================
+    ________________________________ test_fail _________________________________
+
+    def test_fail():
+    >       assert 0
+    E       assert 0
+
+    test_example.py:14: AssertionError
+    ========================= short test summary info ==========================
+    FAILED test_example.py::test_fail
+    SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test
+    1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds
+
+Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had
+captured output:
+
+.. code-block:: pytest
+
+    $ pytest -rpP
+    =========================== test session starts ============================
+    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+    cachedir: $PYTHON_PREFIX/.pytest_cache
+    rootdir: $REGENDOC_TMPDIR
+    collected 6 items
+
+    test_example.py .FEsxX                                               [100%]
+
+    ================================== ERRORS ==================================
+    _______________________ ERROR at setup of test_error _______________________
+
+    @pytest.fixture
+    def error_fixture():
+    >       assert 0
+    E       assert 0
+
+    test_example.py:6: AssertionError
+    ================================= FAILURES =================================
+    ________________________________ test_fail _________________________________
+
+    def test_fail():
+    >       assert 0
+    E       assert 0
+
+    test_example.py:14: AssertionError
+    ========================= short test summary info ==========================
+    PASSED test_example.py::test_ok
+    ================================== PASSES ==================================
+    _________________________________ test_ok __________________________________
+    --------------------------- Captured stdout call ---------------------------
+    ok
+    1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds

 .. _pdb-option:
@@ -191,13 +330,17 @@ Dropping to PDB_ (Python Debugger) on failures

 .. _PDB: http://docs.python.org/library/pdb.html

 Python comes with a builtin Python debugger called PDB_. ``pytest``
-allows one to drop into the PDB_ prompt via a command line option::
+allows one to drop into the PDB_ prompt via a command line option:
+
+.. code-block:: bash

     pytest --pdb

 This will invoke the Python debugger on every failure (or KeyboardInterrupt).
 Often you might only want to do this for the first failing test to understand
-a certain failure situation::
+a certain failure situation:
+
+.. code-block:: bash

     pytest -x --pdb           # drop to PDB on first failure, then end test session
     pytest --pdb --maxfail=3  # drop to PDB for first three failures

@@ -220,7 +363,9 @@ Dropping to PDB_ (Python Debugger) at the start of a test
 ----------------------------------------------------------

-``pytest`` allows one to drop into the PDB_ prompt immediately at the start of each test via a command line option::
+``pytest`` allows one to drop into the PDB_ prompt immediately at the start of each test via a command line option:
+
+.. code-block:: bash

     pytest --trace

@@ -239,10 +384,8 @@ in your code and pytest automatically disables its output capture for that test:

 * Output capture in other tests is not affected.
 * Any prior test output that has already been captured and will be processed as
   such.
 * Any later output produced within the same test will not be captured and will
   instead get sent directly to ``sys.stdout``. Note that this holds true even
   for test output occurring after you exit the interactive PDB_ tracing session
   and continue with the regular test run.
-* Output capture gets resumed when ending the debugger session (via the
-  ``continue`` command).
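A minimal sketch of the in-test debugging the hunk above describes; the values are illustrative, and the capture behaviour follows the bullet list:

.. code-block:: python

    # sketch: dropping into the debugger from inside a test; pytest disables
    # output capture for this test while the prompt is active
    def test_debugging_session():
        value = 21 * 2
        import pdb

        pdb.set_trace()  # interactive prompt opens here during the run
        assert value == 42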
 .. _`breakpoint-builtin`:

@@ -255,8 +398,8 @@ Pytest supports the use of ``breakpoint()`` with the following behaviours:

 - When ``breakpoint()`` is called and ``PYTHONBREAKPOINT`` is set to the default value, pytest will use the custom internal PDB trace UI instead of the system default ``Pdb``.
 - When tests are complete, the system will default back to the system ``Pdb`` trace UI.
-- If ``--pdb`` is called on execution of pytest, the custom internal Pdb trace UI is used on both ``breakpoint()`` and failed tests/unhandled exceptions.
-- If ``--pdbcls`` is used, the custom class debugger will be executed when a test fails (as expected within existing behaviour), but also when ``breakpoint()`` is called from within a test, the custom class debugger will be instantiated.
+- With ``--pdb`` passed to pytest, the custom internal Pdb trace UI is used with both ``breakpoint()`` and failed tests/unhandled exceptions.
+- ``--pdbcls`` can be used to specify a custom debugger class.
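A minimal sketch of the ``breakpoint()`` behaviour listed above (Python 3.7+; variable names illustrative):

.. code-block:: python

    # sketch: with PYTHONBREAKPOINT left at its default, this drops into
    # pytest's PDB-based trace UI during the test run
    def test_with_breakpoint():
        total = sum(range(5))
        breakpoint()  # debugging starts here
        assert total == 10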
 .. _durations:

@@ -265,16 +408,21 @@ Profiling test execution duration

 .. versionadded: 2.2

-To get a list of the slowest 10 test durations::
+To get a list of the slowest 10 test durations:
+
+.. code-block:: bash

     pytest --durations=10

+By default, pytest will not show test durations that are too small (<0.01s) unless ``-vv`` is passed on the command-line.
+
 Creating JUnitXML format files
 ----------------------------------------------------

 To create result files which can be read by Jenkins_ or other Continuous
-integration servers, use this invocation::
+integration servers, use this invocation:
+
+.. code-block:: bash

     pytest --junitxml=path

@@ -289,6 +437,20 @@ To set the name of the root test suite xml item, you can configure the ``junit_s

     [pytest]
     junit_suite_name = my_suite

+.. versionadded:: 4.0
+
+JUnit XML specification seems to indicate that ``"time"`` attribute
+should report total test execution times, including setup and teardown
+(`1 <http://windyroad.com.au/dl/Open%20Source/JUnit.xsd>`_, `2
+<https://www.ibm.com/support/knowledgecenter/en/SSQ2R2_14.1.0/com.ibm.rsar.analysis.codereview.cobol.doc/topics/cac_useresults_junit.html>`_).
+It is the default pytest behavior. To report just call durations
+instead, configure the ``junit_duration_report`` option like this:
+
+.. code-block:: ini
+
+    [pytest]
+    junit_duration_report = call

 .. _record_property example:

 record_property
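The heading above introduces the ``record_property`` fixture; a minimal sketch of its use (the key and value are illustrative):

.. code-block:: python

    # sketch: record_property adds a <property> entry for this test
    # to the JUnit XML report
    def test_record(record_property):
        record_property("example_key", 1)
        assert True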
@@ -478,16 +640,14 @@ Creating resultlog format files

 .. deprecated:: 3.0

-    This option is rarely used and is scheduled for removal in 4.0.
+    This option is rarely used and is scheduled for removal in 5.0.

     An alternative for users which still need similar functionality is to use the
     `pytest-tap <https://pypi.org/project/pytest-tap/>`_ plugin which provides
     a stream of test data.
-
-    If you have any concerns, please don't hesitate to
-    `open an issue <https://github.com/pytest-dev/pytest/issues>`_.
+    See `the deprecation docs <https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log>`__
+    for more information.

-To create plain-text machine-readable result files you can issue::
+To create plain-text machine-readable result files you can issue:
+
+.. code-block:: bash

     pytest --resultlog=path

@@ -500,7 +660,9 @@ by the `PyPy-test`_ web page to show test results over several revisions.

 Sending test report to online pastebin service
 -----------------------------------------------------

-**Creating a URL for each test failure**::
+**Creating a URL for each test failure**:
+
+.. code-block:: bash

     pytest --pastebin=failed

@@ -508,12 +670,30 @@ This will submit test run information to a remote Paste service and
 provide a URL for each failure. You may select tests as usual or add
 for example ``-x`` if you only want to send one particular failure.

-**Creating a URL for a whole test session log**::
+**Creating a URL for a whole test session log**:
+
+.. code-block:: bash

     pytest --pastebin=all

 Currently only pasting to the http://bpaste.net service is implemented.

+Early loading plugins
+---------------------
+
+You can early-load plugins (internal and external) explicitly in the command-line with the ``-p`` option::
+
+    pytest -p mypluginmodule
+
+The option receives a ``name`` parameter, which can be:
+
+* A full module dotted name, for example ``myproject.plugins``. This dotted name must be importable.
+* The entry-point name of a plugin. This is the name passed to ``setuptools`` when the plugin is
+  registered. For example to early-load the `pytest-cov <https://pypi.org/project/pytest-cov/>`__ plugin you can use::
+
+    pytest -p pytest_cov
+
 Disabling plugins
 -----------------

@@ -521,7 +701,9 @@ To disable loading specific plugins at invocation time, use the ``-p`` option
 together with the prefix ``no:``.

 Example: to disable loading the plugin ``doctest``, which is responsible for
-executing doctest tests from text files, invoke pytest like this::
+executing doctest tests from text files, invoke pytest like this:
+
+.. code-block:: bash

     pytest -p no:doctest
@@ -553,11 +735,30 @@ You can specify additional plugins to ``pytest.main``::

     pytest.main(["-qq"], plugins=[MyPlugin()])

 Running it will show that ``MyPlugin`` was added and its
-hook was invoked::
+hook was invoked:
+
+.. code-block:: pytest

     $ python myinvoke.py
-    .                                                                    [100%]*** test run reporting finishing
+    .FEsxX.                                                              [100%]*** test run reporting finishing
+
+    ================================== ERRORS ==================================
+    _______________________ ERROR at setup of test_error _______________________
+
+    @pytest.fixture
+    def error_fixture():
+    >       assert 0
+    E       assert 0
+
+    test_example.py:6: AssertionError
+    ================================= FAILURES =================================
+    ________________________________ test_fail _________________________________
+
+    def test_fail():
+    >       assert 0
+    E       assert 0
+
+    test_example.py:14: AssertionError
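A sketch of the driver script behind the ``$ python myinvoke.py`` session above, assuming the plugin hook prints the trailing message (the surrounding doc defines its own ``MyPlugin`` earlier in the section):

.. code-block:: python

    # content of myinvoke.py -- sketch of passing a plugin object to pytest.main
    import pytest


    class MyPlugin:
        def pytest_sessionfinish(self):
            print("*** test run reporting finishing")


    if __name__ == "__main__":
        pytest.main(["-qq"], plugins=[MyPlugin()])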
 .. note::
doc/en/warnings.rst

@@ -18,25 +18,31 @@ and displays them at the end of the session::

     def test_one():
         assert api_v1() == 1

-Running pytest now produces this output::
+Running pytest now produces this output:
+
+.. code-block:: pytest

     $ pytest test_show_warnings.py
     =========================== test session starts ============================
-    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
-    rootdir: $REGENDOC_TMPDIR, inifile:
+    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+    cachedir: $PYTHON_PREFIX/.pytest_cache
+    rootdir: $REGENDOC_TMPDIR
     collected 1 item

     test_show_warnings.py .                                              [100%]

     ============================= warnings summary =============================
-    $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2
-      warnings.warn(UserWarning("api v1, should use functions from v2"))
+    test_show_warnings.py::test_one
+      $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2
+        warnings.warn(UserWarning("api v1, should use functions from v2"))

     -- Docs: https://docs.pytest.org/en/latest/warnings.html
     =================== 1 passed, 1 warnings in 0.12 seconds ===================

 The ``-W`` flag can be passed to control which warnings will be displayed or even turn
-them into errors::
+them into errors:
+
+.. code-block:: pytest

     $ pytest -q test_show_warnings.py -W error::UserWarning
     F                                                                    [100%]
@@ -75,54 +81,6 @@ Both ``-W`` command-line option and ``filterwarnings`` ini option are based on P
 `-W option`_ and `warnings.simplefilter`_, so please refer to those sections in the Python
 documentation for other examples and advanced usage.

-Disabling warning summary
--------------------------
-
-Although not recommended, you can use the ``--disable-warnings`` command-line option to suppress the
-warning summary entirely from the test run output.
-
-Disabling warning capture entirely
-----------------------------------
-
-This plugin is enabled by default but can be disabled entirely in your ``pytest.ini`` file with:
-
-.. code-block:: ini
-
-    [pytest]
-    addopts = -p no:warnings
-
-Or passing ``-p no:warnings`` in the command-line. This might be useful if your test suites handles warnings
-using an external system.
-
-
-.. _`deprecation-warnings`:
-
-DeprecationWarning and PendingDeprecationWarning
-------------------------------------------------
-
-.. versionadded:: 3.8
-
-By default pytest will display ``DeprecationWarning`` and ``PendingDeprecationWarning`` if no other warning filters
-are configured.
-
-To disable showing ``DeprecationWarning`` and ``PendingDeprecationWarning`` warnings, you might define any warnings
-filter either in the command-line or in the ini file, or you can use:
-
-.. code-block:: ini
-
-    [pytest]
-    filterwarnings =
-        ignore::DeprecationWarning
-        ignore::PendingDeprecationWarning
-
-.. note::
-    This makes pytest more compliant with `PEP-0506 <https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners>`_ which suggests that those warnings should
-    be shown by default by test runners, but pytest doesn't follow ``PEP-0506`` completely because resetting all
-    warning filters like suggested in the PEP will break existing test suites that configure warning filters themselves
-    by calling ``warnings.simplefilter`` (see issue `#2430 <https://github.com/pytest-dev/pytest/issues/2430>`_
-    for an example of that).
-

 .. _`filterwarnings`:

 ``@pytest.mark.filterwarnings``
@@ -161,24 +119,6 @@ decorator or to all tests in a module by setting the ``pytestmark`` variable:

     pytestmark = pytest.mark.filterwarnings("error")

-
-.. note::
-
-    Except for these features, pytest does not change the python warning filter; it only captures
-    and displays the warnings which are issued with respect to the currently configured filter,
-    including changes to the filter made by test functions or by the system under test.
-
-.. note::
-
-    ``DeprecationWarning`` and ``PendingDeprecationWarning`` are hidden by the standard library
-    by default so you have to explicitly configure them to be displayed in your ``pytest.ini``:
-
-    .. code-block:: ini
-
-        [pytest]
-        filterwarnings =
-            once::DeprecationWarning
-            once::PendingDeprecationWarning
-

 *Credits go to Florian Schulze for the reference implementation in the* `pytest-warnings`_
 *plugin.*
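A runnable sketch of the mark shown above, narrowed to a single warning category (the category and message are illustrative):

.. code-block:: python

    # sketch: make one test treat UserWarning as an error via the mark
    import warnings

    import pytest


    @pytest.mark.filterwarnings("error::UserWarning")
    def test_user_warning_becomes_error():
        with pytest.raises(UserWarning):
            warnings.warn("api v1, should use functions from v2", UserWarning)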
@@ -187,6 +127,102 @@ decorator or to all tests in a module by setting the ``pytestmark`` variable:

 .. _warnings.simplefilter: https://docs.python.org/3/library/warnings.html#warnings.simplefilter
 .. _`pytest-warnings`: https://github.com/fschulze/pytest-warnings

+Disabling warnings summary
+--------------------------
+
+Although not recommended, you can use the ``--disable-warnings`` command-line option to suppress the
+warning summary entirely from the test run output.
+
+Disabling warning capture entirely
+----------------------------------
+
+This plugin is enabled by default but can be disabled entirely in your ``pytest.ini`` file with:
+
+.. code-block:: ini
+
+    [pytest]
+    addopts = -p no:warnings
+
+Or passing ``-p no:warnings`` in the command-line. This might be useful if your test suite handles warnings
+using an external system.
+
+
+.. _`deprecation-warnings`:
+
+DeprecationWarning and PendingDeprecationWarning
+------------------------------------------------
+
+.. versionadded:: 3.8
+.. versionchanged:: 3.9
+
+By default pytest will display ``DeprecationWarning`` and ``PendingDeprecationWarning`` warnings from
+user code and third-party libraries, as recommended by `PEP-0565 <https://www.python.org/dev/peps/pep-0565>`_.
+This helps users keep their code modern and avoid breakages when deprecated warnings are effectively removed.
+
+Sometimes it is useful to hide some specific deprecation warnings that happen in code that you have no control over
+(such as third-party libraries), in which case you might use the warning filters options (ini or marks) to ignore
+those warnings.
+
+For example:
+
+.. code-block:: ini
+
+    [pytest]
+    filterwarnings =
+        ignore:.*U.*mode is deprecated:DeprecationWarning
+
+
+This will ignore all warnings of type ``DeprecationWarning`` where the start of the message matches
+the regular expression ``".*U.*mode is deprecated"``.
+
+.. note::
+    If warnings are configured at the interpreter level, using
+    the `PYTHONWARNINGS <https://docs.python.org/3/using/cmdline.html#envvar-PYTHONWARNINGS>`_ environment variable or the
+    ``-W`` command-line option, pytest will not configure any filters by default.
+
+    Also pytest doesn't follow ``PEP-0506`` suggestion of resetting all warning filters because
+    it might break test suites that configure warning filters themselves
+    by calling ``warnings.simplefilter`` (see issue `#2430 <https://github.com/pytest-dev/pytest/issues/2430>`_
+    for an example of that).
+
+
+.. _`ensuring a function triggers a deprecation warning`:
+
+.. _ensuring_function_triggers:
+
+Ensuring code triggers a deprecation warning
+--------------------------------------------
+
+You can also call a global helper for checking
+that a certain function call triggers a ``DeprecationWarning`` or
+``PendingDeprecationWarning``::
+
+    import pytest
+
+
+    def test_global():
+        pytest.deprecated_call(myfunction, 17)
+
+By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be
+caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide
+them. If you wish to record them in your own code, use the
+command ``warnings.simplefilter('always')``::
+
+    import warnings
+    import pytest
+
+
+    def test_deprecation(recwarn):
+        warnings.simplefilter('always')
+        warnings.warn("deprecated", DeprecationWarning)
+        assert len(recwarn) == 1
+        assert recwarn.pop(DeprecationWarning)
+
+You can also use it as a contextmanager::
+
+    def test_global():
+        with pytest.deprecated_call():
+            myobject.deprecated_method()

 .. _`asserting warnings`:
@@ -197,7 +233,7 @@ decorator or to all tests in a module by setting the ``pytestmark`` variable:

 .. _warns:

 Asserting warnings with the warns function
------------------------------------------------
+------------------------------------------

 .. versionadded:: 2.8
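Before the hunks below, a minimal sketch of ``pytest.warns`` as used throughout this section; the ``match`` argument checks the warning message against a regular expression:

.. code-block:: python

    # sketch: assert that the enclosed code issues a matching UserWarning
    import warnings

    import pytest


    def test_warns_match():
        with pytest.warns(UserWarning, match="deprecated"):
            warnings.warn("this API is deprecated", UserWarning)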
@@ -255,7 +291,7 @@ Alternatively, you can examine raised warnings in detail using the

 .. _recwarn:

 Recording warnings
-------------------------
+------------------

 You can record raised warnings either using ``pytest.warns`` or with
 the ``recwarn`` fixture.
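A minimal sketch of the ``recwarn`` fixture named above (the warning text is illustrative):

.. code-block:: python

    # sketch: recwarn records warnings issued during the test
    import warnings


    def test_recwarn(recwarn):
        warnings.warn("deprecated config", UserWarning)
        assert len(recwarn) == 1
        w = recwarn.pop(UserWarning)
        assert "deprecated config" in str(w.message)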
@@ -293,43 +329,26 @@ warnings, or index into it to get a particular recorded warning.

 Full API: :class:`WarningsRecorder`.

-.. _`ensuring a function triggers a deprecation warning`:
-
-.. _ensuring_function_triggers:
-
-Ensuring a function triggers a deprecation warning
--------------------------------------------------------
-
-You can also call a global helper for checking
-that a certain function call triggers a ``DeprecationWarning`` or
-``PendingDeprecationWarning``::
-
-    import pytest
-
-    def test_global():
-        pytest.deprecated_call(myfunction, 17)
-
-By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be
-caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide
-them. If you wish to record them in your own code, use the
-command ``warnings.simplefilter('always')``::
-
-    import warnings
-    import pytest
-
-    def test_deprecation(recwarn):
-        warnings.simplefilter('always')
-        warnings.warn("deprecated", DeprecationWarning)
-        assert len(recwarn) == 1
-        assert recwarn.pop(DeprecationWarning)
-
-You can also use it as a contextmanager::
-
-    def test_global():
-        with pytest.deprecated_call():
-            myobject.deprecated_method()
+.. _custom_failure_messages:
+
+Custom failure messages
+-----------------------
+
+Recording warnings provides an opportunity to produce custom test
+failure messages for when no warnings are issued or other conditions
+are met.
+
+.. code-block:: python
+
+    def test():
+        with pytest.warns(Warning) as record:
+            f()
+        if not record:
+            pytest.fail("Expected a warning!")
+
+If no warnings are issued when calling ``f``, then ``not record`` will
+evaluate to ``True``. You can then call ``pytest.fail`` with a
+custom error message.

 .. _internal-warnings:
@@ -353,13 +372,14 @@ defines an ``__init__`` constructor, as this prevents the class from being insta

         def test_foo(self):
             assert 1 == 1

-::
+.. code-block:: pytest

     $ pytest test_pytest_warnings.py -q

     ============================= warnings summary =============================
-    $REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestWarning: cannot collect test class 'Test' because it has a __init__ constructor
-      class Test:
+    test_pytest_warnings.py:1
+      $REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestWarning: cannot collect test class 'Test' because it has a __init__ constructor
+        class Test:

     -- Docs: https://docs.pytest.org/en/latest/warnings.html
     1 warnings in 0.12 seconds
@@ -73,7 +73,7 @@ sub directory but not for other directories::

     a/conftest.py:
         def pytest_runtest_setup(item):
             # called for running each test in 'a' directory
-            print ("setting up", item)
+            print("setting up", item)

     a/test_sub.py:
         def test_sub():
@@ -386,40 +386,43 @@ return a result object, with which we can assert the tests' outcomes.

     result.assert_outcomes(passed=4)


-additionally it is possible to copy examples for a example folder before running pytest on it
+additionally it is possible to copy examples for an example folder before running pytest on it

-.. code:: ini
+.. code-block:: ini

     # content of pytest.ini
     [pytest]
     pytester_example_dir = .


-.. code:: python
+.. code-block:: python

     # content of test_example.py


     def test_plugin(testdir):
-       testdir.copy_example("test_example.py")
-       testdir.runpytest("-k", "test_example")
+        testdir.copy_example("test_example.py")
+        testdir.runpytest("-k", "test_example")


     def test_example():
-       pass
+        pass

-.. code::
+.. code-block:: pytest

     $ pytest
     =========================== test session starts ============================
-    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+    platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
+    cachedir: $PYTHON_PREFIX/.pytest_cache
     rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
     collected 2 items

     test_example.py ..                                                   [100%]

     ============================= warnings summary =============================
-    $REGENDOC_TMPDIR/test_example.py:4: PytestExperimentalApiWarning: testdir.copy_example is an experimental api that may change over time
-      testdir.copy_example("test_example.py")
+    test_example.py::test_plugin
+      $REGENDOC_TMPDIR/test_example.py:4: PytestExperimentalApiWarning: testdir.copy_example is an experimental api that may change over time
+        testdir.copy_example("test_example.py")

     -- Docs: https://docs.pytest.org/en/latest/warnings.html
     =================== 2 passed, 1 warnings in 0.12 seconds ===================
@@ -93,7 +93,15 @@ Remarks:

 * It is possible for setup/teardown pairs to be invoked multiple times
   per testing process.

+* teardown functions are not called if the corresponding setup function existed
+  and failed/was skipped.
+
+* Prior to pytest-4.2, xunit-style functions did not obey the scope rules of fixtures, so
+  it was possible, for example, for a ``setup_method`` to be called before a
+  session-scoped autouse fixture.
+
+  Now the xunit-style functions are integrated with the fixture mechanism and obey the proper
+  scope rules of fixtures involved in the call.

 .. _`unittest.py module`: http://docs.python.org/library/unittest.html
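A sketch of the xunit-style pair the remarks above discuss (class and attribute names are illustrative):

.. code-block:: python

    # sketch: per-test setup/teardown in xunit style
    class TestArea:
        def setup_method(self, method):
            self.width, self.height = 3, 4

        def test_area(self):
            assert self.width * self.height == 12

        def teardown_method(self, method):
            del self.width
            del self.height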
@@ -1,4 +1,5 @@
 import json
+
 import py
 import requests

@@ -64,9 +65,9 @@ def report(issues):
         print(title)
         # print()
         # lines = body.split("\n")
-        # print ("\n".join(lines[:3]))
+        # print("\n".join(lines[:3]))
         # if len(lines) > 3 or len(body) > 240:
-        # print ("...")
+        # print("...")
     print("\n\nFound %s open issues" % len(issues))
@@ -1,9 +1,11 @@
 [build-system]
 requires = [
-    "setuptools",
+    # sync with setup.py until we discard non-pep-517/518
+    "setuptools>=40.0",
     "setuptools-scm",
     "wheel",
 ]
 build-backend = "setuptools.build_meta"

 [tool.towncrier]
 package = "pytest"

@@ -15,7 +17,12 @@ template = "changelog/_template.rst"

 [[tool.towncrier.type]]
 directory = "removal"
-name = "Deprecations and Removals"
+name = "Removals"
 showcontent = true

+[[tool.towncrier.type]]
+directory = "deprecation"
+name = "Deprecations"
+showcontent = true
+
 [[tool.towncrier.type]]
@@ -1,6 +0,0 @@
-REM install pypy using choco
-REM redirect to a file because choco install python.pypy is too noisy. If the command fails, write output to console
-choco install python.pypy > pypy-inst.log 2>&1 || (type pypy-inst.log & exit /b 1)
-set PATH=C:\tools\pypy\pypy;%PATH% # so tox can find pypy
-echo PyPy installed
-pypy --version

@@ -1,10 +0,0 @@
-REM scripts called by AppVeyor to setup the environment variables to enable coverage
-if not defined PYTEST_NO_COVERAGE (
-    set "COVERAGE_FILE=%CD%\.coverage"
-    set "COVERAGE_PROCESS_START=%CD%\.coveragerc"
-    set "_PYTEST_TOX_COVERAGE_RUN=coverage run -m"
-    set "_PYTEST_TOX_EXTRA_DEP=coverage-enable-subprocess"
-    echo Coverage setup completed
-) else (
-    echo Skipping coverage setup, PYTEST_NO_COVERAGE is set
-)
@@ -2,9 +2,13 @@
 """
 Invoke development tasks.
 """
 import argparse
-from colorama import init, Fore
 from pathlib import Path
-from subprocess import check_output, check_call, call
+from subprocess import call
+from subprocess import check_call
+from subprocess import check_output
+
+from colorama import Fore
+from colorama import init


 def announce(version):
21  scripts/retry.cmd  (new file)
|
||||
@echo off
|
||||
rem Source: https://github.com/appveyor/ci/blob/master/scripts/appveyor-retry.cmd
|
||||
rem initiate the retry number
|
||||
set retryNumber=0
|
||||
set maxRetries=3
|
||||
|
||||
:RUN
|
||||
%*
|
||||
set LastErrorLevel=%ERRORLEVEL%
|
||||
IF %LastErrorLevel% == 0 GOTO :EOF
|
||||
set /a retryNumber=%retryNumber%+1
|
||||
IF %reTryNumber% == %maxRetries% (GOTO :FAILED)
|
||||
|
||||
:RETRY
|
||||
set /a retryNumberDisp=%retryNumber%+1
|
||||
@echo Command "%*" failed with exit code %LastErrorLevel%. Retrying %retryNumberDisp% of %maxRetries%
|
||||
GOTO :RUN
|
||||
|
||||
: FAILED
|
||||
@echo Sorry, we tried running command for %maxRetries% times and all attempts were unsuccessful!
|
||||
EXIT /B %LastErrorLevel%
|
Some files were not shown because too many files have changed in this diff.