Compare commits
264 Commits
SHA1:

7dad3cb157 745737e337 eae1055fb0 2e1c36bbb6 e3cea41dcd c0f091d540
d4cd1aad8e 39a297afe6 068548f7a9 0c163ce624 fc95877622 03b694a1d0
79c2a47985 9289d77a80 2eee2d0099 0ea46e6aef b0ac3581dd e7ed45a5d4
7cf859085e 424479cf0f 98dcd764bc ccd67733fb 5873ca5146 d2903507d8
9232b88df3 e98f77037e 672e42e558 36d7df4542 e5eaf02e19 1d7b574b31
d16fdb378c cc092afd3b fd4485a540 3b8779ad17 3e875178ad 97b671057d
d6fc489b2b 4e8438afc8 b1f8038abf 5603a0cd4b 3c649cf91d 82d573e391
b7b96b24d8 990e7bf3b9 bcdc3d0154 faea7e1407 7c701948d5 899b804ec1
66bd4e485a 8ff8dd3ae9 0d17dc1e19 83e0b52294 1265612465 40eed363e8
5ccd3f2fc5 ba878c6d9d 8792261df1 c2ed29070a 38104dfc92 2e55c4ba61
e6ad6e02d2 d08c4ce0ad 309e3d38a0 91e2b23258 6a4492a22d 0dd378da30
7b273b8577 d98521b0d9 52011e84d3 de583ed7a3 f8480caae4 47d9e6ca1f
6f0a33dfdc 1641d00cb1 2d7a32f7ea 06acbb9f5e 7b630d9080 98de64badc
1e241e1f2a ff2c18fedb bf64a800d6 efc57391eb dc65aa1fea 80ad3fb8ed
bc7110931a 8c508612ec b7d046527e 42804c52e8 d88a3712c5 abfedd692e
d0b048c86d b6f069f4c3 edc2e5ab82 6da9a087f8 e19462d581 a811fabb43
07e76cbef2 76d5c9e4f4 abcadc4202 b7f6a9f3fd c8264385ea ad8131be9e
54c88a6cf3 115f15600f 65a145e2a7 5719a72eeb 4bc4495115 c66e9f8f0f
52eafdc21e 85c0d5481b fd9055fd11 780bdda95a fd4b461290 040062e40c
d853e9167a d50ad270f0 30c93701a7 b507e1754c 748fce94fd d6281b4206
b61ed2cf7e 9263f30c88 8f9a88ef7a c64af0d9ce 9181df42da 74e1a49dd7
468b1241a5 24744cf5cf ffc969b6c2 0567a8ee77 580c8525f0 d6010aa0c9
c46e2cbbc7 c47835f5ec 412b56f7cf faba432996 2ba23e8d08 d74f852fd6
1728798e81 53a8d20d88 61446faa17 9711e335d9 080a9d2f12 15af7e1662
e42cbc714f 5e26e6e553 d0a4d348fe 494be731e3 8ae244a06a f91049cec9
270d0f89ba e382ed4245 ef7cb47b1e 6efde60b8b fd059359cc c2c504797e
84f9f45f98 28aa4c891e 6ff0fdb977 b0837693d0 52851e4388 cbe31f3748
c9bbdf4f10 f984e94fca b4fe91943d 7d6317802e 0365e5c3a0 e6859406f1
51cff6f106 68e58e1493 9f7eac0ba1 b0e31dca86 7d10a57514 2c0f6207e9
adb12d0d4f 844c141d10 02d94e69f0 1bc56f9838 98ea8fae32 36288c5134
83a3cc9c94 0c04b44919 a5e8860feb 8d95f89a6a 3bca62e9e4 72b4534a0c
21b4280126 30a9debaf1 4c5718c78d c93b949878 cf34adb75f f824a73143
e45a33f029 064e79761c f7713c47e8 8e4e2ba244 3b8935c533 ce8678e6d5
2e1f6c85f6 ca5e6830c6 69cbac8fb5 7301981f32 555ba4159d f47ae74981
e061ace099 85d52481b1 47379d4a79 0cb9d26d83 95cc114b34 9d716a39d6
923dcfd620 b5467645d3 a65380941d 17d7c60735 81f822d528 0b340aa1f6
859915dc5e a8f4f49a82 e18c3ed494 2263fcf6b7 b96d552dbd 2b2c1e5b7b
7b63fa5966 b18040337a 42e0d7970c ed0a4fe23b bb0632c7ad 84ab194516
530cae9204 9dc43e84dc 8af265da04 e3b9382122 c3f4eb6d57 892aa457be
8f7b53e55b d27c377817 50abe43216 ddc67ca13a a1d3da4027 85e7b11ef5
77e1f93ca1 94b1ce65c6 f5b992f68a 24a458b4c8 ac1d277225 9777703e03
f43cda9681 b47fdbe0a7 cde970be69 cfd43a9b02 ebd10aa6b4 24db492f53
1b387bea62 9528b64f7f b96559149c 3388d82c1c 9985a7cdca 1d00c5e109
0559f11aa5 2893cddb68 6910641266 4b81a07303 7eb765578a e2cf3e0932
@@ -9,6 +9,7 @@ lib/
bin/
include/
.Python/
.env/

# These lines are suggested according to the svn:ignore property
# Feel free to enable them by uncommenting them
@@ -27,6 +28,7 @@ dist/
*.egg-info
issue/
env/
env3/
3rdparty/
.tox
.cache
.hgtags
@@ -65,3 +65,8 @@ af860de70cc3f157ac34ca1d4bf557a057bff775 2.4.0
8d051f89184bfa3033f5e59819dff9f32a612941 2.4.2
a064ad64d167508a8e9e73766b1a4e6bd10c85db 2.5.0
039d543d1ca02a716c0b0de9a7131beb8021e8a2 2.5.1
421d3b4d150d901de24b1cbeb8955547b1420483 2.5.2
60725b17a9d1af4100abb8be3f9f4ddf6262bf34 2.6.0
60725b17a9d1af4100abb8be3f9f4ddf6262bf34 2.6.0
88af949b9611494e2c65d528f9e565b00fb7e8ca 2.6.0
a4f9639702baa3eb4f3b16e162f74f7b69f3f9e1 2.6.1
AUTHORS
@@ -38,3 +38,10 @@ Anthon van der Neut
Mark Abramowitz
Piotr Banaszkiewicz
Jurko Gospodnetić
Marc Schlaich
Christopher Gilling
Daniel Grana
Andy Freeland
Trevor Bekolay
David Mohr
Nicolas Delaby
CHANGELOG
@@ -1,3 +1,165 @@
2.6.2
-----------

- Added function pytest.freeze_includes(), which makes it easy to embed
  pytest into executables using tools like cx_freeze.
  See docs for examples and rationale. Thanks Bruno Oliveira.
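A minimal cx_Freeze ``setup.py`` sketch of the usage this entry describes; the project name and the ``runtests.py`` entry script are illustrative, while ``pytest.freeze_includes()`` is the API introduced here:

```python
# Sketch only: "runtests.py" is a hypothetical entry script that calls
# pytest.main(); freeze_includes() supplies pytest's hidden imports so
# the freezer bundles them into the executable.
from cx_Freeze import setup, Executable
import pytest

setup(
    name="app_tests",
    version="0.1",
    executables=[Executable("runtests.py")],
    options={"build_exe": {"includes": pytest.freeze_includes()}},
)
```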
- Improve assertion rewriting cache invalidation precision.

- fixed issue561: adapt autouse fixture example for python3.

- fixed issue453: assertion rewriting issue with __repr__ containing
  "\n{", "\n}" and "\n~".

- fix issue560: correctly display code if an "else:" or "finally:" is
  followed by statements on the same line.

- Fix example in monkeypatch documentation, thanks t-8ch.

- fix issue572: correct tmpdir doc example for python3.

- Do not mark as universal wheel because Python 2.6 is different from
  other builds due to the extra argparse dependency. Fixes issue566.
  Thanks sontek.

2.6.1
-----------------------------------

- No longer show line numbers in the --verbose output, the output is now
  purely the nodeid. The line number is still shown in failure reports.
  Thanks Floris Bruynooghe.

- fix issue437 where assertion rewriting could cause pytest-xdist slaves
  to collect different tests. Thanks Bruno Oliveira.

- fix issue555: add "errors" attribute to capture-streams to satisfy
  some distutils and possibly other code accessing sys.stdout.errors.

- fix issue547: capsys/capfd also work when output capturing ("-s") is disabled.

- address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via
  an optional "raises=EXC" argument where EXC can be a single exception
  or a tuple of exception classes. Thanks David Mohr for the complete
  PR.
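The ``raises=`` behaviour just described, in a minimal test (test names are illustrative):

```python
import pytest

@pytest.mark.xfail(raises=IndexError)
def test_out_of_bounds():
    [][0]  # fails with IndexError -> reported as an expected failure (xfail)

@pytest.mark.xfail(raises=(TypeError, ValueError))
def test_bad_conversion():
    int("not a number")  # any exception in the tuple counts as expected
```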

- fix integration of pytest with unittest.mock.patch decorator when
  it uses the "new" argument. Thanks Nicolas Delaby for test and PR.

- fix issue with detecting conftest files if the arguments contain
  "::" node id specifications (copy-pasted from "-v" output)

- fix issue544 by only removing "@NUM" at the end of "::" separated parts
  and if the part has a ".py" extension
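A hedged sketch of that stripping rule (hypothetical helper, not the actual pytest internals): a trailing "@NUM" is removed only from a node-id part that ends in ".py".

```python
import re

def strip_linenum(part):
    # Remove a trailing "@NUM" only when it follows a ".py" filename part.
    if re.search(r"\.py@\d+$", part):
        return re.sub(r"@\d+$", "", part)
    return part

assert strip_linenum("test_foo.py@12") == "test_foo.py"
assert strip_linenum("TestClass@12") == "TestClass@12"  # left untouched
```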

- don't use py.std import helper, rather import things directly.
  Thanks Bruno Oliveira.

2.6
-----------------------------------

- Cache exceptions from fixtures according to their scope (issue 467).

- fix issue537: Avoid importing old assertion reinterpretation code by default.

- fix issue364: shorten and enhance tracebacks representation by default.
  The new "--tb=auto" option (default) will only display long tracebacks
  for the first and last entry. You can get the old behaviour of printing
  all entries as long entries with "--tb=long". Also short entries by
  default are now printed very similarly to "--tb=native" ones.

- fix issue514: teach assertion reinterpretation about private class attributes

- change -v output to include full node IDs of tests. Users can copy
  a node ID from a test run, including line number, and use it as a
  positional argument in order to run only a single test.

- fix issue 475: fail early and comprehensibly if calling
  pytest.raises with wrong exception type.

- fix issue516: tell in getting-started about current dependencies.

- cleanup setup.py a bit and specify supported versions. Thanks Jurko
  Gospodnetic for the PR.

- change XPASS colour to yellow rather than red when tests are run
  with -v.

- fix issue473: work around mock putting an unbound method into a class
  dict when double-patching.

- fix issue498: if a fixture finalizer fails, make sure that
  the fixture is still invalidated.

- fix issue453: the result of the pytest_assertrepr_compare hook now gets
  its newlines escaped so that format_exception does not blow up.

- internal new warning system: pytest will now produce warnings when
  it detects oddities in your test collection or execution.
  Warnings are ultimately sent to a new pytest_logwarning hook which is
  currently only implemented by the terminal plugin which displays
  warnings in the summary line and shows more details when -rw (report on
  warnings) is specified.

- change skips into warnings for test classes with an __init__ and
  callables in test modules which look like a test but are not functions.

- fix issue436: improved finding of initial conftest files from command
  line arguments by using the result of parse_known_args rather than
  the previous flaky heuristics. Thanks Marc Abramowitz for tests
  and initial fixing approaches in this area.

- fix issue #479: properly handle nose/unittest(2) SkipTest exceptions
  during collection/loading of test modules. Thanks to Marc Schlaich
  for the complete PR.

- fix issue490: include pytest_load_initial_conftests in documentation
  and improve docstring.

- fix issue472: clarify that ``pytest.config.getvalue()`` cannot work
  if it's triggered ahead of command line parsing.

- merge PR123: improved integration with mock.patch decorator on tests.

- fix issue412: messing with stdout/stderr FD-level streams is now
  captured without crashes.

- fix issue483: trial/py33 works now properly. Thanks Daniel Grana for PR.

- improve example for pytest integration with "python setup.py test"
  which now has a generic "-a" or "--pytest-args" option where you
  can pass additional options as a quoted string. Thanks Trevor Bekolay.

- simplified internal capturing mechanism and made it more robust
  against tests or setups changing FD1/FD2, also better integrated
  now with pytest.pdb() in single tests.

- improvements to pytest's own test-suite leakage detection, courtesy of PRs
  from Marc Abramowitz

- fix issue492: avoid leak in test_writeorg. Thanks Marc Abramowitz.

- fix issue493: don't run tests in doc directory with ``python setup.py test``
  (use tox -e doctesting for that)

- fix issue486: better reporting and handling of early conftest loading failures

- some cleanup and simplification of internal conftest handling.

- work a bit harder to break reference cycles when catching exceptions.
  Thanks Jurko Gospodnetic.

- fix issue443: fix skip examples to use proper comparison. Thanks Alex
  Groenholm.

- support nose-style ``__test__`` attribute on modules, classes and
  functions, including unittest-style Classes. If set to False, the
  test will not be collected.
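The ``__test__`` convention described above, in brief (names are illustrative):

```python
def test_helper(x):
    # Named like a test, but actually a utility function.
    return x * 2
test_helper.__test__ = False  # tell pytest not to collect it

class TestDataBuilder:
    __test__ = False  # exclude this whole class from collection

    def test_like_method(self):
        pass
```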

- fix issue512: show "<notset>" for arguments which might not be set
  in monkeypatch plugin. Improves output in documentation.


2.5.2
-----------------------------------
HOWTORELEASE.rst (new file)
@@ -0,0 +1,44 @@

How to release pytest (draft)
--------------------------------------------

1. bump version numbers in setup.py and pytest/__init__.py

2. check and finalize CHANGELOG

3. write doc/en/announce/pytest-VERSION.txt and include
   it in doc/en/announce/index.txt

4. use devpi for uploading a release tarball to a staging area:

   - ``devpi use https://devpi.net/USER/dev``
   - ``devpi upload``

5. run from multiple machines:

   - ``devpi use https://devpi.net/USER/dev``
   - ``devpi test pytest-VERSION``

6. check that tests pass for relevant combinations with
   ``devpi list pytest``
   or look at failures with "devpi list -f pytest".
   There will be some failed environments like e.g. the py33-trial
   or py27-pexpect tox environments on Win32 platforms
   which is ok (tox does not support skipping on a
   per-platform basis yet).

7. go to "doc/en" and upload docs with "make install"
   (the latter requires ssh-login permissions on pytest.org
   because it uses rsync). Note that the "install" target of
   doc/en/Makefile defines where the rsync goes to, typically
   to the "latest" section of pytest.org.

8. publish to pypi "devpi push pytest-2.6.2 pypi:NAME" where NAME
   is the name of pypi.python.org as configured in your
   ~/.pypirc file -- it's the same you would use with
   "setup.py upload -r NAME"

9. send release announcement to mailing lists:

   pytest-dev
   testing-in-python
   python-announce-list@python.org
@@ -79,15 +79,6 @@ style tests, i.e. it leverages existing test suites without needing
to rewrite them. Together with the previously mentioned setup_test()
maybe the setupfunc could be omitted?

checks / deprecations for next release
---------------------------------------------------------------
tags: bug 2.4 core xdist

* check oejskit plugin compatibility
* move pytest_nose out of pylib because it implicitly extends
  the protocol now - setup/teardown is called at module level.
  consider making calling of setup/teardown configurable

optimizations
---------------------------------------------------------------
tags: 2.4 core
@@ -5,6 +5,8 @@ Changelog: http://pytest.org/latest/changelog.html

Issues: https://bitbucket.org/hpk42/pytest/issues?status=open

CI: https://drone.io/bitbucket.org/hpk42/pytest

The ``pytest`` testing tool makes it easy to write small tests, yet
scales to support complex functional testing. It provides

@@ -17,8 +19,8 @@ scales to support complex functional testing. It provides
- multi-paradigm support: you can use ``pytest`` to run test suites based
  on `unittest <http://pytest.org/latest/unittest.html>`_ (or trial),
  `nose <http://pytest.org/latest/nose.html>`_
- single-source compatibility to Python2.5 all the way up to Python3.3,
  PyPy-1.9 and Jython-2.5.1.
- single-source compatibility to Python2.5 all the way up to Python3.4,
  PyPy-2.3 and Jython-2.5.1.

- many `external plugins <http://pytest.org/latest/plugins.html#installing-external-plugins-searching>`_.
@@ -1,2 +1,2 @@
#
__version__ = '2.5.2'
__version__ = '2.6.2'
@@ -6,20 +6,32 @@ import sys
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util


def pytest_addoption(parser):
    group = parser.getgroup("debugconfig")
    group.addoption('--assert', action="store", dest="assertmode",
    group.addoption('--assert',
                    action="store",
                    dest="assertmode",
                    choices=("rewrite", "reinterp", "plain",),
                    default="rewrite", metavar="MODE",
                    help="""control assertion debugging tools.
'plain' performs no assertion debugging.
'reinterp' reinterprets assert statements after they failed to provide assertion expression information.
'rewrite' (the default) rewrites assert statements in test modules on import
to provide assert expression information. """)
    group.addoption('--no-assert', action="store_true", default=False,
                    dest="noassert", help="DEPRECATED equivalent to --assert=plain")
    group.addoption('--nomagic', '--no-magic', action="store_true",
                    default=False, help="DEPRECATED equivalent to --assert=plain")
                    default="rewrite",
                    metavar="MODE",
                    help="""control assertion debugging tools.  'plain'
                         performs no assertion debugging.  'reinterp'
                         reinterprets assert statements after they failed
                         to provide assertion expression information.
                         'rewrite' (the default) rewrites assert
                         statements in test modules on import to
                         provide assert expression information. """)
    group.addoption('--no-assert',
                    action="store_true",
                    default=False,
                    dest="noassert",
                    help="DEPRECATED equivalent to --assert=plain")
    group.addoption('--nomagic', '--no-magic',
                    action="store_true",
                    default=False,
                    help="DEPRECATED equivalent to --assert=plain")


class AssertionState:
    """State for the assertion plugin."""
@@ -28,6 +40,7 @@ class AssertionState:
        self.mode = mode
        self.trace = config.trace.root.get("assertion")


def pytest_configure(config):
    mode = config.getvalue("assertmode")
    if config.getvalue("noassert") or config.getvalue("nomagic"):
@@ -41,7 +54,7 @@ def pytest_configure(config):
    # Both Jython and CPython 2.6.0 have AST bugs that make the
    # assertion rewriting hook malfunction.
    if (sys.platform.startswith('java') or
            sys.version_info[:3] == (2, 6, 0)):
            sys.version_info[:3] == (2, 6, 0)):
        mode = "reinterp"
    if mode != "plain":
        _load_modules(mode)
@@ -58,11 +71,13 @@ def pytest_configure(config):
    config._assertstate.hook = hook
    config._assertstate.trace("configured with mode set to %r" % (mode,))


def pytest_unconfigure(config):
    hook = config._assertstate.hook
    if hook is not None:
    if hook is not None and hook in sys.meta_path:
        sys.meta_path.remove(hook)


def pytest_collection(session):
    # this hook is only called when test modules are collected
    # so for example not in the master process of pytest-xdist
@@ -71,36 +86,55 @@ def pytest_collection(session):
    if hook is not None:
        hook.set_session(session)


def pytest_runtest_setup(item):
    """Setup the pytest_assertrepr_compare hook

    The newinterpret and rewrite modules will use util._reprcompare if
    it exists to use custom reporting via the
    pytest_assertrepr_compare hook.  This sets up this custom
    comparison for the test.
    """
    def callbinrepr(op, left, right):
        """Call the pytest_assertrepr_compare hook and prepare the result

        This uses the first result from the hook and then ensures the
        following:
        * Overly verbose explanations are dropped unless -vv was used.
        * Embedded newlines are escaped to help util.format_explanation()
          later.
        * If the rewrite mode is used embedded %-characters are replaced
          to protect later % formatting.

        The result can be formatted by util.format_explanation() for
        pretty printing.
        """
        hook_result = item.ihook.pytest_assertrepr_compare(
            config=item.config, op=op, left=left, right=right)

        for new_expl in hook_result:
            if new_expl:
                # Don't include pageloads of data unless we are very
                # verbose (-vv)
                if (sum(len(p) for p in new_expl[1:]) > 80*8
                        and item.config.option.verbose < 2):
                    new_expl[1:] = [py.builtin._totext(
                        'Detailed information truncated, use "-vv" to show')]
                res = py.builtin._totext('\n~').join(new_expl)
                new_expl = [line.replace("\n", "\\n") for line in new_expl]
                res = py.builtin._totext("\n~").join(new_expl)
                if item.config.getvalue("assertmode") == "rewrite":
                    # The result will be fed back a python % formatting
                    # operation, which will fail if there are extraneous
                    # '%'s in the string. Escape them here.
                    res = res.replace("%", "%%")
                return res
    util._reprcompare = callbinrepr


def pytest_runtest_teardown(item):
    util._reprcompare = None


def pytest_sessionfinish(session):
    hook = session.config._assertstate.hook
    if hook is not None:
        hook.session = None


def _load_modules(mode):
    """Lazily import assertion related code."""
    global rewrite, reinterpret
@@ -108,6 +142,7 @@ def _load_modules(mode):
    if mode == "rewrite":
        from _pytest.assertion import rewrite  # noqa


def warn_about_missing_assertion(mode):
    try:
        assert False
@@ -121,8 +156,10 @@ def warn_about_missing_assertion(mode):
        specifically = "failing tests may report as passing"

    sys.stderr.write("WARNING: " + specifically +
        " because assert statements are not executed "
        "by the underlying Python interpreter "
        "(are you using python -O?)\n")
                     " because assert statements are not executed "
                     "by the underlying Python interpreter "
                     "(are you using python -O?)\n")


# Expose this plugin's implementation for the pytest_assertrepr_compare hook
pytest_assertrepr_compare = util.assertrepr_compare
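For context, the hook this module exposes can also be implemented in a project's conftest.py; a minimal sketch (the set-specific logic is illustrative, the hook name and signature are pytest's):

```python
# conftest.py
def pytest_assertrepr_compare(config, op, left, right):
    # Return a list of explanation lines for a custom comparison,
    # or None to fall back to the default representation.
    if op == "==" and isinstance(left, set) and isinstance(right, set):
        return [
            "set comparison:",
            "   only in left:  %r" % (left - right,),
            "   only in right: %r" % (right - left,),
        ]
```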
@@ -286,7 +286,19 @@ class DebugInterpreter(ast.NodeVisitor):
            source = "__exprinfo_expr.%s" % (attr.attr,)
            co = self._compile(source)
            try:
                result = self.frame.eval(co, __exprinfo_expr=source_result)
                try:
                    result = self.frame.eval(co, __exprinfo_expr=source_result)
                except AttributeError:
                    # Maybe the attribute name needs to be mangled?
                    if not attr.attr.startswith("__") or attr.attr.endswith("__"):
                        raise
                    source = "getattr(__exprinfo_expr.__class__, '__name__', '')"
                    co = self._compile(source)
                    class_name = self.frame.eval(co, __exprinfo_expr=source_result)
                    mangled_attr = "_" + class_name + attr.attr
                    source = "__exprinfo_expr.%s" % (mangled_attr,)
                    co = self._compile(source)
                    result = self.frame.eval(co, __exprinfo_expr=source_result)
            except Exception:
                raise Failure(explanation)
            explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
@@ -1,3 +1,5 @@
import traceback
import types
import py
import sys, inspect
from compiler import parse, ast, pycodegen
@@ -355,7 +357,18 @@ class Getattr(Interpretable):
        expr.eval(frame)
        source = '__exprinfo_expr.%s' % self.attrname
        try:
            self.result = frame.eval(source, __exprinfo_expr=expr.result)
            try:
                self.result = frame.eval(source, __exprinfo_expr=expr.result)
            except AttributeError:
                # Maybe the attribute name needs to be mangled?
                if (not self.attrname.startswith("__") or
                        self.attrname.endswith("__")):
                    raise
                source = "getattr(__exprinfo_expr.__class__, '__name__', '')"
                class_name = frame.eval(source, __exprinfo_expr=expr.result)
                mangled_attr = "_" + class_name + self.attrname
                source = "__exprinfo_expr.%s" % (mangled_attr,)
                self.result = frame.eval(source, __exprinfo_expr=expr.result)
        except passthroughex:
            raise
        except:
@@ -466,7 +479,7 @@ def check(s, frame=None):
def interpret(source, frame, should_fail=False):
    module = Interpretable(parse(source, 'exec').node)
    #print "got module", module
    if isinstance(frame, py.std.types.FrameType):
    if isinstance(frame, types.FrameType):
        frame = py.code.Frame(frame)
    try:
        module.run(frame)
@@ -476,7 +489,6 @@ def interpret(source, frame, should_fail=False):
    except passthroughex:
        raise
    except:
        import traceback
        traceback.print_exc()
    if should_fail:
        return ("(assertion failed, but when it was re-run for "
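Both hunks above retry the attribute lookup under Python's private-name mangling; for reference, the language rule they rely on:

```python
# Inside class C, "self.__x" is compiled to "self._C__x", so a failed
# lookup of a "__"-prefixed attribute is retried with the mangled name.
class C(object):
    def __init__(self):
        self.__x = 1  # actually stored as _C__x

c = C()
class_name = c.__class__.__name__
assert getattr(c, "_" + class_name + "__x") == 1
```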
@@ -45,10 +45,8 @@ class AssertionError(BuiltinAssertionError):

if sys.version_info > (3, 0):
    AssertionError.__module__ = "builtins"
    reinterpret_old = "old reinterpretation not available for py3"
else:
    from _pytest.assertion.oldinterpret import interpret as reinterpret_old
if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):

if sys.version_info >= (2, 6) or sys.platform.startswith("java"):
    from _pytest.assertion.newinterpret import interpret as reinterpret
else:
    reinterpret = reinterpret_old
    from _pytest.assertion.oldinterpret import interpret as reinterpret
@@ -131,15 +131,15 @@ class AssertionRewritingHook(object):
            pyc = os.path.join(cache_dir, cache_name)
            # Notice that even if we're in a read-only directory, I'm going
            # to check for a cached pyc. This may not be optimal...
            co = _read_pyc(fn_pypath, pyc)
            co = _read_pyc(fn_pypath, pyc, state.trace)
            if co is None:
                state.trace("rewriting %r" % (fn,))
                co = _rewrite_test(state, fn_pypath)
                source_stat, co = _rewrite_test(state, fn_pypath)
                if co is None:
                    # Probably a SyntaxError in the test.
                    return None
                if write:
                    _make_rewritten_pyc(state, fn_pypath, pyc, co)
                    _make_rewritten_pyc(state, source_stat, pyc, co)
            else:
                state.trace("found cached rewritten pyc for %r" % (fn,))
        self.modules[name] = co, pyc
@@ -192,13 +192,12 @@ class AssertionRewritingHook(object):
        pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)


def _write_pyc(state, co, source_path, pyc):
def _write_pyc(state, co, source_stat, pyc):
    # Technically, we don't have to have the same pyc format as
    # (C)Python, since these "pycs" should never be seen by builtin
    # import. However, there's little reason to deviate, and I hope
    # sometime to be able to use imp.load_compiled to load them. (See
    # the comment in load_module above.)
    mtime = int(source_path.mtime())
    try:
        fp = open(pyc, "wb")
    except IOError:
@@ -210,7 +209,9 @@ def _write_pyc(state, co, source_path, pyc):
        return False
    try:
        fp.write(imp.get_magic())
        fp.write(struct.pack("<l", mtime))
        mtime = int(source_stat.mtime)
        size = source_stat.size & 0xFFFFFFFF
        fp.write(struct.pack("<ll", mtime, size))
        marshal.dump(co, fp)
    finally:
        fp.close()
@@ -225,9 +226,10 @@ BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(state, fn):
    """Try to read and rewrite *fn* and return the code object."""
    try:
        stat = fn.stat()
        source = fn.read("rb")
    except EnvironmentError:
        return None
        return None, None
    if ASCII_IS_DEFAULT_ENCODING:
        # ASCII is the default encoding in Python 2. Without a coding
        # declaration, Python 2 will complain about any bytes in the file
@@ -246,14 +248,15 @@ def _rewrite_test(state, fn):
            cookie_re.match(source[0:end1]) is None and
            cookie_re.match(source[end1 + 1:end2]) is None):
        if hasattr(state, "_indecode"):
            return None  # encodings imported us again, we don't rewrite
            # encodings imported us again, so don't rewrite.
            return None, None
        state._indecode = True
        try:
            try:
                source.decode("ascii")
            except UnicodeDecodeError:
                # Let it fail in real import.
                return None
                return None, None
        finally:
            del state._indecode
    # On Python versions which are not 2.7 and less than or equal to 3.1, the
@@ -265,7 +268,7 @@ def _rewrite_test(state, fn):
    except SyntaxError:
        # Let this pop up again in the real import.
        state.trace("failed to parse: %r" % (fn,))
        return None
        return None, None
    rewrite_asserts(tree)
    try:
        co = compile(tree, fn.strpath, "exec")
@@ -273,23 +276,23 @@ def _rewrite_test(state, fn):
        # It's possible that this error is from some bug in the
        # assertion rewriting, but I don't know of a fast way to tell.
        state.trace("failed to compile: %r" % (fn,))
        return None
    return co
        return None, None
    return stat, co

def _make_rewritten_pyc(state, fn, pyc, co):
def _make_rewritten_pyc(state, source_stat, pyc, co):
    """Try to dump rewritten code to *pyc*."""
    if sys.platform.startswith("win"):
        # Windows grants exclusive access to open files and doesn't have atomic
        # rename, so just write into the final file.
        _write_pyc(state, co, fn, pyc)
        _write_pyc(state, co, source_stat, pyc)
    else:
        # When not on windows, assume rename is atomic. Dump the code object
        # into a file specific to this process and atomically replace it.
        proc_pyc = pyc + "." + str(os.getpid())
        if _write_pyc(state, co, fn, proc_pyc):
        if _write_pyc(state, co, source_stat, proc_pyc):
            os.rename(proc_pyc, pyc)

def _read_pyc(source, pyc):
def _read_pyc(source, pyc, trace=lambda x: None):
    """Possibly read a pytest pyc containing rewritten code.

    Return rewritten code if successful or None if not.
@@ -298,23 +301,28 @@ def _read_pyc(source, pyc):
        fp = open(pyc, "rb")
    except IOError:
        return None
    try:
    with fp:
        try:
            mtime = int(source.mtime())
            data = fp.read(8)
        except EnvironmentError:
            size = source.size()
            data = fp.read(12)
        except EnvironmentError as e:
            trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
            return None
        # Check for invalid or out of date pyc file.
        if (len(data) != 8 or data[:4] != imp.get_magic() or
                struct.unpack("<l", data[4:])[0] != mtime):
        if (len(data) != 12 or data[:4] != imp.get_magic() or
                struct.unpack("<ll", data[4:]) != (mtime, size)):
            trace('_read_pyc(%s): invalid or out of date pyc' % source)
            return None
        try:
            co = marshal.load(fp)
        except Exception as e:
            trace('_read_pyc(%s): marshal.load error %s' % (source, e))
            return None
        co = marshal.load(fp)
        if not isinstance(co, types.CodeType):
            # That's interesting....
            trace('_read_pyc(%s): not a code object' % source)
            return None
        return co
    finally:
        fp.close()
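The cache-invalidation change above widens the pyc header from the source mtime alone to mtime plus source size; a standalone sketch of the resulting validity check, mirroring the diff's own imp/struct usage (the helper name is hypothetical):

```python
import imp
import struct

def pyc_header_matches(data, mtime, size):
    # A cached pyc is reused only if the magic number, the source mtime
    # and the source size all match (12 header bytes instead of 8).
    return (len(data) == 12
            and data[:4] == imp.get_magic()
            and struct.unpack("<ll", data[4:]) == (mtime, size))
```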

def rewrite_asserts(mod):
@@ -322,7 +330,25 @@ def rewrite_asserts(mod):
    AssertionRewriter().run(mod)


_saferepr = py.io.saferepr
def _saferepr(obj):
    """Get a safe repr of an object for assertion error messages.

    The assertion formatting (util.format_explanation()) requires
    newlines to be escaped since they are a special character for it.
    Normally assertion.util.format_explanation() does this but a
    custom repr may contain one of the special escape
    sequences; especially '\n{' and '\n}' are likely to be present in
    JSON reprs.

    """
    repr = py.io.saferepr(obj)
    if py.builtin._istext(repr):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    return repr.replace(t("\n"), t("\\n"))


from _pytest.assertion.util import format_explanation as _format_explanation  # noqa

def _should_repr_global_name(obj):
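A quick illustration of the collision the docstring describes (class name illustrative): format_explanation() treats '\n{', '\n}' and '\n~' as structure markers, so raw newlines in a repr must become literal '\\n':

```python
class JsonLike(object):
    def __repr__(self):
        return '{\n  "a": 1\n}'  # raw newlines, as a JSON-style repr might have

raw = repr(JsonLike())
escaped = raw.replace("\n", "\\n")  # the same escaping _saferepr applies
print(escaped)                      # -> {\n  "a": 1\n}  on a single line
```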
@@ -1,4 +1,5 @@
"""Utilities for assertion debugging"""
import pprint

import py
try:
@@ -149,11 +150,10 @@ def assertrepr_compare(config, op, left, right):
            if istext(left) and istext(right):
                explanation = _notin_text(left, right, verbose)
    except Exception:
        excinfo = py.code.ExceptionInfo()
        explanation = [
            u('(pytest_assertion plugin: representation of details failed. '
              'Probably an object has a faulty __repr__.)'),
            u(excinfo)]
            u(py.code.ExceptionInfo())]

    if not explanation:
        return None
@@ -169,6 +169,7 @@ def _diff_text(left, right, verbose=False):

    If the input are bytes they will be safely converted to text.
    """
    from difflib import ndiff
    explanation = []
    if isinstance(left, py.builtin.bytes):
        left = u(repr(left)[1:-1]).replace(r'\n', '\n')
@@ -196,8 +197,8 @@ def _diff_text(left, right, verbose=False):
            left = left[:-i]
            right = right[:-i]
    explanation += [line.strip('\n')
                    for line in py.std.difflib.ndiff(left.splitlines(),
                                                     right.splitlines())]
                    for line in ndiff(left.splitlines(),
                                      right.splitlines())]
    return explanation


@@ -215,8 +216,8 @@ def _compare_eq_sequence(left, right, verbose=False):
        explanation += [
            u('Right contains more items, first extra item: %s') %
            py.io.saferepr(right[len(left)],)]
    return explanation  # + _diff_text(py.std.pprint.pformat(left),
    #                                  py.std.pprint.pformat(right))
    return explanation  # + _diff_text(pprint.pformat(left),
    #                                  pprint.pformat(right))


def _compare_eq_set(left, right, verbose=False):
@@ -243,7 +244,7 @@ def _compare_eq_dict(left, right, verbose=False):
                        len(same)]
    elif same:
        explanation += [u('Common items:')]
        explanation += py.std.pprint.pformat(same).splitlines()
        explanation += pprint.pformat(same).splitlines()
    diff = set(k for k in common if left[k] != right[k])
    if diff:
        explanation += [u('Differing items:')]
@@ -253,12 +254,12 @@ def _compare_eq_dict(left, right, verbose=False):
    extra_left = set(left) - set(right)
    if extra_left:
        explanation.append(u('Left contains more items:'))
        explanation.extend(py.std.pprint.pformat(
        explanation.extend(pprint.pformat(
            dict((k, left[k]) for k in extra_left)).splitlines())
    extra_right = set(right) - set(left)
    if extra_right:
        explanation.append(u('Right contains more items:'))
        explanation.extend(py.std.pprint.pformat(
        explanation.extend(pprint.pformat(
            dict((k, right[k]) for k in extra_right)).splitlines())
    return explanation
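These hunks replace the py.std indirection with direct imports; for reference, what difflib.ndiff produces over split lines:

```python
from difflib import ndiff

left, right = "spam\neggs", "spam\nham"
print(list(ndiff(left.splitlines(), right.splitlines())))
# ['  spam', '- eggs', '+ ham']
```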
@@ -1,40 +1,18 @@
"""
per-test stdout/stderr capturing mechanisms,
``capsys`` and ``capfd`` function arguments.
per-test stdout/stderr capturing mechanism.

"""
# note: py.io capture was where copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
from __future__ import with_statement

import sys
import os
import tempfile
from tempfile import TemporaryFile

import py
import pytest

try:
    from io import StringIO
except ImportError:
    from StringIO import StringIO

try:
    from io import BytesIO
except ImportError:
    class BytesIO(StringIO):
        def write(self, data):
            if isinstance(data, unicode):
                raise TypeError("not a byte value: %r" % (data,))
            StringIO.write(self, data)

if sys.version_info < (3, 0):
    class TextIO(StringIO):
        def write(self, data):
            if not isinstance(data, unicode):
                enc = getattr(self, '_encoding', 'UTF-8')
                data = unicode(data, enc, 'replace')
            StringIO.write(self, data)
else:
    TextIO = StringIO

from py.io import TextIO
unicode = py.builtin.text

patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
@@ -42,9 +20,10 @@ patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
def pytest_addoption(parser):
    group = parser.getgroup("general")
    group._addoption(
        '--capture', action="store", default=None,
        '--capture', action="store",
        default="fd" if hasattr(os, "dup") else "sys",
        metavar="method", choices=['fd', 'sys', 'no'],
        help="per-test capturing method: one of fd (default)|sys|no.")
        help="per-test capturing method: one of fd|sys|no.")
    group._addoption(
        '-s', action="store_const", const="no", dest="capture",
        help="shortcut for --capture=no.")
@@ -52,32 +31,22 @@ def pytest_addoption(parser):

@pytest.mark.tryfirst
def pytest_load_initial_conftests(early_config, parser, args, __multicall__):
    ns = parser.parse_known_args(args)
    method = ns.capture
    if not method:
        method = "fd"
    if method == "fd" and not hasattr(os, "dup"):
        method = "sys"
    capman = CaptureManager(method)
    early_config.pluginmanager.register(capman, "capturemanager")
    ns = early_config.known_args_namespace
    pluginmanager = early_config.pluginmanager
    capman = CaptureManager(ns.capture)
    pluginmanager.register(capman, "capturemanager")

    # make sure that capturemanager is properly reset at final shutdown
    def teardown():
        try:
            capman.reset_capturings()
        except ValueError:
            pass

    early_config.pluginmanager.add_shutdown(teardown)
    pluginmanager.add_shutdown(capman.reset_capturings)

    # make sure logging does not raise exceptions at the end
    def silence_logging_at_shutdown():
        if "logging" in sys.modules:
            sys.modules["logging"].raiseExceptions = False
    early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown)
    pluginmanager.add_shutdown(silence_logging_at_shutdown)

    # finally trigger conftest loading but while capturing (issue93)
    capman.resumecapture()
    capman.init_capturings()
    try:
        try:
            return __multicall__.execute()
@@ -89,208 +58,143 @@ def pytest_load_initial_conftests(early_config, parser, args, __multicall__):
            raise


def addouterr(rep, outerr):
    for secname, content in zip(["out", "err"], outerr):
        if content:
            rep.sections.append(("Captured std%s" % secname, content))


class NoCapture:
    def startall(self):
        pass

    def resume(self):
        pass

    def reset(self):
        pass

    def suspend(self):
        return "", ""


class CaptureManager:
    def __init__(self, defaultmethod=None):
        self._method2capture = {}
        self._defaultmethod = defaultmethod

    def _maketempfile(self):
        f = py.std.tempfile.TemporaryFile()
        newf = dupfile(f, encoding="UTF-8")
        f.close()
        return newf

    def _makestringio(self):
        return TextIO()
    def __init__(self, method):
        self._method = method

    def _getcapture(self, method):
        if method == "fd":
            return StdCaptureFD(
                out=self._maketempfile(),
                err=self._maketempfile(),
            )
            return MultiCapture(out=True, err=True, Capture=FDCapture)
        elif method == "sys":
            return StdCapture(
                out=self._makestringio(),
                err=self._makestringio(),
            )
            return MultiCapture(out=True, err=True, Capture=SysCapture)
        elif method == "no":
            return NoCapture()
            return MultiCapture(out=False, err=False, in_=False)
        else:
            raise ValueError("unknown capturing method: %r" % method)

    def _getmethod(self, config, fspath):
        if config.option.capture:
            method = config.option.capture
        else:
            try:
                method = config._conftest.rget("option_capture", path=fspath)
            except KeyError:
                method = "fd"
        if method == "fd" and not hasattr(os, 'dup'):  # e.g. jython
            method = "sys"
        return method
    def init_capturings(self):
        assert not hasattr(self, "_capturing")
        self._capturing = self._getcapture(self._method)
        self._capturing.start_capturing()

    def reset_capturings(self):
        for cap in self._method2capture.values():
            cap.reset()
        cap = self.__dict__.pop("_capturing", None)
        if cap is not None:
            cap.pop_outerr_to_orig()
            cap.stop_capturing()

    def resumecapture_item(self, item):
        method = self._getmethod(item.config, item.fspath)
        if not hasattr(item, 'outerr'):
            item.outerr = ('', '')  # we accumulate outerr on the item
        return self.resumecapture(method)
    def resumecapture(self):
        self._capturing.resume_capturing()

    def resumecapture(self, method=None):
        if hasattr(self, '_capturing'):
            raise ValueError(
                "cannot resume, already capturing with %r" %
                (self._capturing,))
        if method is None:
            method = self._defaultmethod
        cap = self._method2capture.get(method)
        self._capturing = method
        if cap is None:
            self._method2capture[method] = cap = self._getcapture(method)
            cap.startall()
        else:
            cap.resume()

    def suspendcapture(self, item=None):
    def suspendcapture(self, in_=False):
        self.deactivate_funcargs()
        if hasattr(self, '_capturing'):
            method = self._capturing
            cap = self._method2capture.get(method)
            if cap is not None:
                outerr = cap.suspend()
            del self._capturing
            if item:
                outerr = (item.outerr[0] + outerr[0],
                          item.outerr[1] + outerr[1])
        cap = getattr(self, "_capturing", None)
        if cap is not None:
            outerr = cap.readouterr()
            cap.suspend_capturing(in_=in_)
            return outerr
        if hasattr(item, 'outerr'):
            return item.outerr
        return "", ""

    def activate_funcargs(self, pyfuncitem):
        funcargs = getattr(pyfuncitem, "funcargs", None)
        if funcargs is not None:
            for name, capfuncarg in funcargs.items():
                if name in ('capsys', 'capfd'):
                    assert not hasattr(self, '_capturing_funcarg')
                    self._capturing_funcarg = capfuncarg
                    capfuncarg._start()
        capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None)
        if capfuncarg is not None:
            capfuncarg._start()
            self._capfuncarg = capfuncarg

    def deactivate_funcargs(self):
        capturing_funcarg = getattr(self, '_capturing_funcarg', None)
        if capturing_funcarg:
            outerr = capturing_funcarg._finalize()
            del self._capturing_funcarg
            return outerr
        capfuncarg = self.__dict__.pop("_capfuncarg", None)
        if capfuncarg is not None:
            capfuncarg.close()

    @pytest.mark.tryfirst
    def pytest_make_collect_report(self, __multicall__, collector):
        method = self._getmethod(collector.config, collector.fspath)
        try:
            self.resumecapture(method)
        except ValueError:
            # recursive collect, XXX refactor capturing
            # to allow for more lightweight recursive capturing
        if not isinstance(collector, pytest.File):
            return
        self.resumecapture()
        try:
            rep = __multicall__.execute()
        finally:
            outerr = self.suspendcapture()
        addouterr(rep, outerr)
            out, err = self.suspendcapture()
        if out:
            rep.sections.append(("Captured stdout", out))
        if err:
            rep.sections.append(("Captured stderr", err))
        return rep

    @pytest.mark.tryfirst
    @pytest.mark.hookwrapper
    def pytest_runtest_setup(self, item):
        self.resumecapture_item(item)
        self.resumecapture()
        yield
        self.suspendcapture_item(item, "setup")

    @pytest.mark.tryfirst
    @pytest.mark.hookwrapper
    def pytest_runtest_call(self, item):
        self.resumecapture_item(item)
        self.resumecapture()
        self.activate_funcargs(item)
        yield
        #self.deactivate_funcargs() called from suspendcapture()
        self.suspendcapture_item(item, "call")

    @pytest.mark.tryfirst
    @pytest.mark.hookwrapper
    def pytest_runtest_teardown(self, item):
        self.resumecapture_item(item)

    def pytest_keyboard_interrupt(self, excinfo):
        if hasattr(self, '_capturing'):
            self.suspendcapture()
        self.resumecapture()
        yield
        self.suspendcapture_item(item, "teardown")

    @pytest.mark.tryfirst
    def pytest_runtest_makereport(self, __multicall__, item, call):
        funcarg_outerr = self.deactivate_funcargs()
        rep = __multicall__.execute()
        outerr = self.suspendcapture(item)
        if funcarg_outerr is not None:
            outerr = (outerr[0] + funcarg_outerr[0],
                      outerr[1] + funcarg_outerr[1])
        addouterr(rep, outerr)
        if not rep.passed or rep.when == "teardown":
            outerr = ('', '')
        item.outerr = outerr
        return rep
    def pytest_keyboard_interrupt(self, excinfo):
        self.reset_capturings()

    @pytest.mark.tryfirst
    def pytest_internalerror(self, excinfo):
        self.reset_capturings()

    def suspendcapture_item(self, item, when):
        out, err = self.suspendcapture()
        item.add_report_section(when, "out", out)
        item.add_report_section(when, "err", err)

error_capsysfderror = "cannot use capsys and capfd at the same time"


def pytest_funcarg__capsys(request):
@pytest.fixture
def capsys(request):
    """enables capturing of writes to sys.stdout/sys.stderr and makes
    captured output available via ``capsys.readouterr()`` method calls
    which return a ``(out, err)`` tuple.
    """
    if "capfd" in request._funcargs:
        raise request.raiseerror(error_capsysfderror)
    return CaptureFixture(StdCapture)
    request.node._capfuncarg = c = CaptureFixture(SysCapture)
    return c


def pytest_funcarg__capfd(request):
@pytest.fixture
def capfd(request):
    """enables capturing of writes to file descriptors 1 and 2 and makes
    captured output available via ``capsys.readouterr()`` method calls
    captured output available via ``capfd.readouterr()`` method calls
    which return a ``(out, err)`` tuple.
    """
    if "capsys" in request._funcargs:
        request.raiseerror(error_capsysfderror)
    if not hasattr(os, 'dup'):
        pytest.skip("capfd funcarg needs os.dup")
    return CaptureFixture(StdCaptureFD)
    request.node._capfuncarg = c = CaptureFixture(FDCapture)
    return c
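For reference, the capsys fixture defined above in use (test name illustrative):

```python
def test_greeting(capsys):
    print("hello")
    out, err = capsys.readouterr()  # snapshot of captured stdout/stderr
    assert out == "hello\n"
    assert err == ""
```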
class CaptureFixture:
|
||||
def __init__(self, captureclass):
|
||||
self._capture = captureclass()
|
||||
self.captureclass = captureclass
|
||||
|
||||
def _start(self):
|
||||
self._capture.startall()
|
||||
self._capture = MultiCapture(out=True, err=True, in_=False,
|
||||
Capture=self.captureclass)
|
||||
self._capture.start_capturing()
|
||||
|
||||
def _finalize(self):
|
||||
if hasattr(self, '_capture'):
|
||||
outerr = self._outerr = self._capture.reset()
|
||||
del self._capture
|
||||
return outerr
|
||||
def close(self):
|
||||
cap = self.__dict__.pop("_capture", None)
|
||||
if cap is not None:
|
||||
self._outerr = cap.pop_outerr_to_orig()
|
||||
cap.stop_capturing()
|
||||
|
||||
def readouterr(self):
|
||||
try:
|
||||
@@ -298,295 +202,223 @@ class CaptureFixture:
|
||||
except AttributeError:
|
||||
return self._outerr
|
||||
|
||||
def close(self):
|
||||
self._finalize()
|
||||
|
||||
|
||||
class FDCapture:
|
||||
""" Capture IO to/from a given os-level filedescriptor. """
|
||||
|
||||
def __init__(self, targetfd, tmpfile=None, patchsys=False):
|
||||
""" save targetfd descriptor, and open a new
|
||||
temporary file there. If no tmpfile is
|
||||
specified a tempfile.Tempfile() will be opened
|
||||
in text mode.
|
||||
"""
|
||||
self.targetfd = targetfd
|
||||
if tmpfile is None and targetfd != 0:
|
||||
f = tempfile.TemporaryFile('wb+')
|
||||
tmpfile = dupfile(f, encoding="UTF-8")
|
||||
f.close()
|
||||
self.tmpfile = tmpfile
|
||||
self._savefd = os.dup(self.targetfd)
|
||||
if patchsys:
|
||||
self._oldsys = getattr(sys, patchsysdict[targetfd])
|
||||
|
||||
def start(self):
|
||||
try:
|
||||
os.fstat(self._savefd)
|
||||
except OSError:
|
||||
raise ValueError(
|
||||
"saved filedescriptor not valid, "
|
||||
"did you call start() twice?")
|
||||
if self.targetfd == 0 and not self.tmpfile:
|
||||
fd = os.open(os.devnull, os.O_RDONLY)
|
||||
os.dup2(fd, 0)
|
||||
os.close(fd)
|
||||
if hasattr(self, '_oldsys'):
|
||||
setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
|
||||
else:
|
||||
os.dup2(self.tmpfile.fileno(), self.targetfd)
|
||||
if hasattr(self, '_oldsys'):
|
||||
setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
|
||||
|
||||
def done(self):
|
||||
""" unpatch and clean up, returns the self.tmpfile (file object)
|
||||
"""
|
||||
os.dup2(self._savefd, self.targetfd)
|
||||
os.close(self._savefd)
|
||||
if self.targetfd != 0:
|
||||
self.tmpfile.seek(0)
|
||||
if hasattr(self, '_oldsys'):
|
||||
setattr(sys, patchsysdict[self.targetfd], self._oldsys)
|
||||
return self.tmpfile
|
||||
|
||||
def writeorg(self, data):
|
||||
""" write a string to the original file descriptor
|
||||
"""
|
||||
tempfp = tempfile.TemporaryFile()
|
||||
try:
|
||||
os.dup2(self._savefd, tempfp.fileno())
|
||||
tempfp.write(data)
|
||||
finally:
|
||||
tempfp.close()
|
||||
|
||||
|
||||
def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
|
||||
""" return a new open file object that's a duplicate of f
|
||||
|
||||
mode is duplicated if not given, 'buffering' controls
|
||||
buffer size (defaulting to no buffering) and 'raising'
|
||||
defines whether an exception is raised when an incompatible
|
||||
file object is passed in (if raising is False, the file
|
||||
object itself will be returned)
|
||||
def safe_text_dupfile(f, mode, default_encoding="UTF8"):
|
||||
""" return a open text file object that's a duplicate of f on the
|
||||
FD-level if possible.
|
||||
"""
|
||||
encoding = getattr(f, "encoding", None)
|
||||
try:
|
||||
fd = f.fileno()
|
||||
mode = mode or f.mode
|
||||
except AttributeError:
|
||||
if raising:
|
||||
raise
|
||||
return f
|
||||
newfd = os.dup(fd)
|
||||
if sys.version_info >= (3, 0):
|
||||
if encoding is not None:
|
||||
mode = mode.replace("b", "")
|
||||
buffering = True
|
||||
return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
|
||||
except Exception:
|
||||
if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"):
|
||||
# we seem to have a text stream, let's just use it
|
||||
return f
|
||||
else:
|
||||
f = os.fdopen(newfd, mode, buffering)
|
||||
if encoding is not None:
|
||||
return EncodedFile(f, encoding)
|
||||
return f
|
||||
newfd = os.dup(fd)
|
||||
if "b" not in mode:
|
||||
mode += "b"
|
||||
f = os.fdopen(newfd, mode, 0) # no buffering
|
||||
return EncodedFile(f, encoding or default_encoding)
|
||||
|
||||
|
||||
class EncodedFile(object):
|
||||
def __init__(self, _stream, encoding):
|
||||
self._stream = _stream
|
||||
errors = "strict" # possibly needed by py3 code (issue555)
|
||||
def __init__(self, buffer, encoding):
|
||||
self.buffer = buffer
|
||||
self.encoding = encoding
|
||||
|
||||
def write(self, obj):
|
||||
if isinstance(obj, unicode):
|
||||
obj = obj.encode(self.encoding)
|
||||
self._stream.write(obj)
|
||||
obj = obj.encode(self.encoding, "replace")
|
||||
self.buffer.write(obj)
|
||||
|
||||
def writelines(self, linelist):
|
||||
data = ''.join(linelist)
|
||||
self.write(data)
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._stream, name)
|
||||
return getattr(self.buffer, name)


class Capture(object):
    def reset(self):
        """ reset sys.stdout/stderr and return captured output as strings. """
        if hasattr(self, '_reset'):
            raise ValueError("was already reset")
        self._reset = True
        outfile, errfile = self.done(save=False)
        out, err = "", ""
        if outfile and not outfile.closed:
            out = outfile.read()
            outfile.close()
        if errfile and errfile != outfile and not errfile.closed:
            err = errfile.read()
            errfile.close()
        return out, err

class MultiCapture(object):
    out = err = in_ = None

    def suspend(self):
        """ return current snapshot captures, memorize tempfiles. """
        outerr = self.readouterr()
        outfile, errfile = self.done()
        return outerr


class StdCaptureFD(Capture):
    """ This class allows to capture writes to FD1 and FD2
        and may connect a NULL file to FD0 (and prevent
        reads from sys.stdin). If any of the 0,1,2 file descriptors
        is invalid it will not be captured.
    """
    def __init__(self, out=True, err=True, in_=True, patchsys=True):
        self._options = {
            "out": out,
            "err": err,
            "in_": in_,
            "patchsys": patchsys,
        }
        self._save()

    def _save(self):
        in_ = self._options['in_']
        out = self._options['out']
        err = self._options['err']
        patchsys = self._options['patchsys']
    def __init__(self, out=True, err=True, in_=True, Capture=None):
        if in_:
            try:
                self.in_ = FDCapture(
                    0, tmpfile=None,
                    patchsys=patchsys)
            except OSError:
                pass
            self.in_ = Capture(0)
        if out:
            tmpfile = None
            if hasattr(out, 'write'):
                tmpfile = out
            try:
                self.out = FDCapture(
                    1, tmpfile=tmpfile,
                    patchsys=patchsys)
                self._options['out'] = self.out.tmpfile
            except OSError:
                pass
            self.out = Capture(1)
        if err:
            if hasattr(err, 'write'):
                tmpfile = err
            else:
                tmpfile = None
            try:
                self.err = FDCapture(
                    2, tmpfile=tmpfile,
                    patchsys=patchsys)
                self._options['err'] = self.err.tmpfile
            except OSError:
                pass
            self.err = Capture(2)

    def startall(self):
        if hasattr(self, 'in_'):
    def start_capturing(self):
        if self.in_:
            self.in_.start()
        if hasattr(self, 'out'):
        if self.out:
            self.out.start()
        if hasattr(self, 'err'):
        if self.err:
            self.err.start()

    def resume(self):
        """ resume capturing with original temp files. """
        self.startall()

    def done(self, save=True):
        """ return (outfile, errfile) and stop capturing. """
        outfile = errfile = None
        if hasattr(self, 'out') and not self.out.tmpfile.closed:
            outfile = self.out.done()
        if hasattr(self, 'err') and not self.err.tmpfile.closed:
            errfile = self.err.done()
        if hasattr(self, 'in_'):
            self.in_.done()
        if save:
            self._save()
        return outfile, errfile

    def readouterr(self):
        """ return snapshot value of stdout/stderr capturings. """
        out = self._readsnapshot('out')
        err = self._readsnapshot('err')
    def pop_outerr_to_orig(self):
        """ pop current snapshot out/err capture and flush to orig streams. """
        out, err = self.readouterr()
        if out:
            self.out.writeorg(out)
        if err:
            self.err.writeorg(err)
        return out, err

    def _readsnapshot(self, name):
        if hasattr(self, name):
            f = getattr(self, name).tmpfile
        else:
            return ''
    def suspend_capturing(self, in_=False):
        if self.out:
            self.out.suspend()
        if self.err:
            self.err.suspend()
        if in_ and self.in_:
            self.in_.suspend()
            self._in_suspended = True

    def resume_capturing(self):
        if self.out:
            self.out.resume()
        if self.err:
            self.err.resume()
        if hasattr(self, "_in_suspended"):
            self.in_.resume()
            del self._in_suspended

    def stop_capturing(self):
        """ stop capturing and reset capturing streams """
        if hasattr(self, '_reset'):
            raise ValueError("was already stopped")
        self._reset = True
        if self.out:
            self.out.done()
        if self.err:
            self.err.done()
        if self.in_:
            self.in_.done()

    def readouterr(self):
        """ return snapshot unicode value of stdout/stderr capturings. """
        return (self.out.snap() if self.out is not None else "",
                self.err.snap() if self.err is not None else "")


class NoCapture:
    __init__ = start = done = suspend = resume = lambda *args: None


class FDCapture:
    """ Capture IO to/from a given os-level filedescriptor. """

    def __init__(self, targetfd, tmpfile=None):
        self.targetfd = targetfd
        try:
            self.targetfd_save = os.dup(self.targetfd)
        except OSError:
            self.start = lambda: None
            self.done = lambda: None
        else:
            if targetfd == 0:
                assert not tmpfile, "cannot set tmpfile with stdin"
                tmpfile = open(os.devnull, "r")
                self.syscapture = SysCapture(targetfd)
            else:
                if tmpfile is None:
                    f = TemporaryFile()
                    with f:
                        tmpfile = safe_text_dupfile(f, mode="wb+")
                if targetfd in patchsysdict:
                    self.syscapture = SysCapture(targetfd, tmpfile)
                else:
                    self.syscapture = NoCapture()
            self.tmpfile = tmpfile
            self.tmpfile_fd = tmpfile.fileno()

    def __repr__(self):
        return "<FDCapture %s oldfd=%s>" % (self.targetfd, self.targetfd_save)

    def start(self):
        """ Start capturing on targetfd using memorized tmpfile. """
        try:
            os.fstat(self.targetfd_save)
        except (AttributeError, OSError):
            raise ValueError("saved filedescriptor not valid anymore")
        os.dup2(self.tmpfile_fd, self.targetfd)
        self.syscapture.start()

    def snap(self):
        f = self.tmpfile
        f.seek(0)
        res = f.read()
        enc = getattr(f, "encoding", None)
        if enc:
            res = py.builtin._totext(res, enc, "replace")
        if res:
            enc = getattr(f, "encoding", None)
            if enc and isinstance(res, bytes):
                res = py.builtin._totext(res, enc, "replace")
            f.truncate(0)
            f.seek(0)
            return res
        return ''

    def done(self):
        """ stop capturing, restore streams, return original capture file,
            seeked to position zero. """
        targetfd_save = self.__dict__.pop("targetfd_save")
        os.dup2(targetfd_save, self.targetfd)
        os.close(targetfd_save)
        self.syscapture.done()
        self.tmpfile.close()

    def suspend(self):
        self.syscapture.suspend()
        os.dup2(self.targetfd_save, self.targetfd)

    def resume(self):
        self.syscapture.resume()
        os.dup2(self.tmpfile_fd, self.targetfd)

    def writeorg(self, data):
        """ write to original file descriptor. """
        if py.builtin._istext(data):
            data = data.encode("utf8") # XXX use encoding of original stream
        os.write(self.targetfd_save, data)
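
# Illustrative sketch, not part of the diff: the fd-level capture life cycle
# on fd 1 (stdout); assumes a real stdout descriptor is present.
cap = FDCapture(1)
cap.start()               # dup2 the tmpfile over fd 1
os.write(1, b"hello\n")   # writes land in the capture tmpfile
text = cap.snap()         # read and decode the snapshot, truncate the tmpfile
cap.done()                # restore the saved fd and close the tmpfile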


class SysCapture:
    def __init__(self, fd, tmpfile=None):
        name = patchsysdict[fd]
        self._old = getattr(sys, name)
        self.name = name
        if tmpfile is None:
            if name == "stdin":
                tmpfile = DontReadFromInput()
            else:
                tmpfile = TextIO()
        self.tmpfile = tmpfile

    def start(self):
        setattr(sys, self.name, self.tmpfile)

    def snap(self):
        f = self.tmpfile
        res = f.getvalue()
        f.truncate(0)
        f.seek(0)
        return res

    def done(self):
        setattr(sys, self.name, self._old)
        del self._old
        self.tmpfile.close()
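
# Illustrative sketch, not part of the diff: SysCapture swaps only the
# sys.stdout/stderr/stdin attributes; OS-level descriptors stay untouched.
syscap = SysCapture(1)    # patchsysdict maps fd 1 to the name "stdout"
syscap.start()            # sys.stdout becomes an in-memory TextIO buffer
print("captured in memory")
text = syscap.snap()      # drain and reset the buffer
syscap.done()             # restore the previous sys.stdout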

class StdCapture(Capture):
    """ This class allows to capture writes to sys.stdout|stderr "in-memory"
        and will raise errors on tries to read from sys.stdin. It only
        modifies sys.stdout|stderr|stdin attributes and does not
        touch underlying File Descriptors (use StdCaptureFD for that).
    """
    def __init__(self, out=True, err=True, in_=True):
        self._oldout = sys.stdout
        self._olderr = sys.stderr
        self._oldin = sys.stdin
        if out and not hasattr(out, 'file'):
            out = TextIO()
        self.out = out
        if err:
            if not hasattr(err, 'write'):
                err = TextIO()
        self.err = err
        self.in_ = in_

    def startall(self):
        if self.out:
            sys.stdout = self.out
        if self.err:
            sys.stderr = self.err
        if self.in_:
            sys.stdin = self.in_ = DontReadFromInput()

    def done(self, save=True):
        """ return (outfile, errfile) and stop capturing. """
        outfile = errfile = None
        if self.out and not self.out.closed:
            sys.stdout = self._oldout
            outfile = self.out
            outfile.seek(0)
        if self.err and not self.err.closed:
            sys.stderr = self._olderr
            errfile = self.err
            errfile.seek(0)
        if self.in_:
            sys.stdin = self._oldin
        return outfile, errfile
    def suspend(self):
        setattr(sys, self.name, self._old)

    def resume(self):
        """ resume capturing with original temp files. """
        self.startall()
        setattr(sys, self.name, self.tmpfile)

    def readouterr(self):
        """ return snapshot value of stdout/stderr capturings. """
        out = err = ""
        if self.out:
            out = self.out.getvalue()
            self.out.truncate(0)
            self.out.seek(0)
        if self.err:
            err = self.err.getvalue()
            self.err.truncate(0)
            self.err.seek(0)
        return out, err
    def writeorg(self, data):
        self._old.write(data)
        self._old.flush()


class DontReadFromInput:

@@ -1,4 +1,9 @@
""" command line options, ini-file and conftest.py processing. """
import argparse
import shlex
import traceback
import types
import warnings

import py
# DON't import pytest here because it causes import cycle troubles
@@ -7,6 +12,13 @@ from _pytest import hookspec # the extension point definitions
from _pytest.core import PluginManager

# pytest startup
#
class ConftestImportFailure(Exception):
    def __init__(self, path, excinfo):
        Exception.__init__(self, path, excinfo)
        self.path = path
        self.excinfo = excinfo


def main(args=None, plugins=None):
    """ return exit code, after performing an in-process test run.
@@ -16,8 +28,17 @@ def main(args=None, plugins=None):
    :arg plugins: list of plugin objects to be auto-registered during
        initialization.
    """
    config = _prepareconfig(args, plugins)
    return config.hook.pytest_cmdline_main(config=config)
    try:
        config = _prepareconfig(args, plugins)
    except ConftestImportFailure:
        e = sys.exc_info()[1]
        tw = py.io.TerminalWriter(sys.stderr)
        for line in traceback.format_exception(*e.excinfo):
            tw.line(line.rstrip(), red=True)
        tw.line("ERROR: could not load %s\n" % (e.path), red=True)
        return 4
    else:
        return config.hook.pytest_cmdline_main(config=config)
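
# Illustrative sketch, not part of the diff: with the try/except added above,
# a broken conftest.py makes an in-process run return exit code 4 instead of
# raising out of main(). The "tests/" path is an assumption.
import pytest

exit_code = pytest.main(["tests/"])
if exit_code == 4:
    print("could not load a conftest.py (traceback was printed to stderr)")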

class cmdline: # compatibility namespace
    main = staticmethod(main)
@@ -54,13 +75,17 @@ def _prepareconfig(args=None, plugins=None):
    elif not isinstance(args, (tuple, list)):
        if not isinstance(args, str):
            raise ValueError("not a string or argument list: %r" % (args,))
        args = py.std.shlex.split(args)
        args = shlex.split(args)
    pluginmanager = get_plugin_manager()
    if plugins:
        for plugin in plugins:
            pluginmanager.register(plugin)
    return pluginmanager.hook.pytest_cmdline_parse(
            pluginmanager=pluginmanager, args=args)
    try:
        if plugins:
            for plugin in plugins:
                pluginmanager.register(plugin)
        return pluginmanager.hook.pytest_cmdline_parse(
                pluginmanager=pluginmanager, args=args)
    except Exception:
        pluginmanager.ensure_shutdown()
        raise

class PytestPluginManager(PluginManager):
    def __init__(self, hookspecs=[hookspec]):
@@ -82,6 +107,8 @@ class PytestPluginManager(PluginManager):
        config.addinivalue_line("markers",
            "trylast: mark a hook implementation function such that the "
            "plugin machinery will try to call it last/as late as possible.")
        for warning in self._warnings:
            config.warn(code="I1", message=warning)


class Parser:
@@ -94,7 +121,6 @@ class Parser:
        self._usage = usage
        self._inidict = {}
        self._ininames = []
        self.hints = []

    def processoption(self, option):
        if self._processopt:
@@ -159,8 +185,7 @@ class Parser:
            a = option.attrs()
            arggroup.add_argument(*n, **a)
        # bash like autocompletion for dirs (appending '/')
        optparser.add_argument(FILE_OR_DIR, nargs='*'
            ).completer=filescompleter
        optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter
        return optparser

    def parse_setoption(self, args, option):
@@ -207,7 +232,7 @@ class ArgumentError(Exception):


class Argument:
    """class that mimics the necessary behaviour of py.std.optparse.Option """
    """class that mimics the necessary behaviour of optparse.Option """
    _typ_map = {
        'int': int,
        'string': str,
@@ -225,7 +250,7 @@ class Argument:
        try:
            help = attrs['help']
            if '%default' in help:
                py.std.warnings.warn(
                warnings.warn(
                    'pytest now uses argparse. "%default" should be'
                    ' changed to "%(default)s" ',
                    FutureWarning,
@@ -241,7 +266,7 @@ class Argument:
        if isinstance(typ, py.builtin._basestring):
            if typ == 'choice':
                if self.TYPE_WARN:
                    py.std.warnings.warn(
                    warnings.warn(
                        'type argument to addoption() is a string %r.'
                        ' For parsearg this is optional and when supplied '
                        ' should be a type.'
@@ -253,7 +278,7 @@ class Argument:
                attrs['type'] = type(attrs['choices'][0])
            else:
                if self.TYPE_WARN:
                    py.std.warnings.warn(
                    warnings.warn(
                        'type argument to addoption() is a string %r.'
                        ' For parsearg this should be a type.'
                        ' (options: %s)' % (typ, names),
@@ -373,32 +398,24 @@ class OptionGroup:
        self.options.append(option)


class MyOptionParser(py.std.argparse.ArgumentParser):
class MyOptionParser(argparse.ArgumentParser):
    def __init__(self, parser):
        self._parser = parser
        py.std.argparse.ArgumentParser.__init__(self, usage=parser._usage,
        argparse.ArgumentParser.__init__(self, usage=parser._usage,
            add_help=False, formatter_class=DropShorterLongHelpFormatter)

    def format_epilog(self, formatter):
        hints = self._parser.hints
        if hints:
            s = "\n".join(["hint: " + x for x in hints]) + "\n"
            s = "\n" + s + "\n"
            return s
        return ""

    def parse_args(self, args=None, namespace=None):
        """allow splitting of positional arguments"""
        args, argv = self.parse_known_args(args, namespace)
        if argv:
            for arg in argv:
                if arg and arg[0] == '-':
                    msg = py.std.argparse._('unrecognized arguments: %s')
                    msg = argparse._('unrecognized arguments: %s')
                    self.error(msg % ' '.join(argv))
            getattr(args, FILE_OR_DIR).extend(argv)
        return args

class DropShorterLongHelpFormatter(py.std.argparse.HelpFormatter):
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
    """shorten help for long options that differ only in extra hyphens

    - collapse **long** options that are the same except for extra hyphens
@@ -408,7 +425,7 @@ class DropShorterLongHelpFormatter(py.std.argparse.HelpFormatter):
    - cache result on action object as this is called at least 2 times
    """
    def _format_action_invocation(self, action):
        orgstr = py.std.argparse.HelpFormatter._format_action_invocation(self, action)
        orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
        if orgstr and orgstr[0] != '-': # only optional arguments
            return orgstr
        res = getattr(action, '_formatted_action_invocation', None)
@@ -442,7 +459,7 @@ class DropShorterLongHelpFormatter(py.std.argparse.HelpFormatter):
            if len(option) == 2 or option[2] == ' ':
                return_list.append(option)
            if option[2:] == short_long.get(option.replace('-', '')):
                return_list.append(option)
                return_list.append(option.replace(' ', '='))
        action._formatted_action_invocation = ', '.join(return_list)
        return action._formatted_action_invocation

@@ -451,38 +468,32 @@ class Conftest(object):
    """ the single place for accessing values and interacting
        towards conftest modules from pytest objects.
    """
    def __init__(self, onimport=None, confcutdir=None):
    def __init__(self, onimport=None):
        self._path2confmods = {}
        self._onimport = onimport
        self._conftestpath2mod = {}
        self._confcutdir = confcutdir
        self._confcutdir = None

    def setinitial(self, args):
        """ try to find a first anchor path for looking up global values
            from conftests. This function is usually called _before_
            argument parsing. conftest files may add command line options
            and we thus have no completely safe way of determining
            which parts of the arguments are actually related to options
            and which are file system paths. We just try here to get
            bootstrapped ...
    def setinitial(self, namespace):
        """ load initial conftest files given a preparsed "namespace".
            As conftest files may add their own command line options
            which have arguments ('--my-opt somepath') we might get some
            false positives. All builtin and 3rd party plugins will have
            been loaded, however, so common options will not confuse our logic
            here.
        """
        current = py.path.local()
        opt = '--confcutdir'
        for i in range(len(args)):
            opt1 = str(args[i])
            if opt1.startswith(opt):
                if opt1 == opt:
                    if len(args) > i:
                        p = current.join(args[i+1], abs=True)
                elif opt1.startswith(opt + "="):
                    p = current.join(opt1[len(opt)+1:], abs=1)
                self._confcutdir = p
                break
        self._confcutdir = current.join(namespace.confcutdir, abs=True) \
                if namespace.confcutdir else None
        testpaths = namespace.file_or_dir
        foundanchor = False
        for arg in args:
            if hasattr(arg, 'startswith') and arg.startswith("--"):
                continue
            anchor = current.join(arg, abs=1)
        for path in testpaths:
            path = str(path)
            # remove node-id syntax
            i = path.find("::")
            if i != -1:
                path = path[:i]
            anchor = current.join(path, abs=1)
            if exists(anchor): # we found some file object
                self._try_load_conftest(anchor)
                foundanchor = True
@@ -490,7 +501,7 @@ class Conftest(object):
            self._try_load_conftest(current)

    def _try_load_conftest(self, anchor):
        self._path2confmods[None] = self.getconftestmodules(anchor)
        self.getconftestmodules(anchor)
        # let's also consider test* subdirs
        if anchor.check(dir=1):
            for x in anchor.listdir("test*"):
@@ -499,28 +510,22 @@ class Conftest(object):

    def getconftestmodules(self, path):
        try:
            clist = self._path2confmods[path]
            return self._path2confmods[path]
        except KeyError:
            if path is None:
                raise ValueError("missing default conftest.")
            clist = []
            for parent in path.parts():
                if self._confcutdir and self._confcutdir.relto(parent):
                    continue
                conftestpath = parent.join("conftest.py")
                if conftestpath.check(file=1):
                    clist.append(self.importconftest(conftestpath))
                    mod = self.importconftest(conftestpath)
                    clist.append(mod)
            self._path2confmods[path] = clist
            return clist
        return clist

    def rget(self, name, path=None):
        mod, value = self.rget_with_confmod(name, path)
        return value

    def rget_with_confmod(self, name, path=None):
    def rget_with_confmod(self, name, path):
        modules = self.getconftestmodules(path)
        modules.reverse()
        for mod in modules:
        for mod in reversed(modules):
            try:
                return mod, getattr(mod, name)
            except AttributeError:
@@ -528,27 +533,27 @@ class Conftest(object):
        raise KeyError(name)

    def importconftest(self, conftestpath):
        assert conftestpath.check(), conftestpath
        try:
            return self._conftestpath2mod[conftestpath]
        except KeyError:
            pkgpath = conftestpath.pypkgpath()
            if pkgpath is None:
                _ensure_removed_sysmodule(conftestpath.purebasename)
            self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport()
            try:
                mod = conftestpath.pyimport()
            except Exception:
                raise ConftestImportFailure(conftestpath, sys.exc_info())
            self._conftestpath2mod[conftestpath] = mod
            dirpath = conftestpath.dirpath()
            if dirpath in self._path2confmods:
            for path, mods in self._path2confmods.items():
                if path and path.relto(dirpath) or path == dirpath:
                    assert mod not in mods
                    mods.append(mod)
            self._postimport(mod)
            if self._onimport:
                self._onimport(mod)
            return mod

    def _postimport(self, mod):
        if self._onimport:
            self._onimport(mod)
        return mod

def _ensure_removed_sysmodule(modname):
    try:
@@ -563,6 +568,11 @@ class CmdOptions(object):
    def __repr__(self):
        return "<CmdOptions %r>" %(self.__dict__,)

class Notset:
    def __repr__(self):
        return "<NOTSET>"

notset = Notset()
FILE_OR_DIR = 'file_or_dir'
class Config(object):
    """ access to configuration values, pluginmanager and plugin hooks. """
@@ -613,6 +623,14 @@ class Config(object):
        self.hook.pytest_unconfigure(config=self)
        self.pluginmanager.ensure_shutdown()

    def warn(self, code, message):
        """ generate a warning for this test session. """
        self.hook.pytest_logwarning(code=code, message=message,
                                    fslocation=None, nodeid=None)

    def get_terminal_writer(self):
        return self.pluginmanager.getplugin("terminalreporter")._tw

    def pytest_cmdline_parse(self, pluginmanager, args):
        assert self == pluginmanager.config, (self, pluginmanager.config)
        self.parse(args)
@@ -670,12 +688,20 @@ class Config(object):
            plugins += self._conftest.getconftestmodules(fspath)
        return plugins

    def pytest_load_initial_conftests(self, parser, args):
        self._conftest.setinitial(args)
    def pytest_load_initial_conftests(self, early_config):
        self._conftest.setinitial(early_config.known_args_namespace)
    pytest_load_initial_conftests.trylast = True

    def _initini(self, args):
        self.inicfg = getcfg(args, ["pytest.ini", "tox.ini", "setup.cfg"])
        parsed_args = self._parser.parse_known_args(args)
        if parsed_args.inifilename:
            iniconfig = py.iniconfig.IniConfig(parsed_args.inifilename)
            if 'pytest' in iniconfig.sections:
                self.inicfg = iniconfig['pytest']
            else:
                self.inicfg = {}
        else:
            self.inicfg = getcfg(args, ["pytest.ini", "tox.ini", "setup.cfg"])
        self._parser.addini('addopts', 'extra command line options', 'args')
        self._parser.addini('minversion', 'minimally required pytest version')

@@ -687,8 +713,19 @@ class Config(object):
        self.pluginmanager.consider_preparse(args)
        self.pluginmanager.consider_setuptools_entrypoints()
        self.pluginmanager.consider_env()
        self.hook.pytest_load_initial_conftests(early_config=self,
            args=args, parser=self._parser)
        self.known_args_namespace = ns = self._parser.parse_known_args(args)
        try:
            self.hook.pytest_load_initial_conftests(early_config=self,
                args=args, parser=self._parser)
        except ConftestImportFailure:
            e = sys.exc_info()[1]
            if ns.help or ns.version:
                # we don't want to prevent --help/--version from working,
                # so just let it pass and print a warning at the end
                self.pluginmanager._warnings.append(
                    "could not load initial conftests (%s)\n" % e.path)
            else:
                raise

    def _checkversion(self):
        import pytest
@@ -704,17 +741,15 @@ class Config(object):

    def parse(self, args):
        # parse given cmdline arguments into this config object.
        # Note that this can only be called once per testing process.
        assert not hasattr(self, 'args'), (
            "can only parse cmdline args at most once per Config object")
        self._origargs = args
        self._preparse(args)
        # XXX deprecated hook:
        self.hook.pytest_cmdline_preparse(config=self, args=args)
        self._parser.hints.extend(self.pluginmanager._hints)
        args = self._parser.parse_setoption(args, self.option)
        if not args:
            args.append(py.std.os.getcwd())
            args.append(os.getcwd())
        self.args = args

    def addinivalue_line(self, name, line):
@@ -752,18 +787,18 @@ class Config(object):
        if type == "pathlist":
            dp = py.path.local(self.inicfg.config.path).dirpath()
            l = []
            for relpath in py.std.shlex.split(value):
            for relpath in shlex.split(value):
                l.append(dp.join(relpath, abs=True))
            return l
        elif type == "args":
            return py.std.shlex.split(value)
            return shlex.split(value)
        elif type == "linelist":
            return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
        else:
            assert type is None
            return value

    def _getconftest_pathlist(self, name, path=None):
    def _getconftest_pathlist(self, name, path):
        try:
            mod, relroots = self._conftest.rget_with_confmod(name, path)
        except KeyError:
@@ -777,47 +812,36 @@ class Config(object):
            l.append(relroot)
        return l

    def _getconftest(self, name, path=None, check=False):
        if check:
            self._checkconftest(name)
        return self._conftest.rget(name, path)

    def getoption(self, name):
    def getoption(self, name, default=notset, skip=False):
        """ return command line option value.

        :arg name: name of the option. You may also specify
            the literal ``--OPT`` option instead of the "dest" option name.
        :arg default: default value if no option of that name exists.
        :arg skip: if True raise pytest.skip if option does not exist
            or has a None value.
        """
        name = self._opt2dest.get(name, name)
        try:
            return getattr(self.option, name)
            val = getattr(self.option, name)
            if val is None and skip:
                raise AttributeError(name)
            return val
        except AttributeError:
            if default is not notset:
                return default
            if skip:
                import pytest
                pytest.skip("no %r option found" %(name,))
            raise ValueError("no option named %r" % (name,))

    def getvalue(self, name, path=None):
        """ return command line option value.

        :arg name: name of the command line option

        (deprecated) if we can't find the option also lookup
        the name in a matching conftest file.
        """
        try:
            return getattr(self.option, name)
        except AttributeError:
            return self._getconftest(name, path, check=False)
        """ (deprecated, use getoption()) """
        return self.getoption(name)

    def getvalueorskip(self, name, path=None):
        """ (deprecated) return getvalue(name) or call
            pytest.skip if no value exists. """
        __tracebackhide__ = True
        try:
            val = self.getvalue(name, path)
            if val is None:
                raise KeyError(name)
            return val
        except KeyError:
            py.test.skip("no %r value found" %(name,))
        """ (deprecated, use getoption(skip=True)) """
        return self.getoption(name, skip=True)
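
# Illustrative sketch, not part of the diff: the widened getoption() signature
# subsumes the deprecated getvalue()/getvalueorskip() helpers. Option names
# other than "verbose" are assumptions.
verbose = config.getoption("verbose")                  # ValueError if unknown
level = config.getoption("report_level", default=0)    # fallback if unknown
dsn = config.getoption("database_dsn", skip=True)      # pytest.skip if missing/None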

def exists(path, ignore=EnvironmentError):
    try:
@@ -848,7 +872,7 @@ def setns(obj, dic):
        mod = getattr(obj, name, None)
        if mod is None:
            modname = "pytest.%s" % name
            mod = py.std.types.ModuleType(modname)
            mod = types.ModuleType(modname)
            sys.modules[modname] = mod
            mod.__all__ = []
        setattr(obj, name, mod)

@@ -1,6 +1,7 @@
"""
pytest PluginManager, basic initialization and tracing.
"""
import os
import sys
import inspect
import py
@@ -71,7 +72,7 @@ class PluginManager(object):
        self._name2plugin = {}
        self._listattrcache = {}
        self._plugins = []
        self._hints = []
        self._warnings = []
        self.trace = TagTracer().get("pluginmanage")
        self._plugin_distinfo = []
        self._shutdown = []
@@ -137,7 +138,8 @@ class PluginManager(object):

    def skipifmissing(self, name):
        if not self.hasplugin(name):
            py.test.skip("plugin %r is missing" % name)
            import pytest
            pytest.skip("plugin %r is missing" % name)

    def hasplugin(self, name):
        return bool(self.getplugin(name))
@@ -153,7 +155,7 @@ class PluginManager(object):
    # API for bootstrapping
    #
    def _envlist(self, varname):
        val = py.std.os.environ.get(varname, None)
        val = os.environ.get(varname, None)
        if val is not None:
            return val.split(',')
        return ()
@@ -220,12 +222,11 @@ class PluginManager(object):
                return self.import_plugin(modname[7:])
            raise
        except:
            e = py.std.sys.exc_info()[1]
            if not hasattr(py.test, 'skip'):
            e = sys.exc_info()[1]
            import pytest
            if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
                raise
            elif not isinstance(e, py.test.skip.Exception):
                raise
            self._hints.append("skipped plugin %r: %s" %((modname, e.msg)))
            self._warnings.append("skipped plugin %r: %s" %((modname, e.msg)))
        else:
            self.register(mod, modname)
            self.consider_module(mod)
@@ -240,18 +241,22 @@ class PluginManager(object):
            pass
        l = []
        last = []
        wrappers = []
        for plugin in plugins:
            try:
                meth = getattr(plugin, attrname)
                if hasattr(meth, 'tryfirst'):
                    last.append(meth)
                elif hasattr(meth, 'trylast'):
                    l.insert(0, meth)
                else:
                    l.append(meth)
            except AttributeError:
                continue
            if hasattr(meth, 'hookwrapper'):
                wrappers.append(meth)
            elif hasattr(meth, 'tryfirst'):
                last.append(meth)
            elif hasattr(meth, 'trylast'):
                l.insert(0, meth)
            else:
                l.append(meth)
        l.extend(last)
        l.extend(wrappers)
        self._listattrcache[key] = list(l)
        return l

@@ -272,6 +277,14 @@ def importplugin(importspec):

class MultiCall:
    """ execute a call into multiple python functions/methods. """

    class WrongHookWrapper(Exception):
        """ a hook wrapper does not behave correctly. """
        def __init__(self, func, message):
            Exception.__init__(self, func, message)
            self.func = func
            self.message = message

    def __init__(self, methods, kwargs, firstresult=False):
        self.methods = list(methods)
        self.kwargs = kwargs
@@ -283,16 +296,39 @@ class MultiCall:
        return "<MultiCall %s, kwargs=%r>" %(status, self.kwargs)

    def execute(self):
        while self.methods:
            method = self.methods.pop()
            kwargs = self.getkwargs(method)
            res = method(**kwargs)
            if res is not None:
                self.results.append(res)
                if self.firstresult:
                    return res
        if not self.firstresult:
            return self.results
        next_finalizers = []
        try:
            while self.methods:
                method = self.methods.pop()
                kwargs = self.getkwargs(method)
                if hasattr(method, "hookwrapper"):
                    it = method(**kwargs)
                    next = getattr(it, "next", None)
                    if next is None:
                        next = getattr(it, "__next__", None)
                        if next is None:
                            raise self.WrongHookWrapper(method,
                                "wrapper does not contain a yield")
                    res = next()
                    next_finalizers.append((method, next))
                else:
                    res = method(**kwargs)
                if res is not None:
                    self.results.append(res)
                    if self.firstresult:
                        return res
            if not self.firstresult:
                return self.results
        finally:
            for method, fin in reversed(next_finalizers):
                try:
                    fin()
                except StopIteration:
                    pass
                else:
                    raise self.WrongHookWrapper(method,
                        "wrapper contains more than one yield")


    def getkwargs(self, method):
        kwargs = {}

@@ -1,5 +1,6 @@
""" discover and run doctests in modules and test files."""

from __future__ import absolute_import
import traceback
import pytest, py
from _pytest.python import FixtureRequest, FuncFixtureInfo
from py._code.code import TerminalRepr, ReprFileLocation
@@ -43,7 +44,7 @@ class DoctestItem(pytest.Item):
        self.runner.run(self.dtest)

    def repr_failure(self, excinfo):
        doctest = py.std.doctest
        import doctest
        if excinfo.errisinstance((doctest.DocTestFailure,
                                  doctest.UnexpectedException)):
            doctestfailure = excinfo.value
@@ -56,8 +57,8 @@ class DoctestItem(pytest.Item):
            lineno = test.lineno + example.lineno + 1
            message = excinfo.type.__name__
            reprlocation = ReprFileLocation(filename, lineno, message)
            checker = py.std.doctest.OutputChecker()
            REPORT_UDIFF = py.std.doctest.REPORT_UDIFF
            checker = doctest.OutputChecker()
            REPORT_UDIFF = doctest.REPORT_UDIFF
            filelines = py.path.local(filename).readlines(cr=0)
            lines = []
            if lineno is not None:
@@ -78,7 +79,7 @@ class DoctestItem(pytest.Item):
            inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
            lines += ["UNEXPECTED EXCEPTION: %s" %
                      repr(inner_excinfo.value)]
            lines += py.std.traceback.format_exception(*excinfo.value.exc_info)
            lines += traceback.format_exception(*excinfo.value.exc_info)
            return ReprFailDoctest(reprlocation, lines)
        else:
            return super(DoctestItem, self).repr_failure(excinfo)
@@ -88,7 +89,7 @@ class DoctestItem(pytest.Item):

class DoctestTextfile(DoctestItem, pytest.File):
    def runtest(self):
        doctest = py.std.doctest
        import doctest
        # satisfy `FixtureRequest` constructor...
        self.funcargs = {}
        fm = self.session._fixturemanager
@@ -106,7 +107,7 @@ class DoctestTextfile(DoctestItem, pytest.File):

class DoctestModule(pytest.File):
    def collect(self):
        doctest = py.std.doctest
        import doctest
        if self.fspath.basename == "conftest.py":
            module = self.config._conftest.importconftest(self.fspath)
        else:

@@ -1,9 +1,16 @@
""" generate a single-file self-contained version of pytest """
import py
import os
import sys
import pkgutil

import py

import _pytest



def find_toplevel(name):
    for syspath in py.std.sys.path:
    for syspath in sys.path:
        base = py.path.local(syspath)
        lib = base/name
        if lib.check(dir=1):
@@ -29,9 +36,10 @@ def pkg_to_mapping(name):
    return name2src

def compress_mapping(mapping):
    data = py.std.pickle.dumps(mapping, 2)
    data = py.std.zlib.compress(data, 9)
    data = py.std.base64.encodestring(data)
    import base64, pickle, zlib
    data = pickle.dumps(mapping, 2)
    data = zlib.compress(data, 9)
    data = base64.encodestring(data)
    data = data.decode('ascii')
    return data

@@ -64,11 +72,11 @@ def pytest_cmdline_main(config):
    deps = ['py', '_pytest', 'pytest']
    if sys.version_info < (2,7):
        deps.append("argparse")
        tw.line("generated script will run on python2.5-python3.3++")
        tw.line("generated script will run on python2.6-python3.3++")
    else:
        tw.line("WARNING: generated script will not run on python2.6 "
                "or below due to 'argparse' dependency. Use python2.6 "
                "to generate a python2.5/6 compatible script", red=True)
                "due to 'argparse' dependency. Use python2.6 "
                "to generate a python2.6 compatible script", red=True)
    script = generate_script(
        'import pytest; raise SystemExit(pytest.cmdline.main())',
        deps,
@@ -78,3 +86,42 @@ def pytest_cmdline_main(config):
    tw.line("generated pytest standalone script: %s" % genscript,
            bold=True)
    return 0


def pytest_namespace():
    return {'freeze_includes': freeze_includes}


def freeze_includes():
    """
    Returns a list of module names used by py.test that should be
    included by cx_freeze.
    """
    result = list(_iter_all_modules(py))
    result += list(_iter_all_modules(_pytest))
    return result


def _iter_all_modules(package, prefix=''):
    """
    Iterates over the names of all modules that can be found in the given
    package, recursively.

    Example:
        _iter_all_modules(_pytest) ->
            ['_pytest.assertion.newinterpret',
             '_pytest.capture',
             '_pytest.core',
             ...
            ]
    """
    if type(package) is not str:
        path, prefix = package.__path__[0], package.__name__ + '.'
    else:
        path = package
    for _, name, is_package in pkgutil.iter_modules([path]):
        if is_package:
            for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
                yield prefix + m
        else:
            yield prefix + name
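
# Illustrative sketch, not part of the diff: wiring freeze_includes() into a
# cx_Freeze setup script so a frozen binary can still run py.test. Script and
# project names are assumptions.
from cx_Freeze import setup, Executable

setup(
    name="frozen-tests",
    executables=[Executable("runtests.py")],
    options={"build_exe": {"includes": freeze_includes()}},
)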

@@ -64,7 +64,6 @@ def pytest_cmdline_main(config):
def showhelp(config):
    tw = py.io.TerminalWriter()
    tw.write(config._parser.optparser.format_help())
    tw.write(config._parser.optparser.format_epilog(None))
    tw.line()
    tw.line()
    #tw.sep( "=", "config file settings")
@@ -86,16 +85,10 @@ def showhelp(config):
    tw.line("to see available fixtures type: py.test --fixtures")
    tw.line("(shown according to specified file_or_dir or current dir "
            "if not specified)")
    for warning in config.pluginmanager._warnings:
        tw.line("warning: %s" % (warning,), red=True)
    return

    tw.line("conftest.py options:")
    tw.line()
    conftestitems = sorted(config._parser._conftestdict.items())
    for name, help in conftest_options + conftestitems:
        line = " %-15s %s" %(name, help)
        tw.line(line[:tw.fullwidth])
    tw.line()
    #tw.sep( "=")

conftest_options = [
    ('pytest_plugins', 'list of plugin names to load'),

@@ -53,8 +53,8 @@ def pytest_cmdline_main(config):
pytest_cmdline_main.firstresult = True

def pytest_load_initial_conftests(args, early_config, parser):
    """ implements loading initial conftests.
    """
    """ implements the loading of initial conftest files ahead
    of command line option parsing. """

def pytest_configure(config):
    """ called after command line options have been parsed
@@ -152,9 +152,9 @@ def pytest_runtest_protocol(item, nextitem):

    :arg item: test item for which the runtest protocol is performed.

    :arg nexitem: the scheduled-to-be-next test item (or None if this
        is the end my friend). This argument is passed on to
        :py:func:`pytest_runtest_teardown`.
    :arg nextitem: the scheduled-to-be-next test item (or None if this
        is the end my friend). This argument is passed on to
        :py:func:`pytest_runtest_teardown`.

    :return boolean: True if no further hook implementations should be invoked.
    """
@@ -172,10 +172,10 @@ def pytest_runtest_call(item):
def pytest_runtest_teardown(item, nextitem):
    """ called after ``pytest_runtest_call``.

    :arg nexitem: the scheduled-to-be-next test item (None if no further
        test item is scheduled). This argument can be used to
        perform exact teardowns, i.e. calling just enough finalizers
        so that nextitem only needs to call setup-functions.
    :arg nextitem: the scheduled-to-be-next test item (None if no further
        test item is scheduled). This argument can be used to
        perform exact teardowns, i.e. calling just enough finalizers
        so that nextitem only needs to call setup-functions.
    """

def pytest_runtest_makereport(item, call):
@@ -227,6 +227,11 @@ pytest_report_teststatus.firstresult = True
def pytest_terminal_summary(terminalreporter):
    """ add additional section in terminal summary reporting. """

def pytest_logwarning(message, code, nodeid, fslocation):
    """ process a warning specified by a message, a code string,
    a nodeid and fslocation (both of which may be None
    if the warning is not tied to a particular node/location)."""
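
# Illustrative sketch, not part of the diff: a minimal conftest.py
# implementation of the new pytest_logwarning hook; the log file name
# is an assumption.
def pytest_logwarning(message, code, nodeid, fslocation):
    with open("pytest-warnings.log", "a") as f:
        f.write("%s %s %s: %s\n" % (code, nodeid or "-", fslocation or "-", message))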

# -------------------------------------------------------------------------
# doctest hooks
# -------------------------------------------------------------------------

@@ -2,7 +2,6 @@

Based on initial code from Ross Lawley.
"""

import py
import os
import re
@@ -10,20 +9,13 @@ import sys
import time

# Python 2.X and 3.X compatibility
try:
    unichr(65)
except NameError:
if sys.version_info[0] < 3:
    from codecs import open
else:
    unichr = chr
try:
    unicode('A')
except NameError:
    unicode = str
try:
    long(1)
except NameError:
    long = int


class Junit(py.xml.Namespace):
    pass

@@ -108,12 +100,14 @@ class LogXML(object):
        ))

    def _write_captured_output(self, report):
        sec = dict(report.sections)
        for name in ('out', 'err'):
            content = sec.get("Captured std%s" % name)
            if content:
                tag = getattr(Junit, 'system-'+name)
                self.append(tag(bin_xml_escape(content)))
        for capname in ('out', 'err'):
            allcontent = ""
            for name, content in report.get_sections("Captured std%s" %
                                                     capname):
                allcontent += content
            if allcontent:
                tag = getattr(Junit, 'system-'+capname)
                self.append(tag(bin_xml_escape(allcontent)))

    def append(self, obj):
        self.tests[-1].append(obj)
@@ -161,7 +155,7 @@ class LogXML(object):
        if skipreason.startswith("Skipped: "):
            skipreason = bin_xml_escape(skipreason[9:])
        self.append(
            Junit.skipped("%s:%s: %s" % report.longrepr,
            Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
                          type="pytest.skip",
                          message=skipreason
            ))
@@ -204,11 +198,7 @@ class LogXML(object):
        self.suite_start_time = time.time()

    def pytest_sessionfinish(self):
        if py.std.sys.version_info[0] < 3:
            logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8')
        else:
            logfile = open(self.logfile, 'w', encoding='utf-8')

        logfile = open(self.logfile, 'w', encoding='utf-8')
        suite_stop_time = time.time()
        suite_time_delta = suite_stop_time - self.suite_start_time
        numtests = self.passed + self.failed

@@ -1,4 +1,5 @@
""" core implementation of testing process: init, session, runtest loop. """
import re

import py
import pytest, _pytest
@@ -8,7 +9,7 @@ try:
except ImportError:
    from UserDict import DictMixin as MappingMixin

from _pytest.runner import collect_one_node, Skipped
from _pytest.runner import collect_one_node

tracebackcutdir = py.path.local(_pytest.__file__).dirpath()

@@ -19,11 +20,11 @@ EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4

name_re = py.std.re.compile("^[a-zA-Z_]\w*$")
name_re = re.compile("^[a-zA-Z_]\w*$")

def pytest_addoption(parser):
    parser.addini("norecursedirs", "directory patterns to avoid for recursion",
        type="args", default=('.*', 'CVS', '_darcs', '{arch}'))
        type="args", default=('.*', 'CVS', '_darcs', '{arch}', '*.egg'))
    #parser.addini("dirpatterns",
    #    "patterns specifying possible locations of test files",
    #    type="linelist", default=["**/test_*.txt",
@@ -38,6 +39,8 @@ def pytest_addoption(parser):
        help="exit after first num failures or errors.")
    group._addoption('--strict', action="store_true",
        help="run pytest in strict mode, warnings become errors.")
    group._addoption("-c", metavar="file", type=str, dest="inifilename",
        help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")

    group = parser.getgroup("collect", "collection")
    group.addoption('--collectonly', '--collect-only', action="store_true",
@@ -98,6 +101,7 @@ def wrap_session(config, doit):
        if session._testsfailed:
            session.exitstatus = EXIT_TESTSFAILED
    finally:
        excinfo = None # Explicitly break reference cycle.
        session.startdir.chdir()
        if initstate >= 2:
            config.hook.pytest_sessionfinish(
@@ -144,7 +148,7 @@ def pytest_ignore_collect(path, config):
    p = path.dirpath()
    ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
    ignore_paths = ignore_paths or []
    excludeopt = config.getvalue("ignore")
    excludeopt = config.getoption("ignore")
    if excludeopt:
        ignore_paths.extend([py.path.local(x) for x in excludeopt])
    return path in ignore_paths
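
# Illustrative sketch, not part of the diff: pytest_ignore_collect() above
# combines a conftest-level "collect_ignore" pathlist with the --ignore
# command line option. In a conftest.py (file names are assumptions):
collect_ignore = ["setup.py", "broken_module.py"]
# and on the command line:  py.test --ignore=build --ignore=dist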
@@ -233,6 +237,7 @@ class Node(object):

        # used for storing artificial fixturedefs for direct parametrization
        self._name2pseudofixturedef = {}

        #self.extrainit()

    @property
@@ -263,6 +268,20 @@ class Node(object):
        return "<%s %r>" %(self.__class__.__name__,
                           getattr(self, 'name', None))

    def warn(self, code, message):
        """ generate a warning with the given code and message for this
        item. """
        assert isinstance(code, str)
        fslocation = getattr(self, "location", None)
        if fslocation is None:
            fslocation = getattr(self, "fspath", None)
        else:
            fslocation = "%s:%s" % fslocation[:2]

        self.ihook.pytest_logwarning(code=code, message=message,
                                     nodeid=self.nodeid,
                                     fslocation=fslocation)

    # methods for ordering nodes
    @property
    def nodeid(self):
@@ -297,7 +316,7 @@ class Node(object):
        except py.builtin._sysex:
            raise
        except:
            failure = py.std.sys.exc_info()
            failure = sys.exc_info()
            setattr(self, exattrname, failure)
            raise
        setattr(self, attrname, res)
@@ -372,20 +391,24 @@ class Node(object):
        fm = self.session._fixturemanager
        if excinfo.errisinstance(fm.FixtureLookupError):
            return excinfo.value.formatrepr()
        tbfilter = True
        if self.config.option.fulltrace:
            style="long"
        else:
            self._prunetraceback(excinfo)
            # XXX should excinfo.getrepr record all data and toterminal()
            # process it?
            tbfilter = False # prunetraceback already does it
            if style == "auto":
                style = "long"
        # XXX should excinfo.getrepr record all data and toterminal() process it?
        if style is None:
            if self.config.option.tbstyle == "short":
                style = "short"
            else:
                style = "long"

        return excinfo.getrepr(funcargs=True,
                               showlocals=self.config.option.showlocals,
                               style=style)
                               style=style, tbfilter=tbfilter)

    repr_failure = _repr_failure_py

@@ -394,10 +417,6 @@ class Collector(Node):
    and thus iteratively build a tree.
    """

    # the set of exceptions to interpret as "Skip the whole module" during
    # collection
    skip_exceptions = (Skipped,)

    class CollectError(Exception):
        """ an error during collection, contains a custom message. """

@@ -455,6 +474,14 @@ class Item(Node):
    """
    nextitem = None

    def __init__(self, name, parent=None, config=None, session=None):
        super(Item, self).__init__(name, parent, config, session)
        self._report_sections = []

    def add_report_section(self, when, key, content):
        if content:
            self._report_sections.append((when, key, content))

    def reportinfo(self):
        return self.fspath, None, ""


@@ -59,7 +59,11 @@ def derive_importpath(import_path):



notset = object()
class Notset:
    def __repr__(self):
        return "<notset>"

notset = Notset()

class monkeypatch:
    """ object keeping a record of setattr/item/env/syspath changes. """
|
||||
|
||||
@@ -1,17 +1,27 @@
|
||||
""" run test suites written for nose. """
|
||||
|
||||
import pytest, py
|
||||
import sys
|
||||
|
||||
import py
|
||||
import pytest
|
||||
from _pytest import unittest
|
||||
|
||||
|
||||
def get_skip_exceptions():
|
||||
skip_classes = set()
|
||||
for module_name in ('unittest', 'unittest2', 'nose'):
|
||||
mod = sys.modules.get(module_name)
|
||||
if hasattr(mod, 'SkipTest'):
|
||||
skip_classes.add(mod.SkipTest)
|
||||
return tuple(skip_classes)
|
||||
|
||||
|
||||
def pytest_runtest_makereport(__multicall__, item, call):
|
||||
SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None)
|
||||
if SkipTest:
|
||||
if call.excinfo and call.excinfo.errisinstance(SkipTest):
|
||||
# let's substitute the excinfo with a pytest.skip one
|
||||
call2 = call.__class__(lambda:
|
||||
pytest.skip(str(call.excinfo.value)), call.when)
|
||||
call.excinfo = call2.excinfo
|
||||
if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
|
||||
# let's substitute the excinfo with a pytest.skip one
|
||||
call2 = call.__class__(lambda:
|
||||
pytest.skip(str(call.excinfo.value)), call.when)
|
||||
call.excinfo = call2.excinfo
|
||||
|
||||
|
||||
@pytest.mark.trylast
|
||||
@@ -38,13 +48,8 @@ def teardown_nose(item):
|
||||
# #call_optional(item._nosegensetup, 'teardown')
|
||||
# del item.parent._nosegensetup
|
||||
|
||||
|
||||
def pytest_make_collect_report(collector):
|
||||
SkipTest = getattr(sys.modules.get('unittest', None), 'SkipTest', None)
|
||||
if SkipTest is not None:
|
||||
collector.skip_exceptions += (SkipTest,)
|
||||
SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None)
|
||||
if SkipTest is not None:
|
||||
collector.skip_exceptions += (SkipTest,)
|
||||
if isinstance(collector, pytest.Generator):
|
||||
call_optional(collector.obj, 'setup')
|
||||
|
||||
|
||||
@@ -1,8 +1,12 @@
|
||||
""" interactive debugging with PDB, the Python Debugger. """
|
||||
|
||||
import pytest, py
|
||||
from __future__ import absolute_import
|
||||
import pdb
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
import py
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("general")
|
||||
group._addoption('--pdb',
|
||||
@@ -16,50 +20,37 @@ def pytest_configure(config):
|
||||
if config.getvalue("usepdb"):
|
||||
config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
|
||||
|
||||
old_trace = py.std.pdb.set_trace
|
||||
old = (pdb.set_trace, pytestPDB._pluginmanager)
|
||||
def fin():
|
||||
py.std.pdb.set_trace = old_trace
|
||||
py.std.pdb.set_trace = pytest.set_trace
|
||||
pdb.set_trace, pytestPDB._pluginmanager = old
|
||||
pdb.set_trace = pytest.set_trace
|
||||
pytestPDB._pluginmanager = config.pluginmanager
|
||||
config._cleanup.append(fin)
|
||||
|
||||
class pytestPDB:
|
||||
""" Pseudo PDB that defers to the real pdb. """
|
||||
item = None
|
||||
collector = None
|
||||
_pluginmanager = None
|
||||
|
||||
def set_trace(self):
|
||||
""" invoke PDB set_trace debugging, dropping any IO capturing. """
|
||||
frame = sys._getframe().f_back
|
||||
item = self.item or self.collector
|
||||
|
||||
if item is not None:
|
||||
capman = item.config.pluginmanager.getplugin("capturemanager")
|
||||
out, err = capman.suspendcapture()
|
||||
if hasattr(item, 'outerr'):
|
||||
item.outerr = (item.outerr[0] + out, item.outerr[1] + err)
|
||||
capman = None
|
||||
if self._pluginmanager is not None:
|
||||
capman = self._pluginmanager.getplugin("capturemanager")
|
||||
if capman:
|
||||
capman.suspendcapture(in_=True)
|
||||
tw = py.io.TerminalWriter()
|
||||
tw.line()
|
||||
tw.sep(">", "PDB set_trace (IO-capturing turned off)")
|
||||
py.std.pdb.Pdb().set_trace(frame)
|
||||
pdb.Pdb().set_trace(frame)
|
||||
|
||||
def pdbitem(item):
|
||||
pytestPDB.item = item
|
||||
pytest_runtest_setup = pytest_runtest_call = pytest_runtest_teardown = pdbitem
|
||||
|
||||
@pytest.mark.tryfirst
|
||||
def pytest_make_collect_report(__multicall__, collector):
|
||||
try:
|
||||
pytestPDB.collector = collector
|
||||
return __multicall__.execute()
|
||||
finally:
|
||||
pytestPDB.collector = None
|
||||
|
||||
def pytest_runtest_makereport():
|
||||
pytestPDB.item = None
|
||||
|
||||
class PdbInvoke:
|
||||
def pytest_exception_interact(self, node, call, report):
|
||||
return _enter_pdb(node, call.excinfo, report)
|
||||
capman = node.config.pluginmanager.getplugin("capturemanager")
|
||||
if capman:
|
||||
capman.suspendcapture(in_=True)
|
||||
_enter_pdb(node, call.excinfo, report)
|
||||
|
||||
def pytest_internalerror(self, excrepr, excinfo):
|
||||
for line in str(excrepr).split("\n"):
|
||||
@@ -87,7 +78,8 @@ def _enter_pdb(node, excinfo, rep):
|
||||
def _postmortem_traceback(excinfo):
|
||||
# A doctest.UnexpectedException is not useful for post_mortem.
|
||||
# Use the underlying exception instead:
|
||||
if isinstance(excinfo.value, py.std.doctest.UnexpectedException):
|
||||
from doctest import UnexpectedException
|
||||
if isinstance(excinfo.value, UnexpectedException):
|
||||
return excinfo.value.exc_info[2]
|
||||
else:
|
||||
return excinfo._excinfo[2]
|
||||
@@ -101,7 +93,6 @@ def _find_last_non_hidden_frame(stack):
|
||||
|
||||
|
||||
def post_mortem(t):
|
||||
pdb = py.std.pdb
|
||||
class Pdb(pdb.Pdb):
|
||||
def get_stack(self, f, t):
|
||||
stack, i = pdb.Pdb.get_stack(self, f, t)
|
||||
|
||||
@@ -1,15 +1,21 @@
""" (disabled by default) support for testing pytest and pytest plugins. """

import py, pytest
import sys, os
import inspect
import sys
import os
import codecs
import re
import time
import platform
from fnmatch import fnmatch
from _pytest.main import Session, EXIT_OK
import subprocess

import py
import pytest
from py.builtin import print_
from _pytest.core import HookRelay

from _pytest.main import Session, EXIT_OK


def get_public_names(l):
"""Only return names from iterator l without a leading underscore."""

@@ -87,10 +93,10 @@ class HookRecorder:

def _makecallparser(self, method):
name = method.__name__
args, varargs, varkw, default = py.std.inspect.getargspec(method)
args, varargs, varkw, default = inspect.getargspec(method)
if not args or args[0] != "self":
args.insert(0, 'self')
fspec = py.std.inspect.formatargspec(args, varargs, varkw, default)
fspec = inspect.formatargspec(args, varargs, varkw, default)
# we use exec because we want to have early type
# errors on wrong input arguments, using
# *args/**kwargs delays this and gives errors

@@ -122,7 +128,7 @@ class HookRecorder:
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = py.std.sys._getframe(1).f_locals
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):

@@ -210,7 +216,7 @@ class TmpTestdir:

def finalize(self):
for p in self._syspathremove:
py.std.sys.path.remove(p)
sys.path.remove(p)
if hasattr(self, '_olddir'):
self._olddir.chdir()
# delete modules that have been loaded from tmpdir

@@ -246,8 +252,14 @@ class TmpTestdir:
ret = None
for name, value in items:
p = self.tmpdir.join(name).new(ext=ext)
source = py.builtin._totext(py.code.Source(value)).strip()
content = source.encode("utf-8") # + "\n"
source = py.code.Source(value)
def my_totext(s, encoding="utf-8"):
if py.builtin._isbytes(s):
s = py.builtin._totext(s, encoding=encoding)
return s
source_unicode = "\n".join([my_totext(line) for line in source.lines])
source = py.builtin._totext(source_unicode)
content = source.strip().encode("utf-8") # + "\n"
#content = content.rstrip() + "\n"
p.write(content, "wb")
if ret is None:

@@ -277,7 +289,7 @@ class TmpTestdir:
def syspathinsert(self, path=None):
if path is None:
path = self.tmpdir
py.std.sys.path.insert(0, str(path))
sys.path.insert(0, str(path))
self._syspathremove.append(str(path))

def mkdir(self, name):

@@ -420,9 +432,8 @@ class TmpTestdir:
env['PYTHONPATH'] = os.pathsep.join(filter(None, [
str(os.getcwd()), env.get('PYTHONPATH', '')]))
kw['env'] = env
#print "env", env
return py.std.subprocess.Popen(cmdargs,
stdout=stdout, stderr=stderr, **kw)
return subprocess.Popen(cmdargs,
stdout=stdout, stderr=stderr, **kw)

def run(self, *cmdargs):
return self._run(*cmdargs)

@@ -468,9 +479,9 @@ class TmpTestdir:
def _getpybinargs(self, scriptname):
if not self.request.config.getvalue("notoolsonpath"):
# XXX we rely on script referring to the correct environment
# we cannot use "(py.std.sys.executable,script)"
# we cannot use "(sys.executable,script)"
# because on windows the script is e.g. a py.test.exe
return (py.std.sys.executable, _pytest_fullpath,) # noqa
return (sys.executable, _pytest_fullpath,) # noqa
else:
pytest.skip("cannot run %r with --no-tools-on-path" % scriptname)

@@ -490,7 +501,7 @@ class TmpTestdir:

def runpython_c(self, command):
command = self._getsysprepend() + command
return self.run(py.std.sys.executable, "-c", command)
return self.run(sys.executable, "-c", command)

def runpytest(self, *args):
p = py.path.local.make_numbered_dir(prefix="runpytest-",

@@ -517,7 +528,7 @@ class TmpTestdir:

def spawn(self, cmd, expect_timeout=10.0):
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine():
if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform == "darwin":
pytest.xfail("pexpect does not work reliably on darwin?!")

@@ -664,7 +675,7 @@ class LineMatcher:

def fnmatch_lines(self, lines2):
def show(arg1, arg2):
py.builtin.print_(arg1, arg2, file=py.std.sys.stderr)
py.builtin.print_(arg1, arg2, file=sys.stderr)
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
@@ -11,7 +11,8 @@ cutdir = py.path.local(_pytest.__file__).dirpath()

NoneType = type(None)
NOTSET = object()

isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable

def getfslineno(obj):

@@ -44,7 +45,7 @@ class FixtureFunctionMarker:
self.ids = ids

def __call__(self, function):
if inspect.isclass(function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self

@@ -213,18 +214,26 @@ def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
res = __multicall__.execute()
if res is not None:
return res
if inspect.isclass(obj):
if isclass(obj):
#if hasattr(collector.obj, 'unittest'):
# return # we assume it's a mixin class for a TestCase derived one
if collector.classnamefilter(name):
Class = collector._getcustomclass("Class")
return Class(name, parent=collector)
elif collector.funcnamefilter(name) and hasattr(obj, '__call__') and \
elif collector.funcnamefilter(name) and hasattr(obj, "__call__") and \
getfixturemarker(obj) is None:
if is_generator(obj):
return Generator(name, parent=collector)
else:
return list(collector._genfunctions(name, obj))
# mock seems to store unbound methods (issue473), let's normalize it
obj = getattr(obj, "__func__", obj)
if not isfunction(obj):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
return
if getattr(obj, "__test__", True):
if is_generator(obj):
return Generator(name, parent=collector)
else:
return list(collector._genfunctions(name, obj))

def is_generator(func):
try:

@@ -306,6 +315,9 @@ class PyCollector(PyobjMixin, pytest.Collector):
return True

def collect(self):
if not getattr(self.obj, "__test__", True):
return []

# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
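As a quick illustration of the new ``__test__`` check above (a hypothetical
module, not part of this changeset), opting a whole module out of collection
now works nose-style::

    # content of test_helpers.py (hypothetical)
    __test__ = False        # PyCollector.collect() returns [] for this module

    def test_never_collected():
        assert 0            # never runs; the module yields no test items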
@@ -450,8 +462,8 @@ class Module(pytest.File, PyCollector):
try:
mod = self.fspath.pyimport(ensuresyspath=True)
except SyntaxError:
excinfo = py.code.ExceptionInfo()
raise self.CollectError(excinfo.getrepr(style="short"))
raise self.CollectError(
py.code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(

@@ -498,10 +510,9 @@ class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
pytest.skip("class %s.%s with __init__ won't get collected" % (
self.obj.__module__,
self.obj.__name__,
))
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]

def setup(self):

@@ -567,6 +578,12 @@ class FunctionMixin(PyobjMixin):
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=cutdir)
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')

def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):

@@ -577,8 +594,10 @@ class FunctionMixin(PyobjMixin):

def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
return self._repr_failure_py(excinfo,
style=self.config.option.tbstyle)
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)


class Generator(FunctionMixin, PyCollector):

@@ -966,15 +985,16 @@ def raises(ExpectedException, *args, **kwargs):
Performance note:
-----------------

Similar to caught exception objects in Python, explicitly clearing local
references to returned ``py.code.ExceptionInfo`` objects can help the Python
interpreter speed up its garbage collection.
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``py.code.ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.

Clearing those references breaks a reference cycle (``ExceptionInfo`` -->
caught exception --> frame stack raising the exception --> current frame
stack --> local variables --> ``ExceptionInfo``) which makes Python keep all
objects referenced from that cycle (including all local variables in the
current frame) alive until the next cyclic garbage collection run. See the
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
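A minimal sketch of the garbage-collection advice above (hypothetical test,
names illustrative)::

    import pytest

    def test_zero_division():
        with pytest.raises(ZeroDivisionError) as excinfo:
            1 / 0
        assert excinfo.errisinstance(ZeroDivisionError)
        del excinfo  # break the ExceptionInfo <-> frame cycle right away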
@@ -984,7 +1004,16 @@ def raises(ExpectedException, *args, **kwargs):
# we want to catch a AssertionError
# replace our subclass with the builtin one
# see https://bitbucket.org/hpk42/pytest/issue/176/pytestraises
from _pytest.assertion.util import BuiltinAssertionError as ExpectedException
from _pytest.assertion.util import BuiltinAssertionError \
as ExpectedException
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(ExpectedException, tuple):
for exc in ExpectedException:
if not inspect.isclass(exc):
raise TypeError(msg % type(exc))
elif not inspect.isclass(ExpectedException):
raise TypeError(msg % type(ExpectedException))

if not args:
return RaisesContext(ExpectedException)

@@ -1316,7 +1345,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0])
cached_result = (self, [0], None)
return PseudoFixtureDef
raise
result = self._getfuncargvalue(fixturedef)

@@ -1773,13 +1802,15 @@ class FixtureDef:
self._finalizer.append(finalizer)

def finish(self):
while self._finalizer:
func = self._finalizer.pop()
func()
try:
del self.cached_result
except AttributeError:
pass
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result

def execute(self, request):
# get required arguments and register our own finish()

@@ -1787,7 +1818,7 @@ class FixtureDef:
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key = fixturedef.cached_result
result, arg_cache_key, exc = fixturedef.cached_result
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)

@@ -1795,13 +1826,12 @@ class FixtureDef:
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
#print argname, "Found cached_result", cached_result
#print argname, "param_index", param_index
result, cache_key = cached_result
result, cache_key, err = cached_result
if my_cache_key == cache_key:
#print request.fixturename, "CACHE HIT", repr(my_cache_key)
return result
#print request.fixturename, "CACHE MISS"
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
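The third slot of ``cached_result`` now carries ``sys.exc_info()`` from a
failed fixture call, so a later cache hit re-raises instead of handing out
``None``. A hypothetical illustration of the behaviour this enables::

    import pytest

    @pytest.fixture(scope="module")
    def broken():
        raise RuntimeError("setup failed")

    def test_a(broken):   # executes the fixture; the failure is cached
        pass

    def test_b(broken):   # cache hit: the stored exception is re-raised,
        pass              # the fixture function does not run again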
@@ -1818,15 +1848,31 @@ class FixtureDef:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
self.cached_result = (result, my_cache_key)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result

def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))

def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)


def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not inspect.isclass(function)
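``num_mock_patch_args`` lets ``getfuncargnames`` discount the arguments that
``mock.patch`` decorators will inject, so only the remaining names are
resolved as fixtures. A hypothetical sketch, assuming the ``mock`` package is
installed::

    import mock  # or: from unittest import mock (Python 3.3+)

    @mock.patch("os.getcwd")
    def test_cwd(mocked_getcwd, tmpdir):
        # mocked_getcwd comes from the patch decorator; only "tmpdir"
        # is looked up as a pytest fixture
        mocked_getcwd.return_value = str(tmpdir)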
@@ -1836,7 +1882,7 @@ def getfuncargnames(function, startindex=None):
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += len(getattr(function, "patchings", []))
startindex += num_mock_patch_args(function)
function = realfunction
argnames = inspect.getargs(py.code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',

@@ -1,7 +1,8 @@
""" recording warnings during test function execution. """

import py
import sys
import warnings


def pytest_funcarg__recwarn(request):
"""Return a WarningsRecorder instance that provides these methods:

@@ -13,7 +14,6 @@ def pytest_funcarg__recwarn(request):
on warning categories.
"""
if sys.version_info >= (2,7):
import warnings
oldfilters = warnings.filters[:]
warnings.simplefilter('default')
def reset_filters():

@@ -30,26 +30,24 @@ def deprecated_call(func, *args, **kwargs):
""" assert that calling ``func(*args, **kwargs)``
triggers a DeprecationWarning.
"""
warningmodule = py.std.warnings
l = []
oldwarn_explicit = getattr(warningmodule, 'warn_explicit')
oldwarn_explicit = getattr(warnings, 'warn_explicit')
def warn_explicit(*args, **kwargs):
l.append(args)
oldwarn_explicit(*args, **kwargs)
oldwarn = getattr(warningmodule, 'warn')
oldwarn = getattr(warnings, 'warn')
def warn(*args, **kwargs):
l.append(args)
oldwarn(*args, **kwargs)

warningmodule.warn_explicit = warn_explicit
warningmodule.warn = warn
warnings.warn_explicit = warn_explicit
warnings.warn = warn
try:
ret = func(*args, **kwargs)
finally:
warningmodule.warn_explicit = warn_explicit
warningmodule.warn = warn
warnings.warn_explicit = warn_explicit
warnings.warn = warn
if not l:
#print warningmodule
__tracebackhide__ = True
raise AssertionError("%r did not produce DeprecationWarning" %(func,))
return ret
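Usage stays as documented in the docstring above; a minimal sketch with
hypothetical function names::

    import warnings
    import pytest

    def old_api():
        warnings.warn("use new_api() instead", DeprecationWarning)
        return 42

    def test_old_api_warns():
        # passes the call through and asserts a DeprecationWarning fired
        assert pytest.deprecated_call(old_api) == 42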
@@ -65,7 +63,6 @@ class RecordedWarning:

class WarningsRecorder:
def __init__(self):
warningmodule = py.std.warnings
self.list = []
def showwarning(message, category, filename, lineno, line=0):
self.list.append(RecordedWarning(

@@ -76,8 +73,8 @@ class WarningsRecorder:
except TypeError:
# < python2.6
self.old_showwarning(message, category, filename, lineno)
self.old_showwarning = warningmodule.showwarning
warningmodule.showwarning = showwarning
self.old_showwarning = warnings.showwarning
warnings.showwarning = showwarning

def pop(self, cls=Warning):
""" pop the first recorded warning, raise exception if not exists."""

@@ -88,7 +85,6 @@ class WarningsRecorder:
assert 0, "%r not found in %r" %(cls, self.list)

#def resetregistry(self):
# import warnings
# warnings.onceregistry.clear()
# warnings.__warningregistry__.clear()

@@ -96,4 +92,4 @@ class WarningsRecorder:
self.list[:] = []

def finalize(self):
py.std.warnings.showwarning = self.old_showwarning
warnings.showwarning = self.old_showwarning
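For completeness, a small sketch of the ``recwarn`` funcarg this module
provides (hypothetical test)::

    import warnings

    def test_emits_deprecation(recwarn):
        warnings.warn("phase this out", DeprecationWarning)
        w = recwarn.pop(DeprecationWarning)  # first matching recorded warning
        assert issubclass(w.category, DeprecationWarning)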
@@ -1,9 +1,10 @@
""" basic collect and runtest protocol implementations """
import bdb
import sys
from time import time

import py
import pytest
import sys
from time import time
from py._code.code import TerminalRepr

def pytest_namespace():

@@ -118,7 +119,7 @@ def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail") or
call.excinfo.errisinstance(skip.Exception) or
call.excinfo.errisinstance(py.std.bdb.BdbQuit))
call.excinfo.errisinstance(bdb.BdbQuit))

def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when

@@ -135,14 +136,13 @@ class CallInfo:
self.when = when
self.start = time()
try:
try:
self.result = func()
except KeyboardInterrupt:
raise
except:
self.excinfo = py.code.ExceptionInfo()
finally:
self.result = func()
except KeyboardInterrupt:
self.stop = time()
raise
except:
self.excinfo = py.code.ExceptionInfo()
self.stop = time()

def __repr__(self):
if self.excinfo:

@@ -178,6 +178,11 @@ class BaseReport(object):
except UnicodeEncodeError:
out.line("<unprintable longrepr>")

def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content

passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")

@@ -191,6 +196,7 @@ def pytest_runtest_makereport(item, call):
duration = call.stop-call.start
keywords = dict([(x,1) for x in item.keywords])
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None

@@ -209,16 +215,18 @@ def pytest_runtest_makereport(item, call):
else: # exception in setup or teardown
longrepr = item._repr_failure_py(excinfo,
style=item.config.option.tbstyle)
for rwhen, key, content in item._report_sections:
sections.append(("Captured std%s %s" %(key, rwhen), content))
return TestReport(item.nodeid, item.location,
keywords, outcome, longrepr, when,
duration=duration)
sections, duration)

class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(self, nodeid, location,
keywords, outcome, longrepr, when, sections=(), duration=0, **extra):
def __init__(self, nodeid, location, keywords, outcome,
longrepr, when, sections=(), duration=0, **extra):
#: normalized collection node id
self.nodeid = nodeid

@@ -267,7 +275,9 @@ def pytest_make_collect_report(collector):
if not call.excinfo:
outcome = "passed"
else:
if call.excinfo.errisinstance(collector.skip_exceptions):
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)

@@ -284,7 +294,8 @@ def pytest_make_collect_report(collector):


class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
def __init__(self, nodeid, outcome, longrepr, result,
sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr

@@ -318,7 +329,7 @@ class SetupState(object):
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert callable(finalizer)
assert py.builtin.callable(finalizer)
#assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)


@@ -1,7 +1,10 @@
""" support for skip/xfail functions and markers. """

import py, pytest
import os
import sys
import traceback

import py
import pytest

def pytest_addoption(parser):
group = parser.getgroup("general")

@@ -26,11 +29,13 @@ def pytest_configure(config):
"http://pytest.org/latest/skipping.html"
)
config.addinivalue_line("markers",
"xfail(condition, reason=None, run=True): mark the test function "
"xfail(condition, reason=None, run=True, raises=None): mark the test function "
"as an expected failure if eval(condition) has a True value. "
"Optionally specify a reason for better reporting and run=False if "
"you don't even want to execute the test function. See "
"http://pytest.org/latest/skipping.html"
"you don't even want to execute the test function. If only specific "
"exception(s) are expected, you can list them in raises, and if the test fails "
"in other ways, it will be reported as a true failure. "
"See http://pytest.org/latest/skipping.html"
)

def pytest_namespace():

@@ -60,6 +65,12 @@ class MarkEvaluator:
def wasvalid(self):
return not hasattr(self, 'exc')

def invalidraise(self, exc):
raises = self.get('raises')
if not raises:
return
return not isinstance(exc, raises)

def istrue(self):
try:
return self._istrue()
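``invalidraise`` backs the new ``raises=`` argument to ``pytest.mark.xfail``;
a minimal sketch (hypothetical test)::

    import pytest

    @pytest.mark.xfail(raises=IndexError)
    def test_f():
        l = []
        l[0]  # IndexError -> xfail; any other exception -> real failure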
@@ -71,7 +82,7 @@ class MarkEvaluator:
msg = [" " * (self.exc[1].offset + 4) + "^",]
msg.append("SyntaxError: invalid syntax")
else:
msg = py.std.traceback.format_exception_only(*self.exc[:2])
msg = traceback.format_exception_only(*self.exc[:2])
pytest.fail("Error evaluating %r expression\n"
" %s\n"
"%s"

@@ -79,7 +90,7 @@ class MarkEvaluator:
pytrace=False)

def _getglobals(self):
d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
d = {'os': os, 'sys': sys, 'config': self.item.config}
func = self.item.obj
try:
d.update(func.__globals__)

@@ -171,7 +182,11 @@ def pytest_runtest_makereport(__multicall__, item, call):
if not item.config.option.runxfail:
if evalxfail.wasvalid() and evalxfail.istrue():
if call.excinfo:
rep.outcome = "skipped"
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
return rep
else:
rep.outcome = "skipped"
elif call.when == "call":
rep.outcome = "failed"
else:

@@ -186,7 +201,7 @@ def pytest_report_teststatus(report):
if report.skipped:
return "xfailed", "x", "xfail"
elif report.failed:
return "xpassed", "X", "XPASS"
return "xpassed", "X", ("XPASS", {'yellow': True})

# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):

@@ -5,6 +5,8 @@ This is a good source for looking at the various reporting hooks.
import pytest
import py
import sys
import time


def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")

@@ -15,7 +17,7 @@ def pytest_addoption(parser):
group._addoption('-r',
action="store", dest="reportchars", default=None, metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed.")
"(E)error, (s)skipped, (x)failed, (X)passed (w)warnings.")
group._addoption('-l', '--showlocals',
action="store_true", dest="showlocals", default=False,
help="show locals in tracebacks (disabled by default).")

@@ -23,8 +25,8 @@ def pytest_addoption(parser):
action="store", dest="report", default=None, metavar="opts",
help="(deprecated, use -r)")
group._addoption('--tb', metavar="style",
action="store", dest="tbstyle", default='long',
choices=['long', 'short', 'no', 'line', 'native'],
action="store", dest="tbstyle", default='auto',
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
help="traceback print mode (long/short/line/native/no).")
group._addoption('--fulltrace', '--full-trace',
action="store_true", default=False,

@@ -49,7 +51,7 @@ def getreportopt(config):
optvalue = config.option.report
if optvalue:
py.builtin.print_("DEPRECATED: use -r instead of --report option.",
file=py.std.sys.stderr)
file=sys.stderr)
if optvalue:
for setting in optvalue.split(","):
setting = setting.strip()

@@ -75,6 +77,14 @@ def pytest_report_teststatus(report):
letter = "f"
return report.outcome, letter, report.outcome.upper()

class WarningReport:
def __init__(self, code, message, nodeid=None, fslocation=None):
self.code = code
self.message = message
self.nodeid = nodeid
self.fslocation = fslocation


class TerminalReporter:
def __init__(self, config, file=None):
self.config = config

@@ -87,7 +97,7 @@ class TerminalReporter:
self.stats = {}
self.startdir = self.curdir = py.path.local()
if file is None:
file = py.std.sys.stdout
file = sys.stdout
self._tw = self.writer = py.io.TerminalWriter(file)
if self.config.option.color == 'yes':
self._tw.hasmarkup = True

@@ -128,7 +138,8 @@ class TerminalReporter:
self._tw.write(content, **markup)

def write_line(self, line, **markup):
line = str(line)
if not py.builtin._istext(line):
line = py.builtin.text(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)

@@ -147,10 +158,16 @@ class TerminalReporter:
self._tw.line(msg, **kw)

def pytest_internalerror(self, excrepr):
for line in str(excrepr).split("\n"):
for line in py.builtin.text(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1

def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
warning = WarningReport(code=code, fslocation=fslocation,
message=message, nodeid=nodeid)
warnings.append(warning)

def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)

@@ -250,7 +267,7 @@ class TerminalReporter:

@pytest.mark.trylast
def pytest_sessionstart(self, session):
self._sessionstarttime = py.std.time.time()
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)

@@ -334,7 +351,7 @@ class TerminalReporter:
if exitstatus in (0, 1, 2, 4):
self.summary_errors()
self.summary_failures()
self.summary_hints()
self.summary_warnings()
self.config.hook.pytest_terminal_summary(terminalreporter=self)
if exitstatus == 2:
self._report_keyboardinterrupt()

@@ -365,11 +382,10 @@ class TerminalReporter:
fspath = "%s <- %s" % (collect_fspath, fspath)
if fspath:
line = str(fspath)
if lineno is not None:
lineno += 1
line += ":" + str(lineno)
if domain:
line += ": " + str(domain)
split = str(domain).split('[')
split[0] = split[0].replace('.', '::') # don't replace '.' in params
line += "::" + '['.join(split)
else:
line = "[location]"
return line + " "

@@ -400,10 +416,15 @@ class TerminalReporter:
l.append(x)
return l

def summary_hints(self):
if self.config.option.traceconfig:
for hint in self.config.pluginmanager._hints:
self._tw.line("hint: %s" % hint)
def summary_warnings(self):
if self.hasopt("w"):
warnings = self.stats.get("warnings")
if not warnings:
return
self.write_sep("=", "warning summary")
for w in warnings:
self._tw.line("W%s %s %s" % (w.code,
w.fslocation, w.message))

def summary_failures(self):
if self.config.option.tbstyle != "no":
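To see the new warning summary in action (hypothetical module, not from this
changeset): collecting a ``Test*`` class with an ``__init__`` now produces a
``C1`` warning, shown when running ``py.test -rw``::

    # content of test_c1.py (hypothetical)
    class TestNotCollected:
        def __init__(self):
            self.flag = True

        def test_flag(self):     # not collected; a WC1 entry appears in
            assert self.flag     # the "warning summary" section under -rw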
@@ -447,9 +468,10 @@ class TerminalReporter:
self._tw.line(content)

def summary_stats(self):
session_duration = py.std.time.time() - self._sessionstarttime
session_duration = time.time() - self._sessionstarttime

keys = "failed passed skipped deselected xfailed xpassed".split()
keys = ("failed passed skipped deselected "
"xfailed xpassed warnings").split()
for key in self.stats.keys():
if key not in keys:
keys.append(key)

@@ -1,7 +1,11 @@
""" support for providing temporary directories to test functions. """
import pytest, py
import re

import pytest
import py
from _pytest.monkeypatch import monkeypatch


class TempdirHandler:
def __init__(self, config):
self.config = config

@@ -63,7 +67,7 @@ def tmpdir(request):
path object.
"""
name = request.node.name
name = py.std.re.sub("[\W]", "_", name)
name = re.sub("[\W]", "_", name)
MAXVAL = 30
if len(name) > MAXVAL:
name = name[:MAXVAL]
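A sketch of the name normalization above: non-word characters become
underscores, then the name is clipped to 30 characters::

    import re

    name = re.sub(r"[\W]", "_", "test_feature[a/b-1]")[:30]
    assert name == "test_feature_a_b_1_"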
@@ -1,27 +1,25 @@
""" discovery and running of std-library "unittest" style tests. """
import pytest, py
from __future__ import absolute_import
import traceback
import sys

import pytest
import py


# for transferring markers
from _pytest.python import transfer_markers


def is_unittest(obj):
"""Is obj a subclass of unittest.TestCase?"""
unittest = sys.modules.get('unittest')
if unittest is None:
return # nobody can have derived unittest.TestCase
try:
return issubclass(obj, unittest.TestCase)
except KeyboardInterrupt:
raise
except:
return False


def pytest_pycollect_makeitem(collector, name, obj):
if is_unittest(obj):
return UnitTestCase(name, parent=collector)
# has unittest been imported and is obj a subclass of its TestCase?
try:
if not issubclass(obj, sys.modules["unittest"].TestCase):
return
except Exception:
return
# yes, so let's collect it
return UnitTestCase(name, parent=collector)


class UnitTestCase(pytest.Class):

@@ -41,10 +39,13 @@ class UnitTestCase(pytest.Class):
super(UnitTestCase, self).setup()

def collect(self):
self.session._fixturemanager.parsefactories(self, unittest=True)
loader = py.std.unittest.TestLoader()
module = self.getparent(pytest.Module).obj
from unittest import TestLoader
cls = self.obj
if not getattr(cls, "__test__", True):
return
self.session._fixturemanager.parsefactories(self, unittest=True)
loader = TestLoader()
module = self.getparent(pytest.Module).obj
foundsomething = False
for name in loader.getTestCaseNames(self.obj):
x = getattr(self.obj, name)
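The ``__test__`` guard above extends the nose-style opt-out to
``unittest.TestCase`` subclasses; a hypothetical example::

    import unittest

    class TestHelperBase(unittest.TestCase):
        __test__ = False  # collect() returns early, nothing here is run

        def test_shared_behaviour(self):
            self.fail("meant to run only in concrete subclasses")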
@@ -88,7 +89,7 @@ class TestCaseFunction(pytest.Function):
except TypeError:
try:
try:
l = py.std.traceback.format_exception(*rawexcinfo)
l = traceback.format_exception(*rawexcinfo)
l.insert(0, "NOTE: Incompatible Exception Representation, "
"displaying natively:\n\n")
pytest.fail("".join(l), pytrace=False)

@@ -154,7 +155,7 @@ def pytest_runtest_protocol(item, __multicall__):
if isinstance(item, TestCaseFunction):
if 'twisted.trial.unittest' in sys.modules:
ut = sys.modules['twisted.python.failure']
Failure__init__ = ut.Failure.__init__.im_func
Failure__init__ = ut.Failure.__init__
check_testcase_implements_trial_reporter()
def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
captureVars=None):

@@ -4,8 +4,8 @@ if __name__ == '__main__':
import cProfile
import pytest
import pstats
script = sys.argv[1] if len(sys.argv) > 1 else "empty.py"
stats = cProfile.run('pytest.cmdline.main([%r])' % script, 'prof')
script = sys.argv[1:] if len(sys.argv) > 1 else "empty.py"
stats = cProfile.run('pytest.cmdline.main(%r)' % script, 'prof')
p = pstats.Stats("prof")
p.strip_dirs()
p.sort_stats('cumulative')

@@ -4,7 +4,7 @@
<li><a href="{{ pathto('contributing') }}">Contribution Guide</a></li>
<li><a href="https://pypi.python.org/pypi/pytest">pytest @ PyPI</a></li>
<li><a href="https://bitbucket.org/hpk42/pytest/">pytest @ Bitbucket</a></li>
<li><a href="http://pytest.org/latest/plugins_index/index.html">3rd party plugins (beta)</a></li>
<li><a href="http://pytest.org/latest/plugins_index/index.html">3rd party plugins</a></li>
<li><a href="https://bitbucket.org/hpk42/pytest/issues?status=new&status=open">Issue Tracker</a></li>
<li><a href="http://pytest.org/latest/pytest.pdf">PDF Documentation</a>
</ul>

@@ -8,8 +8,9 @@

{% set page_width = '1020px' %}
{% set sidebar_width = '220px' %}
{% set link_color = '#490' %}
{% set link_hover_color = '#9c0' %}
/* orange of logo is #d67c29 but we use black for links for now */
{% set link_color = '#000' %}
{% set link_hover_color = '#000' %}
{% set base_font = 'sans-serif' %}
{% set header_font = 'sans-serif' %}


@@ -5,6 +5,9 @@ Release announcements
.. toctree::
:maxdepth: 2

release-2.6.2
release-2.6.1
release-2.6.0
release-2.5.2
release-2.5.1
release-2.5.0
doc/en/announce/release-2.6.0.txt (new file, 153 lines)
@@ -0,0 +1,153 @@
pytest-2.6.0: shorter tracebacks, new warning system, test runner compat
===========================================================================

pytest is a mature Python testing tool with more than 1000 tests
against itself, passing on many different interpreters and platforms.

The 2.6.0 release should be drop-in backward compatible to 2.5.2 and
fixes a number of bugs and brings some new features, mainly:

- shorter tracebacks by default: only the first (test function) entry
and the last (failure location) entry are shown, the ones between
only in "short" format. Use ``--tb=long`` to get back the old
behaviour of showing "long" entries everywhere.

- a new warning system which reports oddities during collection
and execution. For example, ignoring collecting Test* classes with an
``__init__`` now produces a warning.

- various improvements to nose/mock/unittest integration

Note also that 2.6.0 departs from the "zero reported bugs" policy
because it has been too hard to keep up with it, unfortunately.
Instead we are for now rather bound to work on "upvoted" issues in
the https://bitbucket.org/hpk42/pytest/issues?status=new&status=open&sort=-votes
issue tracker.

See docs at:

http://pytest.org

As usual, you can upgrade from pypi via::

pip install -U pytest

Thanks to all who contributed, among them:

Benjamin Peterson
Jurko Gospodnetić
Floris Bruynooghe
Marc Abramowitz
Marc Schlaich
Trevor Bekolay
Bruno Oliveira
Alex Groenholm

have fun,
holger krekel

2.6.0
-----------------------------------

- fix issue537: Avoid importing old assertion reinterpretation code by default.
Thanks Benjamin Peterson.

- fix issue364: shorten and enhance tracebacks representation by default.
The new "--tb=auto" option (default) will only display long tracebacks
for the first and last entry. You can get the old behaviour of printing
all entries as long entries with "--tb=long". Also short entries by
default are now printed very similarly to "--tb=native" ones.

- fix issue514: teach assertion reinterpretation about private class attributes.
Thanks Benjamin Peterson.

- change -v output to include full node IDs of tests. Users can copy
a node ID from a test run, including line number, and use it as a
positional argument in order to run only a single test.

- fix issue475: fail early and comprehensibly if calling
pytest.raises with a wrong exception type.

- fix issue516: tell in getting-started about current dependencies.

- cleanup setup.py a bit and specify supported versions. Thanks Jurko
Gospodnetic for the PR.

- change XPASS colour to yellow rather than red when tests are run
with -v.

- fix issue473: work around mock putting an unbound method into a class
dict when double-patching.

- fix issue498: if a fixture finalizer fails, make sure that
the fixture is still invalidated.

- fix issue453: the result of the pytest_assertrepr_compare hook now gets
its newlines escaped so that format_exception does not blow up.

- internal new warning system: pytest will now produce warnings when
it detects oddities in your test collection or execution.
Warnings are ultimately sent to a new pytest_logwarning hook which is
currently only implemented by the terminal plugin which displays
warnings in the summary line and shows more details when -rw (report on
warnings) is specified.

- change skips into warnings for test classes with an __init__ and
callables in test modules which look like a test but are not functions.

- fix issue436: improved finding of initial conftest files from command
line arguments by using the result of parse_known_args rather than
the previous flaky heuristics. Thanks Marc Abramowitz for tests
and initial fixing approaches in this area.

- fix issue479: properly handle nose/unittest(2) SkipTest exceptions
during collection/loading of test modules. Thanks to Marc Schlaich
for the complete PR.

- fix issue490: include pytest_load_initial_conftests in documentation
and improve docstring.

- fix issue472: clarify that ``pytest.config.getvalue()`` cannot work
if it's triggered ahead of command line parsing.

- merge PR123: improved integration with mock.patch decorator on tests.

- fix issue412: messing with stdout/stderr FD-level streams is now
captured without crashes.

- fix issue483: trial/py33 works now properly. Thanks Daniel Grana for PR.

- improve example for pytest integration with "python setup.py test"
which now has a generic "-a" or "--pytest-args" option where you
can pass additional options as a quoted string. Thanks Trevor Bekolay.

- simplified internal capturing mechanism and made it more robust
against tests or setups changing FD1/FD2, also better integrated
now with pytest.pdb() in single tests.

- improvements to pytest's own test-suite leakage detection, courtesy of PRs
from Marc Abramowitz

- fix issue492: avoid leak in test_writeorg. Thanks Marc Abramowitz.

- fix issue493: don't run tests in doc directory with ``python setup.py test``
(use tox -e doctesting for that)

- fix issue486: better reporting and handling of early conftest loading failures

- some cleanup and simplification of internal conftest handling.

- work a bit harder to break reference cycles when catching exceptions.
Thanks Jurko Gospodnetic.

- fix issue443: fix skip examples to use proper comparison. Thanks Alex
Groenholm.

- support nose-style ``__test__`` attribute on modules, classes and
functions, including unittest-style Classes. If set to False, the
test will not be collected.

- fix issue512: show "<notset>" for arguments which might not be set
in monkeypatch plugin. Improves output in documentation.

- avoid importing "py.test" (an old alias module for "pytest")
doc/en/announce/release-2.6.1.txt (new file, 59 lines)
@@ -0,0 +1,59 @@
pytest-2.6.1: fixes and new xfail feature
===========================================================================

pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
The 2.6.1 release is drop-in compatible to 2.5.2 and actually fixes some
regressions introduced with 2.6.0. It also brings a little feature
to the xfail marker which now recognizes expected exceptions,
see the CHANGELOG below.

See docs at:

http://pytest.org

As usual, you can upgrade from pypi via::

pip install -U pytest

Thanks to all who contributed, among them:

Floris Bruynooghe
Bruno Oliveira
Nicolas Delaby

have fun,
holger krekel

Changes 2.6.1
=================

- No longer show line numbers in the --verbose output, the output is now
purely the nodeid. The line number is still shown in failure reports.
Thanks Floris Bruynooghe.

- fix issue437 where assertion rewriting could cause pytest-xdist slaves
to collect different tests. Thanks Bruno Oliveira.

- fix issue555: add "errors" attribute to capture-streams to satisfy
some distutils and possibly other code accessing sys.stdout.errors.

- fix issue547: capsys/capfd also work when output capturing ("-s") is disabled.

- address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via
an optional "raises=EXC" argument where EXC can be a single exception
or a tuple of exception classes. Thanks David Mohr for the complete
PR.

- fix integration of pytest with unittest.mock.patch decorator when
it uses the "new" argument. Thanks Nicolas Delaby for test and PR.

- fix issue with detecting conftest files if the arguments contain
"::" node id specifications (copy pasted from "-v" output)

- fix issue544 by only removing "@NUM" at the end of "::" separated parts
and if the part has a ".py" extension

- don't use py.std import helper, rather import things directly.
Thanks Bruno Oliveira.
doc/en/announce/release-2.6.2.txt (new file, 52 lines)
@@ -0,0 +1,52 @@
pytest-2.6.2: few fixes and cx_freeze support
===========================================================================

pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is drop-in compatible to 2.5.2 and 2.6.X. It also
brings support for including pytest with cx_freeze or similar
freezing tools into your single-file app distribution. For details
see the CHANGELOG below.

See docs at:

http://pytest.org

As usual, you can upgrade from pypi via::

pip install -U pytest

Thanks to all who contributed, among them:

Floris Bruynooghe
Benjamin Peterson
Bruno Oliveira

have fun,
holger krekel

2.6.2
-----------

- Added function pytest.freeze_includes(), which makes it easy to embed
pytest into executables using tools like cx_freeze (see the sketch
after this list).
See docs for examples and rationale. Thanks Bruno Oliveira.

- Improve assertion rewriting cache invalidation precision.

- fixed issue561: adapt autouse fixture example for python3.

- fixed issue453: assertion rewriting issue with __repr__ containing
"\n{", "\n}" and "\n~".

- fix issue560: correctly display code if an "else:" or "finally:" is
followed by statements on the same line.

- Fix example in monkeypatch documentation, thanks t-8ch.

- fix issue572: correct tmpdir doc example for python3.

- Do not mark as universal wheel because Python 2.6 is different from
other builds due to the extra argparse dependency. Fixes issue566.
Thanks sontek.
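A hypothetical ``setup.py`` sketch for the ``pytest.freeze_includes()`` entry
above, assuming the standard cx_Freeze API (``app_main`` is illustrative
only)::

    from cx_Freeze import setup, Executable
    import pytest

    setup(
        name="app_main",
        executables=[Executable("app_main.py")],
        options={
            "build_exe": {
                # bundle the modules pytest needs at runtime
                "includes": pytest.freeze_includes(),
            },
        },
    )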
@@ -26,7 +26,7 @@ you will see the return value of the function call::

$ py.test test_assert1.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 1 items

test_assert1.py F

@@ -95,6 +95,22 @@ asserts that the given ``ExpectedException`` is raised. The reporter will
provide you with helpful output in case of failures such as *no
exception* or *wrong exception*.

Note that it is also possible to specify a "raises" argument to
``pytest.mark.xfail``, which checks that the test is failing in a more
specific way than just having any exception raised::

@pytest.mark.xfail(raises=IndexError)
def test_f():
f()

Using ``pytest.raises`` is likely to be better for cases where you are testing
exceptions your own code is deliberately raising, whereas using
``@pytest.mark.xfail`` with a check function is probably better for something
like documenting unfixed bugs (where the test describes what "should" happen)
or bugs in dependencies.


.. _newreport:

Making use of context-sensitive comparisons

@@ -116,7 +132,7 @@ if you run this module::

$ py.test test_assert2.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 1 items

test_assert2.py F
@@ -1,188 +0,0 @@

**Test classes, modules or whole projects can make use of
one or more fixtures**. All required fixture functions will execute
before a test from the specifying context executes. You can use this
to make tests operate from a pre-initialized directory or with
certain environment variables or with pre-configured global application
settings.

For example, the Django_ project requires database
initialization to be able to import from and use its model objects.
For that, the `pytest-django`_ plugin provides fixtures which your
project can then easily depend or extend on, simply by referencing the
name of the particular fixture.

Fixture functions have limited visibility which depends on where they
are defined. If they are defined on a test class, only its test methods
may use it. A fixture defined in a module can only be used
from that test module. A fixture defined in a conftest.py file
can only be used by the tests below the directory of that file.
Lastly, plugins can define fixtures which are available across all
projects.


Python, Java and many other languages support a so-called xUnit_ style
for providing a fixed state, `test fixtures`_, for running tests. It
typically involves calling an autouse function ahead and a teardown
function after a test executes. In 2005 pytest introduced a scope-specific
model of automatically detecting and calling autouse and teardown
functions on a per-module, class or function basis. The Python unittest
package and nose have subsequently incorporated them. This model
remains supported by pytest as :ref:`classic xunit`.

One property of xunit fixture functions is that they work implicitly
by preparing global state or setting attributes on TestCase objects.
By contrast, pytest provides :ref:`funcargs` which allow to
dependency-inject application test state into test functions or
methods as function arguments. If your application is sufficiently modular
or if you are creating a new project, we recommend you now rather head over to
:ref:`funcargs` instead because many pytest users agree that using this
paradigm leads to better application and test organisation.

However, not all programs and frameworks work and can be tested in
a fully modular way. They rather require preparation of global state
like database autouse on which further fixtures like preparing application
specific tables or wrapping tests in transactions can take place. For those
needs, pytest-2.3 now supports new **fixture functions** which come with
a ton of improvements over classic xunit fixture writing. Fixture functions:

- allow to separate different autouse concerns into multiple modular functions

- can receive and fully interoperate with :ref:`funcargs <resources>`,

- are called multiple times if their funcargs are parametrized,

- don't need to be defined directly in your test classes or modules,
they can also be defined in a plugin or :ref:`conftest.py <conftest.py>` files and get called

- are called on a per-session, per-module, per-class or per-function basis
by means of a simple "scope" declaration.

- can access the :ref:`request <request>` object which allows to
introspect and interact with the (scoped) testcontext.

- can add cleanup functions which will be invoked when the last test
of the fixture test context has finished executing.

All of these features are now demonstrated by little examples.


test modules accessing a global resource
-------------------------------------------------------

.. note::

Relying on `global state is considered bad programming practise <http://en.wikipedia.org/wiki/Global_variable>`_ but when you work with an application
that relies on it you often have no choice.

If you want test modules to access a global resource,
you can stick the resource to the module globals in
a per-module autouse function. We use a :ref:`resource factory
<@pytest.fixture>` to create our global resource::

# content of conftest.py
import pytest

class GlobalResource:
def __init__(self):
pass

@pytest.fixture(scope="session")
def globresource():
return GlobalResource()

@pytest.fixture(scope="module")
def setresource(request, globresource):
request.module.globresource = globresource

Now any test module can access ``globresource`` as a module global::

# content of test_glob.py

def test_1():
print ("test_1 %s" % globresource)
def test_2():
print ("test_2 %s" % globresource)

Let's run this module without output-capturing::

$ py.test -qs test_glob.py
FF
================================= FAILURES =================================
__________________________________ test_1 __________________________________

def test_1():
> print ("test_1 %s" % globresource)
E NameError: global name 'globresource' is not defined

test_glob.py:3: NameError
__________________________________ test_2 __________________________________

def test_2():
> print ("test_2 %s" % globresource)
E NameError: global name 'globresource' is not defined

test_glob.py:5: NameError
2 failed in 0.01 seconds

The two tests see the same global ``globresource`` object.

Parametrizing the global resource
+++++++++++++++++++++++++++++++++++++++++++++++++

We extend the previous example and add parametrization to the globresource
factory and also add a finalizer::

# content of conftest.py

import pytest

class GlobalResource:
def __init__(self, param):
self.param = param

@pytest.fixture(scope="session", params=[1,2])
def globresource(request):
g = GlobalResource(request.param)
def fin():
print "finalizing", g
request.addfinalizer(fin)
return g

@pytest.fixture(scope="module")
def setresource(request, globresource):
request.module.globresource = globresource

And then re-run our test module::

$ py.test -qs test_glob.py
FF
================================= FAILURES =================================
__________________________________ test_1 __________________________________

def test_1():
> print ("test_1 %s" % globresource)
E NameError: global name 'globresource' is not defined

test_glob.py:3: NameError
__________________________________ test_2 __________________________________

def test_2():
> print ("test_2 %s" % globresource)
E NameError: global name 'globresource' is not defined

test_glob.py:5: NameError
2 failed in 0.01 seconds

We are now running the two tests twice with two different global resource
instances. Note that the tests are ordered such that only
one instance is active at any given time: the finalizer of
the first globresource instance is called before the second
instance is created and sent to the autouse functions.
||||
@@ -80,7 +80,7 @@ You can ask for available builtin or project-custom
|
||||
|
||||
capfd
|
||||
enables capturing of writes to file descriptors 1 and 2 and makes
|
||||
captured output available via ``capsys.readouterr()`` method calls
|
||||
captured output available via ``capfd.readouterr()`` method calls
|
||||
which return a ``(out, err)`` tuple.
|
||||
|
||||
monkeypatch
|
||||
|
||||
@@ -64,7 +64,7 @@ of the failing function and hide the other one::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
|
||||
collected 2 items
|
||||
|
||||
test_module.py .F
|
||||
@@ -77,18 +77,16 @@ of the failing function and hide the other one::
|
||||
E assert False
|
||||
|
||||
test_module.py:9: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
setting up <function test_func2 at 0x1ec25f0>
|
||||
-------------------------- Captured stdout setup ---------------------------
|
||||
setting up <function test_func2 at 0x2b5d6a81c9d8>
|
||||
==================== 1 failed, 1 passed in 0.01 seconds ====================
|
||||
|
||||
Accessing captured output from a test function
|
||||
---------------------------------------------------
|
||||
|
||||
The :ref:`funcarg mechanism` allows test function a very easy
|
||||
way to access the captured output by simply using the names
|
||||
``capsys`` or ``capfd`` in the test function signature. Here
|
||||
is an example test function that performs some output related
|
||||
checks::
|
||||
The ``capsys`` and ``capfd`` fixtures allow to access stdout/stderr
|
||||
output created during test execution. Here is an example test function
|
||||
that performs some output related checks::
|
||||
|
||||
def test_myoutput(capsys): # or use "capfd" for fd-level
|
||||
print ("hello")
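
The snippet is cut off at the hunk boundary. Completed along the lines of
the pytest docs, the full example reads as follows (the ``sys.stderr`` line
and the assertions are filled in from context and may differ in detail)::

    import sys

    def test_myoutput(capsys):  # or use "capfd" for fd-level
        print("hello")
        sys.stderr.write("world\n")
        out, err = capsys.readouterr()  # snapshot and reset captured output
        assert out == "hello\n"
        assert err == "world\n"
        print("next")
        out, err = capsys.readouterr()  # only output since the last call
        assert out == "next\n"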

@@ -108,8 +106,10 @@ test from having to care about setting/resetting
output streams and also interacts well with pytest's
own per-test capturing.

-If you want to capture on ``fd`` level you can use
+If you want to capture on filedescriptor level you can use
the ``capfd`` function argument which offers the exact
-same interface.
+same interface but allows to also capture output from
+libraries or subprocesses that directly write to operating
+system level output streams (FD1 and FD2).

.. include:: links.inc

@@ -17,8 +17,8 @@
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
-version = "2.5.2"
-release = "2.5.2"
+version = "2.6"
+release = "2.6.2"

import sys, os

@@ -54,7 +54,7 @@ master_doc = 'contents'

# General information about the project.
project = u'pytest'
-copyright = u'2013, holger krekel'
+copyright = u'2014, holger krekel'

@@ -131,12 +131,12 @@ html_short_title = "pytest-%s" % release

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
+html_logo = "img/pytest1.png"

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+html_favicon = "img/pytest1favi.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@@ -225,7 +225,7 @@ latex_documents = [

# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+latex_logo = 'img/pytest1.png'

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.

@@ -1,4 +1,10 @@

.. note::

+   `improving your automated testing with pytest <https://ep2014.europython.eu/en/schedule/sessions/92/>`_, July 25th 2014, Berlin, Germany
+
+   `professional testing with pytest and tox <http://www.python-academy.com/courses/specialtopics/python_course_testing.html>`_, 24-26th November 2014, Freiburg, Germany

.. _toc:

Full pytest documentation

@@ -97,9 +97,9 @@ Builtin configuration file options

       [seq] matches any character in seq
       [!seq] matches any char not in seq

-   Default patterns are ``.* _darcs CVS {args}``. Setting a ``norecursedir``
-   replaces the default. Here is an example of how to avoid
-   certain directories::
+   Default patterns are ``'.*', 'CVS', '_darcs', '{arch}', '*.egg'``.
+   Setting a ``norecursedirs`` replaces the default. Here is an example of
+   how to avoid certain directories::

    # content of setup.cfg
    [pytest]
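
The ini snippet is likewise truncated by the hunk. A plausible completion,
matching the usual pytest docs example (the directory names are
illustrative), is::

    # content of setup.cfg
    [pytest]
    norecursedirs = .svn _build tmp*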

@@ -44,12 +44,12 @@ then you can just invoke ``py.test`` without command line options::

    $ py.test
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 1 items

    mymodule.py .

-   ========================= 1 passed in 0.01 seconds =========================
+   ========================= 1 passed in 0.06 seconds =========================

It is possible to use fixtures using the ``getfixture`` helper::

@@ -21,6 +21,9 @@ You can "mark" a test function with custom metadata like this::

        pass
    def test_another():
        pass
+   class TestClass:
+       def test_method(self):
+           pass

.. versionadded:: 2.2
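
Pieced together from the hunk context, the full example module that the runs
below operate on is approximately as follows; the ``webtest`` marker on
``test_send_http`` is implied by the selections shown, not visible in this
hunk::

    # content of test_server.py -- reconstructed from context

    import pytest

    @pytest.mark.webtest
    def test_send_http():
        pass  # perform some webtest test for your app

    def test_something_quick():
        pass

    def test_another():
        pass

    class TestClass:
        def test_method(self):
            pass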

@@ -28,26 +31,82 @@ You can then restrict a test run to only run tests marked with ``webtest``::

    $ py.test -v -m webtest
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
-   collecting ... collected 3 items
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
+   collecting ... collected 4 items

-   test_server.py:3: test_send_http PASSED
+   test_server.py::test_send_http PASSED

-   =================== 2 tests deselected by "-m 'webtest'" ===================
-   ================== 1 passed, 2 deselected in 0.01 seconds ==================
+   =================== 3 tests deselected by "-m 'webtest'" ===================
+   ================== 1 passed, 3 deselected in 0.01 seconds ==================

Or the inverse, running all tests except the webtest ones::

    $ py.test -v -m "not webtest"
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
-   collecting ... collected 3 items
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
+   collecting ... collected 4 items

-   test_server.py:6: test_something_quick PASSED
-   test_server.py:8: test_another PASSED
+   test_server.py::test_something_quick PASSED
+   test_server.py::test_another PASSED
+   test_server.py::TestClass::test_method PASSED

    ================= 1 tests deselected by "-m 'not webtest'" =================
-   ================== 2 passed, 1 deselected in 0.01 seconds ==================
+   ================== 3 passed, 1 deselected in 0.01 seconds ==================

+Selecting tests based on their node ID
+----------------------------------------
+
+You can provide one or more :ref:`node IDs <node-id>` as positional
+arguments to select only specified tests. This makes it easy to select
+tests based on their module, class, method, or function name::
+
+    $ py.test -v test_server.py::TestClass::test_method
+    =========================== test session starts ============================
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
+    collecting ... collected 5 items
+
+    test_server.py::TestClass::test_method PASSED
+
+    ========================= 1 passed in 0.01 seconds =========================
+
+You can also select on the class::
+
+    $ py.test -v test_server.py::TestClass
+    =========================== test session starts ============================
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
+    collecting ... collected 4 items
+
+    test_server.py::TestClass::test_method PASSED
+
+    ========================= 1 passed in 0.01 seconds =========================
+
+Or select multiple nodes::
+
+    $ py.test -v test_server.py::TestClass test_server.py::test_send_http
+    =========================== test session starts ============================
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
+    collecting ... collected 8 items
+
+    test_server.py::TestClass::test_method PASSED
+    test_server.py::test_send_http PASSED
+
+    ========================= 2 passed in 0.01 seconds =========================
+
+.. _node-id:
+
+.. note::
+
+    Node IDs are of the form ``module.py::class::method`` or
+    ``module.py::function``. Node IDs control which tests are
+    collected, so ``module.py::class`` will select all test methods
+    on the class. Nodes are also created for each parameter of a
+    parametrized fixture or test, so selecting a parametrized test
+    must include the parameter value, e.g.
+    ``module.py::function[param]``.
+
+    Node IDs for failing tests are displayed in the test summary info
+    when running py.test with the ``-rf`` option. You can also
+    construct Node IDs from the output of ``py.test --collectonly``.
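
To illustrate the parameter rule, here is a hypothetical parametrized module
and the matching node ID (names invented for illustration)::

    # content of test_params.py -- a hypothetical illustration
    import pytest

    @pytest.mark.parametrize("n", [1, 2])
    def test_positive(n):
        assert n > 0

    # select a single parametrized run by including the parameter value
    # (quote the ID so the shell does not expand the brackets):
    #   py.test -v "test_params.py::test_positive[2]"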

Using ``-k expr`` to select tests based on their name
-------------------------------------------------------

@@ -61,39 +120,40 @@ select tests based on their names::

    $ py.test -v -k http # running with the above defined example module
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
-   collecting ... collected 3 items
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
+   collecting ... collected 4 items

-   test_server.py:3: test_send_http PASSED
+   test_server.py::test_send_http PASSED

-   ====================== 2 tests deselected by '-khttp' ======================
-   ================== 1 passed, 2 deselected in 0.01 seconds ==================
+   ====================== 3 tests deselected by '-khttp' ======================
+   ================== 1 passed, 3 deselected in 0.01 seconds ==================

And you can also run all tests except the ones that match the keyword::

    $ py.test -k "not send_http" -v
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
-   collecting ... collected 3 items
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
+   collecting ... collected 4 items

-   test_server.py:6: test_something_quick PASSED
-   test_server.py:8: test_another PASSED
+   test_server.py::test_something_quick PASSED
+   test_server.py::test_another PASSED
+   test_server.py::TestClass::test_method PASSED

    ================= 1 tests deselected by '-knot send_http' ==================
-   ================== 2 passed, 1 deselected in 0.01 seconds ==================
+   ================== 3 passed, 1 deselected in 0.01 seconds ==================

Or to select "http" and "quick" tests::

    $ py.test -k "http or quick" -v
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
-   collecting ... collected 3 items
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
+   collecting ... collected 4 items

-   test_server.py:3: test_send_http PASSED
-   test_server.py:6: test_something_quick PASSED
+   test_server.py::test_send_http PASSED
+   test_server.py::test_something_quick PASSED

-   ================= 1 tests deselected by '-khttp or quick' ==================
-   ================== 2 passed, 1 deselected in 0.01 seconds ==================
+   ================= 2 tests deselected by '-khttp or quick' ==================
+   ================== 2 passed, 2 deselected in 0.01 seconds ==================

.. note::

@@ -127,7 +187,7 @@ You can ask which markers exist for your test suite - the list includes our just

    @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html

-   @pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html
+   @pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html

    @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
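
The new ``raises`` argument added in this release can be used like this
(a sketch; any other exception type makes the test count as a real failure
instead of an xfail)::

    import pytest

    @pytest.mark.xfail(raises=IndexError)
    def test_expected_indexerror():
        data = []
        data[0]  # IndexError here is an "expected" failure;
                 # e.g. a TypeError would be reported as a true failure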

@@ -266,7 +326,7 @@ the test needs::

    $ py.test -E stage2
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 1 items

    test_someenv.py s
@@ -277,7 +337,7 @@ and here is one that specifies exactly the environment needed::

    $ py.test -E stage1
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 1 items

    test_someenv.py .
@@ -291,7 +351,7 @@ The ``--markers`` option always gives you a list of available markers::

    @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html

-   @pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html
+   @pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html

    @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.

@@ -395,26 +455,26 @@ then you will see two test skipped and two executed tests as expected::

    $ py.test -rs # this option reports skip reasons
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 4 items

-   test_plat.py s.s.
+   test_plat.py sss.
    ========================= short test summary info ==========================
-   SKIP [2] /tmp/doc-exec-65/conftest.py:12: cannot run on platform linux2
+   SKIP [3] /tmp/doc-exec-238/conftest.py:12: cannot run on platform linux

-   =================== 2 passed, 2 skipped in 0.01 seconds ====================
+   =================== 1 passed, 3 skipped in 0.01 seconds ====================

Note that if you specify a platform via the marker-command line option like this::

    $ py.test -m linux2
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 4 items

-   test_plat.py .
+   test_plat.py s

    =================== 3 tests deselected by "-m 'linux2'" ====================
-   ================== 1 passed, 3 deselected in 0.01 seconds ==================
+   ================= 1 skipped, 3 deselected in 0.01 seconds ==================

then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests.
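
The conftest driving these runs is not part of this diff. A sketch of the
idea, assuming each test carries at most one platform marker with the names
seen in the output above (not the literal conftest from the docs)::

    # content of conftest.py -- a sketch
    import sys
    import pytest

    ALL_PLATFORMS = set("darwin linux2 win32".split())

    def pytest_runtest_setup(item):
        plat = sys.platform
        # platform names that appear as marks/keywords on this test item
        marked = ALL_PLATFORMS.intersection(item.keywords)
        if marked and plat not in marked:
            pytest.skip("cannot run on platform %s" % plat)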

@@ -459,7 +519,7 @@ We can now use the ``-m option`` to select one set::

    $ py.test -m interface --tb=short
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 4 items

    test_module.py FF
@@ -467,12 +527,12 @@ We can now use the ``-m option`` to select one set::
    ================================= FAILURES =================================
    __________________________ test_interface_simple ___________________________
    test_module.py:3: in test_interface_simple
-   >   assert 0
-   E   assert 0
+       assert 0
+   E   assert 0
    __________________________ test_interface_complex __________________________
    test_module.py:6: in test_interface_complex
-   >   assert 0
-   E   assert 0
+       assert 0
+   E   assert 0
    ================== 2 tests deselected by "-m 'interface'" ==================
    ================== 2 failed, 2 deselected in 0.01 seconds ==================

@@ -480,7 +540,7 @@ or to select both "event" and "interface" tests::

    $ py.test -m "interface or event" --tb=short
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 4 items

    test_module.py FFF
@@ -488,15 +548,15 @@ or to select both "event" and "interface" tests::
    ================================= FAILURES =================================
    __________________________ test_interface_simple ___________________________
    test_module.py:3: in test_interface_simple
-   >   assert 0
-   E   assert 0
+       assert 0
+   E   assert 0
    __________________________ test_interface_complex __________________________
    test_module.py:6: in test_interface_complex
-   >   assert 0
-   E   assert 0
+       assert 0
+   E   assert 0
    ____________________________ test_event_simple _____________________________
    test_module.py:9: in test_event_simple
-   >   assert 0
-   E   assert 0
+       assert 0
+   E   assert 0
    ============= 1 tests deselected by "-m 'interface or event'" ==============
    ================== 3 failed, 1 deselected in 0.01 seconds ==================
@@ -5,7 +5,7 @@ serialization via the pickle module.

import py
import pytest

-pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8']
+pythonlist = ['python2.6', 'python2.7', 'python3.4']
@pytest.fixture(params=pythonlist)
def python1(request, tmpdir):
    picklefile = tmpdir.join("data.pickle")

@@ -27,10 +27,10 @@ now execute the test specification::

    nonpython $ py.test test_simple.yml
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 2 items

-   test_simple.yml F.
+   test_simple.yml .F

    ================================= FAILURES =================================
    ______________________________ usecase: hello ______________________________
@@ -56,11 +56,11 @@ consulted when reporting in ``verbose`` mode::

    nonpython $ py.test -v
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
    collecting ... collected 2 items

-   test_simple.yml:1: usecase: hello FAILED
-   test_simple.yml:1: usecase: ok PASSED
+   test_simple.yml::usecase: ok PASSED
+   test_simple.yml::usecase: hello FAILED

    ================================= FAILURES =================================
    ______________________________ usecase: hello ______________________________
@@ -74,10 +74,10 @@ interesting to just look at the collection tree::

    nonpython $ py.test --collect-only
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 2 items
    <YamlFile 'test_simple.yml'>
-     <YamlItem 'hello'>
      <YamlItem 'ok'>
+     <YamlItem 'hello'>

-   ============================= in 0.02 seconds =============================
+   ============================= in 0.03 seconds =============================

@@ -106,7 +106,7 @@ this is a fully self-contained example which you can run with::

    $ py.test test_scenarios.py
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 4 items

    test_scenarios.py ....
@@ -118,7 +118,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia

    $ py.test --collect-only test_scenarios.py
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 4 items
    <Module 'test_scenarios.py'>
      <Class 'TestSampleWithScenarios'>
@@ -182,7 +182,7 @@ Let's first see how it looks like at collection time::

    $ py.test test_backends.py --collect-only
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 2 items
    <Module 'test_backends.py'>
      <Function 'test_db_initialized[d1]'>
@@ -197,7 +197,7 @@ And then when we run the test::
    ================================= FAILURES =================================
    _________________________ test_db_initialized[d2] __________________________

-   db = <conftest.DB2 instance at 0x1e5f050>
+   db = <conftest.DB2 object at 0x2b83684b5eb8>

    def test_db_initialized(db):
        # a dummy test
@@ -251,9 +251,9 @@ argument sets to use for each test function. Let's run it::
    $ py.test -q
    F..
    ================================= FAILURES =================================
-   ________________________ TestClass.test_equals[1-2] ________________________
+   ________________________ TestClass.test_equals[2-1] ________________________

-   self = <test_parametrize.TestClass instance at 0x246c4d0>, a = 1, b = 2
+   self = <test_parametrize.TestClass object at 0x2ae94130e390>, a = 1, b = 2

    def test_equals(self, a, b):
>       assert a == b
@@ -279,10 +279,12 @@ is to be run with different sets of arguments for its three arguments:
Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize)::

    . $ py.test -rs -q multipython.py
-   ............sss............sss............sss............ssssssssssssssssss
+   ssssssssssssssssssssssssssssssssssss......sssssssss......ssssssssssssssssss
    ========================= short test summary info ==========================
-   SKIP [27] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.8' not found
-   48 passed, 27 skipped in 1.30 seconds
+   SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.5' not found
+   SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.8' not found
+   SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.4' not found
+   12 passed, 63 skipped in 0.65 seconds

Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------
@@ -329,12 +331,12 @@ If you run this with reporting for skips enabled::

    $ py.test -rs test_module.py
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 2 items

    test_module.py .s
    ========================= short test summary info ==========================
-   SKIP [1] /tmp/doc-exec-67/conftest.py:10: could not import 'opt2'
+   SKIP [1] /tmp/doc-exec-240/conftest.py:10: could not import 'opt2'

    =================== 1 passed, 1 skipped in 0.01 seconds ====================

@@ -43,7 +43,7 @@ then the test collection looks like this::

    $ py.test --collect-only
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 2 items
    <Module 'check_myapp.py'>
      <Class 'CheckMyApp'>
@@ -88,7 +88,7 @@ You can always peek at the collection tree without running tests like this::

    . $ py.test --collect-only pythoncollection.py
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 3 items
    <Module 'pythoncollection.py'>
      <Function 'test_function'>
@@ -141,10 +141,8 @@ interpreters and will leave out the setup.py file::

    $ py.test --collect-only
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
-   collected 1 items
-   <Module 'pkg/module_py2.py'>
-     <Function 'test_only_on_python2'>
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
+   collected 0 items

    ============================= in 0.01 seconds =============================

@@ -13,7 +13,7 @@ get on the terminal - we are working on that):

    assertion $ py.test failure_demo.py
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 39 items

    failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
@@ -30,7 +30,7 @@ get on the terminal - we are working on that):
    failure_demo.py:15: AssertionError
    _________________________ TestFailing.test_simple __________________________

-   self = <failure_demo.TestFailing object at 0x29e5210>
+   self = <failure_demo.TestFailing object at 0x2aec3e52d470>

    def test_simple(self):
        def f():
@@ -40,13 +40,13 @@ get on the terminal - we are working on that):

    >   assert f() == g()
    E   assert 42 == 43
-   E    +  where 42 = <function f at 0x296a9b0>()
-   E    +  and   43 = <function g at 0x296aa28>()
+   E    +  where 42 = <function TestFailing.test_simple.<locals>.f at 0x2aec3e47b158>()
+   E    +  and   43 = <function TestFailing.test_simple.<locals>.g at 0x2aec3e47b268>()

    failure_demo.py:28: AssertionError
    ____________________ TestFailing.test_simple_multiline _____________________

-   self = <failure_demo.TestFailing object at 0x29cef50>
+   self = <failure_demo.TestFailing object at 0x2aec3e474ac8>

    def test_simple_multiline(self):
        otherfunc_multi(
@@ -66,19 +66,19 @@ get on the terminal - we are working on that):
    failure_demo.py:11: AssertionError
    ___________________________ TestFailing.test_not ___________________________

-   self = <failure_demo.TestFailing object at 0x29be250>
+   self = <failure_demo.TestFailing object at 0x2aec3e5156a0>

    def test_not(self):
        def f():
            return 42
    >   assert not f()
    E   assert not 42
-   E    +  where 42 = <function f at 0x296ac08>()
+   E    +  where 42 = <function TestFailing.test_not.<locals>.f at 0x2aec3e47e620>()

    failure_demo.py:38: AssertionError
    _________________ TestSpecialisedExplanations.test_eq_text _________________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29c3990>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e50cba8>

    def test_eq_text(self):
    >   assert 'spam' == 'eggs'
@@ -89,7 +89,7 @@ get on the terminal - we are working on that):
    failure_demo.py:42: AssertionError
    _____________ TestSpecialisedExplanations.test_eq_similar_text _____________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x2acef90>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4e24e0>

    def test_eq_similar_text(self):
    >   assert 'foo 1 bar' == 'foo 2 bar'
@@ -102,7 +102,7 @@ get on the terminal - we are working on that):
    failure_demo.py:45: AssertionError
    ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29f1f50>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4cc6d8>

    def test_eq_multiline_text(self):
    >   assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
@@ -115,7 +115,7 @@ get on the terminal - we are working on that):
    failure_demo.py:48: AssertionError
    ______________ TestSpecialisedExplanations.test_eq_long_text _______________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29e58d0>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e501908>

    def test_eq_long_text(self):
        a = '1'*100 + 'a' + '2'*100
@@ -132,7 +132,7 @@ get on the terminal - we are working on that):
    failure_demo.py:53: AssertionError
    _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29cee50>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e3af048>

    def test_eq_long_text_multiline(self):
        a = '1\n'*100 + 'a' + '2\n'*100
@@ -156,7 +156,7 @@ get on the terminal - we are working on that):
    failure_demo.py:58: AssertionError
    _________________ TestSpecialisedExplanations.test_eq_list _________________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29c3810>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e474c50>

    def test_eq_list(self):
    >   assert [0, 1, 2] == [0, 1, 3]
@@ -166,7 +166,7 @@ get on the terminal - we are working on that):
    failure_demo.py:61: AssertionError
    ______________ TestSpecialisedExplanations.test_eq_list_long _______________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29e50d0>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e515dd8>

    def test_eq_list_long(self):
        a = [0]*100 + [1] + [3]*100
@@ -178,7 +178,7 @@ get on the terminal - we are working on that):
    failure_demo.py:66: AssertionError
    _________________ TestSpecialisedExplanations.test_eq_dict _________________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29c5dd0>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4a5ef0>

    def test_eq_dict(self):
    >   assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
@@ -194,7 +194,7 @@ get on the terminal - we are working on that):
    failure_demo.py:69: AssertionError
    _________________ TestSpecialisedExplanations.test_eq_set __________________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29e2690>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4a2e48>

    def test_eq_set(self):
    >   assert set([0, 10, 11, 12]) == set([0, 20, 21])
@@ -210,7 +210,7 @@ get on the terminal - we are working on that):
    failure_demo.py:72: AssertionError
    _____________ TestSpecialisedExplanations.test_eq_longer_list ______________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29ceb50>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4e0c50>

    def test_eq_longer_list(self):
    >   assert [1,2] == [1,2,3]
@@ -220,7 +220,7 @@ get on the terminal - we are working on that):
    failure_demo.py:75: AssertionError
    _________________ TestSpecialisedExplanations.test_in_list _________________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29c3050>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4950f0>

    def test_in_list(self):
    >   assert 1 in [0, 2, 3, 4, 5]
@@ -229,7 +229,7 @@ get on the terminal - we are working on that):
    failure_demo.py:78: AssertionError
    __________ TestSpecialisedExplanations.test_not_in_text_multiline __________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29e5b10>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e474f98>

    def test_not_in_text_multiline(self):
        text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
@@ -247,7 +247,7 @@ get on the terminal - we are working on that):
    failure_demo.py:82: AssertionError
    ___________ TestSpecialisedExplanations.test_not_in_text_single ____________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29f1610>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e5333c8>

    def test_not_in_text_single(self):
        text = 'single foo line'
@@ -260,7 +260,7 @@ get on the terminal - we are working on that):
    failure_demo.py:86: AssertionError
    _________ TestSpecialisedExplanations.test_not_in_text_single_long _________

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29cea50>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4ccb70>

    def test_not_in_text_single_long(self):
        text = 'head ' * 50 + 'foo ' + 'tail ' * 20
@@ -273,7 +273,7 @@ get on the terminal - we are working on that):
    failure_demo.py:90: AssertionError
    ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______

-   self = <failure_demo.TestSpecialisedExplanations object at 0x29e2a10>
+   self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e502080>

    def test_not_in_text_single_long_term(self):
        text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
@@ -292,7 +292,7 @@ get on the terminal - we are working on that):
        i = Foo()
    >   assert i.b == 2
    E   assert 1 == 2
-   E    +  where 1 = <failure_demo.Foo object at 0x29c77d0>.b
+   E    +  where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0x2aec3e519c18>.b

    failure_demo.py:101: AssertionError
    _________________________ test_attribute_instance __________________________
@@ -302,8 +302,8 @@ get on the terminal - we are working on that):
        b = 1
    >   assert Foo().b == 2
    E   assert 1 == 2
-   E    +  where 1 = <failure_demo.Foo object at 0x29e5f10>.b
-   E    +    where <failure_demo.Foo object at 0x29e5f10> = <class 'failure_demo.Foo'>()
+   E    +  where 1 = <failure_demo.test_attribute_instance.<locals>.Foo object at 0x2aec3e52d898>.b
+   E    +    where <failure_demo.test_attribute_instance.<locals>.Foo object at 0x2aec3e52d898> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()

    failure_demo.py:107: AssertionError
    __________________________ test_attribute_failure __________________________
@@ -319,7 +319,7 @@ get on the terminal - we are working on that):
    failure_demo.py:116:
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

-   self = <failure_demo.Foo object at 0x29e6b10>
+   self = <failure_demo.test_attribute_failure.<locals>.Foo object at 0x2aec3e4e0b38>

    def _get_b(self):
    >   raise Exception('Failed to get attrib')
@@ -335,15 +335,15 @@ get on the terminal - we are working on that):
        b = 2
    >   assert Foo().b == Bar().b
    E   assert 1 == 2
-   E    +  where 1 = <failure_demo.Foo object at 0x29c3b10>.b
-   E    +    where <failure_demo.Foo object at 0x29c3b10> = <class 'failure_demo.Foo'>()
-   E    +  and   2 = <failure_demo.Bar object at 0x29c3350>.b
-   E    +    where <failure_demo.Bar object at 0x29c3350> = <class 'failure_demo.Bar'>()
+   E    +  where 1 = <failure_demo.test_attribute_multiple.<locals>.Foo object at 0x2aec3e4a5748>.b
+   E    +    where <failure_demo.test_attribute_multiple.<locals>.Foo object at 0x2aec3e4a5748> = <class 'failure_demo.test_attribute_multiple.<locals>.Foo'>()
+   E    +  and   2 = <failure_demo.test_attribute_multiple.<locals>.Bar object at 0x2aec3e4a51d0>.b
+   E    +    where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0x2aec3e4a51d0> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()

    failure_demo.py:124: AssertionError
    __________________________ TestRaises.test_raises __________________________

-   self = <failure_demo.TestRaises instance at 0x2aec878>
+   self = <failure_demo.TestRaises object at 0x2aec3e4a2d68>

    def test_raises(self):
        s = 'qwe'
@@ -355,10 +355,10 @@ get on the terminal - we are working on that):
    >   int(s)
    E   ValueError: invalid literal for int() with base 10: 'qwe'

-   <0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:999>:1: ValueError
+   <0-codegen /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1028>:1: ValueError
    ______________________ TestRaises.test_raises_doesnt _______________________

-   self = <failure_demo.TestRaises instance at 0x2aafef0>
+   self = <failure_demo.TestRaises object at 0x2aec3e4e2198>

    def test_raises_doesnt(self):
    >   raises(IOError, "int('3')")
@@ -367,7 +367,7 @@ get on the terminal - we are working on that):
    failure_demo.py:136: Failed
    __________________________ TestRaises.test_raise ___________________________

-   self = <failure_demo.TestRaises instance at 0x2ae5758>
+   self = <failure_demo.TestRaises object at 0x2aec3e5017b8>

    def test_raise(self):
    >   raise ValueError("demo error")
@@ -376,7 +376,7 @@ get on the terminal - we are working on that):
    failure_demo.py:139: ValueError
    ________________________ TestRaises.test_tupleerror ________________________

-   self = <failure_demo.TestRaises instance at 0x29cf4d0>
+   self = <failure_demo.TestRaises object at 0x2aec3e533160>

    def test_tupleerror(self):
    >   a,b = [1]
@@ -385,7 +385,7 @@ get on the terminal - we are working on that):
    failure_demo.py:142: ValueError
    ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______

-   self = <failure_demo.TestRaises instance at 0x29cf9e0>
+   self = <failure_demo.TestRaises object at 0x2aec3e4cc438>

    def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
        l = [1,2,3]
@@ -394,15 +394,15 @@ get on the terminal - we are working on that):
    E   TypeError: 'int' object is not iterable

    failure_demo.py:147: TypeError
-   ----------------------------- Captured stdout ------------------------------
+   --------------------------- Captured stdout call ---------------------------
    l is [1, 2, 3]
    ________________________ TestRaises.test_some_error ________________________

-   self = <failure_demo.TestRaises instance at 0x29d9ea8>
+   self = <failure_demo.TestRaises object at 0x2aec3e5199e8>

    def test_some_error(self):
    >   if namenotexi:
-   E   NameError: global name 'namenotexi' is not defined
+   E   NameError: name 'namenotexi' is not defined

    failure_demo.py:150: NameError
    ____________________ test_dynamic_compile_shows_nicely _____________________
@@ -426,7 +426,7 @@ get on the terminal - we are working on that):
    <2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError
    ____________________ TestMoreErrors.test_complex_error _____________________

-   self = <failure_demo.TestMoreErrors instance at 0x29ca8c0>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e515cf8>

    def test_complex_error(self):
        def f():
@@ -437,13 +437,8 @@ get on the terminal - we are working on that):

    failure_demo.py:175:
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

-   x = 44, y = 43
-
-   def somefunc(x,y):
-   >   otherfunc(x,y)
-
-   failure_demo.py:8:
+   failure_demo.py:8: in somefunc
+       otherfunc(x,y)
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    a = 44, b = 43
@@ -455,7 +450,7 @@ get on the terminal - we are working on that):
    failure_demo.py:5: AssertionError
    ___________________ TestMoreErrors.test_z1_unpack_error ____________________

-   self = <failure_demo.TestMoreErrors instance at 0x2ae2ea8>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e4f7a58>

    def test_z1_unpack_error(self):
        l = []
@@ -465,7 +460,7 @@ get on the terminal - we are working on that):
    failure_demo.py:179: ValueError
    ____________________ TestMoreErrors.test_z2_type_error _____________________

-   self = <failure_demo.TestMoreErrors instance at 0x29da518>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e52db38>

    def test_z2_type_error(self):
        l = 3
@@ -475,19 +470,19 @@ get on the terminal - we are working on that):
    failure_demo.py:183: TypeError
    ______________________ TestMoreErrors.test_startswith ______________________

-   self = <failure_demo.TestMoreErrors instance at 0x29b8440>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e538a58>

    def test_startswith(self):
        s = "123"
        g = "456"
    >   assert s.startswith(g)
-   E   assert <built-in method startswith of str object at 0x29ea328>('456')
-   E    +  where <built-in method startswith of str object at 0x29ea328> = '123'.startswith
+   E   assert <built-in method startswith of str object at 0x2aec3e501420>('456')
+   E    +  where <built-in method startswith of str object at 0x2aec3e501420> = '123'.startswith

    failure_demo.py:188: AssertionError
    __________________ TestMoreErrors.test_startswith_nested ___________________

-   self = <failure_demo.TestMoreErrors instance at 0x2ae4e18>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e4f1b00>

    def test_startswith_nested(self):
        def f():
@@ -495,15 +490,15 @@ get on the terminal - we are working on that):
        def g():
            return "456"
    >   assert f().startswith(g())
-   E   assert <built-in method startswith of str object at 0x29ea328>('456')
-   E    +  where <built-in method startswith of str object at 0x29ea328> = '123'.startswith
-   E    +    where '123' = <function f at 0x29595f0>()
-   E    +  and   '456' = <function g at 0x2ab5320>()
+   E   assert <built-in method startswith of str object at 0x2aec3e501420>('456')
+   E    +  where <built-in method startswith of str object at 0x2aec3e501420> = '123'.startswith
+   E    +    where '123' = <function TestMoreErrors.test_startswith_nested.<locals>.f at 0x2aec3e5572f0>()
+   E    +  and   '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0x2aec3e557268>()

    failure_demo.py:195: AssertionError
    _____________________ TestMoreErrors.test_global_func ______________________

-   self = <failure_demo.TestMoreErrors instance at 0x2abf320>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e495438>

    def test_global_func(self):
    >   assert isinstance(globf(42), float)
@@ -513,18 +508,18 @@ get on the terminal - we are working on that):
    failure_demo.py:198: AssertionError
    _______________________ TestMoreErrors.test_instance _______________________

-   self = <failure_demo.TestMoreErrors instance at 0x2aaf050>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e567240>

    def test_instance(self):
        self.x = 6*7
    >   assert self.x != 42
    E   assert 42 != 42
-   E    +  where 42 = <failure_demo.TestMoreErrors instance at 0x2aaf050>.x
+   E    +  where 42 = <failure_demo.TestMoreErrors object at 0x2aec3e567240>.x

    failure_demo.py:202: AssertionError
    _______________________ TestMoreErrors.test_compare ________________________

-   self = <failure_demo.TestMoreErrors instance at 0x2aedbd8>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e502cc0>

    def test_compare(self):
    >   assert globf(10) < 5
@@ -534,7 +529,7 @@ get on the terminal - we are working on that):
    failure_demo.py:205: AssertionError
    _____________________ TestMoreErrors.test_try_finally ______________________

-   self = <failure_demo.TestMoreErrors instance at 0x29f2098>
+   self = <failure_demo.TestMoreErrors object at 0x2aec3e5197f0>

    def test_try_finally(self):
        x = 1
@@ -543,4 +538,4 @@ get on the terminal - we are working on that):
    E   assert 1 == 0

    failure_demo.py:210: AssertionError
-   ======================== 39 failed in 0.20 seconds =========================
+   ======================== 39 failed in 0.22 seconds =========================

@@ -41,9 +41,9 @@ Let's run this without supplying our new option::
    F
    ================================= FAILURES =================================
    _______________________________ test_answer ________________________________

    cmdopt = 'type1'

    def test_answer(cmdopt):
        if cmdopt == "type1":
            print ("first")
@@ -51,9 +51,9 @@ Let's run this without supplying our new option::
            print ("second")
    >   assert 0 # to see what was printed
    E   assert 0

    test_sample.py:6: AssertionError
-   ----------------------------- Captured stdout ------------------------------
+   --------------------------- Captured stdout call ---------------------------
    first
    1 failed in 0.01 seconds

@@ -63,9 +63,9 @@ And now with supplying a command line option::
    F
    ================================= FAILURES =================================
    _______________________________ test_answer ________________________________

    cmdopt = 'type2'

    def test_answer(cmdopt):
        if cmdopt == "type1":
            print ("first")
@@ -73,9 +73,9 @@ And now with supplying a command line option::
            print ("second")
    >   assert 0 # to see what was printed
    E   assert 0

    test_sample.py:6: AssertionError
-   ----------------------------- Captured stdout ------------------------------
+   --------------------------- Captured stdout call ---------------------------
    second
    1 failed in 0.01 seconds
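
For context, the conftest that defines ``cmdopt`` appears earlier in the
docs, not in this hunk; it is essentially::

    # content of conftest.py
    import pytest

    def pytest_addoption(parser):
        parser.addoption("--cmdopt", action="store", default="type1",
            help="my option: type1 or type2")

    @pytest.fixture
    def cmdopt(request):
        return request.config.getoption("--cmdopt")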

@@ -108,7 +108,7 @@ directory with the above conftest.py::

    $ py.test
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 0 items

    ============================= in 0.00 seconds =============================
@@ -152,12 +152,12 @@ and when running it will see a skipped "slow" test::

    $ py.test -rs # "-rs" means report details on the little 's'
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 2 items

    test_module.py .s
    ========================= short test summary info ==========================
-   SKIP [1] /tmp/doc-exec-70/conftest.py:9: need --runslow option to run
+   SKIP [1] /tmp/doc-exec-243/conftest.py:9: need --runslow option to run

    =================== 1 passed, 1 skipped in 0.01 seconds ====================

@@ -165,7 +165,7 @@ Or run it including the ``slow`` marked test::

    $ py.test --runslow
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 2 items

    test_module.py ..
@@ -256,7 +256,7 @@ which will add the string to the test header accordingly::

    $ py.test
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    project deps: mylib-1.1
    collected 0 items
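
The hook behind the extra ``project deps`` line is a one-liner using the
``pytest_report_header`` hook; the exact conftest is not in this hunk, but a
minimal sketch is::

    # content of conftest.py -- a sketch
    def pytest_report_header(config):
        return "project deps: mylib-1.1"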

@@ -279,7 +279,7 @@ which will add info only when run with "--v"::

    $ py.test -v
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
    info1: did you know that ...
    did you?
    collecting ... collected 0 items
@@ -290,7 +290,7 @@ and nothing when run plainly::

    $ py.test
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 0 items

    ============================= in 0.00 seconds =============================
@@ -322,7 +322,7 @@ Now we can profile which test functions execute the slowest::

    $ py.test --durations=3
    =========================== test session starts ============================
-   platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
+   platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 3 items

    test_some_are_slow.py ...
@@ -330,7 +330,7 @@ Now we can profile which test functions execute the slowest::
    ========================= slowest 3 test durations =========================
    0.20s call test_some_are_slow.py::test_funcslow2
    0.10s call test_some_are_slow.py::test_funcslow1
-   0.00s setup test_some_are_slow.py::test_funcfast
+   0.00s setup test_some_are_slow.py::test_funcslow2
    ========================= 3 passed in 0.31 seconds =========================
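
The module being profiled is easy to infer from the durations listing; a
sketch matching the names above (the sleep times are illustrative)::

    # content of test_some_are_slow.py -- a sketch
    import time

    def test_funcfast():
        pass

    def test_funcslow1():
        time.sleep(0.1)

    def test_funcslow2():
        time.sleep(0.2)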
|
||||
|
||||
incremental testing - test steps
|
||||
@@ -383,7 +383,7 @@ If we run this::

$ py.test -rx
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 4 items

test_step.py .Fx.
@@ -391,7 +391,7 @@ If we run this::
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________

self = <test_step.TestUserHandling instance at 0x2768dd0>
self = <test_step.TestUserHandling object at 0x2b2ef2a4feb8>

    def test_modification(self):
>       assert 0
@@ -453,7 +453,7 @@ We can run this::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 7 items

test_step.py .Fx.
@@ -463,17 +463,17 @@ We can run this::

================================== ERRORS ==================================
_______________________ ERROR at setup of test_root ________________________
file /tmp/doc-exec-70/b/test_error.py, line 1
file /tmp/doc-exec-243/b/test_error.py, line 1
  def test_root(db):  # no db here, will error out
fixture 'db' not found
available fixtures: pytestconfig, capfd, monkeypatch, capsys, recwarn, tmpdir
available fixtures: tmpdir, monkeypatch, capsys, capfd, pytestconfig, recwarn
use 'py.test --fixtures [testpath]' for help on them.

/tmp/doc-exec-70/b/test_error.py:1
/tmp/doc-exec-243/b/test_error.py:1
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________

self = <test_step.TestUserHandling instance at 0x238fdd0>
self = <test_step.TestUserHandling object at 0x2b63a7aec710>

    def test_modification(self):
>       assert 0
@@ -482,20 +482,20 @@ We can run this::
test_step.py:9: AssertionError
_________________________________ test_a1 __________________________________

db = <conftest.DB instance at 0x23f9998>
db = <conftest.DB object at 0x2b63a7b04470>

    def test_a1(db):
>       assert 0, db  # to show value
E       AssertionError: <conftest.DB instance at 0x23f9998>
E       AssertionError: <conftest.DB object at 0x2b63a7b04470>

a/test_db.py:2: AssertionError
_________________________________ test_a2 __________________________________

db = <conftest.DB instance at 0x23f9998>
db = <conftest.DB object at 0x2b63a7b04470>

    def test_a2(db):
>       assert 0, db  # to show value
E       AssertionError: <conftest.DB instance at 0x23f9998>
E       AssertionError: <conftest.DB object at 0x2b63a7b04470>

a/test_db2.py:2: AssertionError
========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.03 seconds ==========
@@ -553,7 +553,7 @@ and run them::

$ py.test test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items

test_module.py FF
@@ -561,7 +561,7 @@ and run them::
================================= FAILURES =================================
________________________________ test_fail1 ________________________________

tmpdir = local('/tmp/pytest-1012/test_fail10')
tmpdir = local('/tmp/pytest-509/test_fail10')

    def test_fail1(tmpdir):
>       assert 0
@@ -575,12 +575,12 @@ and run them::
E       assert 0

test_module.py:4: AssertionError
========================= 2 failed in 0.01 seconds =========================
========================= 2 failed in 0.02 seconds =========================
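
The hook writing the ``failures`` file lives in a conftest.py outside these
hunks. A sketch using the pytest 2.x hook API (``__multicall__`` and the
``tryfirst`` marker were the idiom of that era)::

    # content of conftest.py (sketch)
    import os.path
    import pytest

    @pytest.mark.tryfirst
    def pytest_runtest_makereport(item, call, __multicall__):
        # run the remaining hook implementations to obtain the report object
        rep = __multicall__.execute()
        if rep.when == "call" and rep.failed:
            mode = "a" if os.path.exists("failures") else "w"
            with open("failures", mode) as f:
                # record the tmpdir location, as shown by "cat failures" below
                if "tmpdir" in item.fixturenames:
                    extra = " (%s)" % item.funcargs["tmpdir"]
                else:
                    extra = ""
                f.write(rep.nodeid + extra + "\n")
        return rep
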
you will have a "failures" file which contains the failing test ids::

$ cat failures
test_module.py::test_fail1 (/tmp/pytest-1012/test_fail10)
test_module.py::test_fail1 (/tmp/pytest-509/test_fail10)
test_module.py::test_fail2

Making test result information available in fixtures
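
The conftest.py driving the run below is only partially visible in the diff.
A sketch of the technique, stashing each phase's report on the item so that
fixture finalizers can inspect it (again assuming the pytest 2.x hook API)::

    # content of conftest.py (sketch)
    import pytest

    @pytest.mark.tryfirst
    def pytest_runtest_makereport(item, call, __multicall__):
        rep = __multicall__.execute()
        # makes item.rep_setup, item.rep_call and item.rep_teardown available
        setattr(item, "rep_" + rep.when, rep)
        return rep

    @pytest.fixture
    def something(request):
        def fin():
            # request.node is the test item; inspect the stashed reports
            if request.node.rep_setup.failed:
                print ("setting up a test failed! %s" % request.node.nodeid)
            elif request.node.rep_setup.passed:
                if request.node.rep_call.failed:
                    print ("executing test failed %s" % request.node.nodeid)
        request.addfinalizer(fin)
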
@@ -642,42 +642,80 @@ if you then have failing tests::

and run it::

$ py.test -s test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 3 items
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 513, in getconftestmodules
    return self._path2confmods[path]
KeyError: local('/tmp/doc-exec-243/test_module.py')

test_module.py Esetting up a test failed! test_module.py::test_setup_fails
Fexecuting test failed test_module.py::test_call_fails
F
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 537, in importconftest
    return self._conftestpath2mod[conftestpath]
KeyError: local('/tmp/doc-exec-243/conftest.py')

================================== ERRORS ==================================
____________________ ERROR at setup of test_setup_fails ____________________
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 543, in importconftest
    mod = conftestpath.pyimport()
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py", line 620, in pyimport
    __import__(modname)
  File "/tmp/doc-exec-243/conftest.py", line 22
    print "setting up a test failed!", request.node.nodeid
                                    ^
SyntaxError: invalid syntax
ERROR: could not load /tmp/doc-exec-243/conftest.py

    @pytest.fixture
    def other():
>       assert 0
E       assert 0

test_module.py:6: AssertionError
================================= FAILURES =================================
_____________________________ test_call_fails ______________________________

something = None

    def test_call_fails(something):
>       assert 0
E       assert 0

test_module.py:12: AssertionError
________________________________ test_fail2 ________________________________

    def test_fail2():
>       assert 0
E       assert 0

test_module.py:15: AssertionError
==================== 2 failed, 1 error in 0.01 seconds =====================

You'll see that the fixture finalizers could use the precise reporting
information.
Integrating pytest runner and cx_freeze
-----------------------------------------------------------

If you freeze your application using a tool like
`cx_freeze <http://cx-freeze.readthedocs.org>`_ in order to distribute it
to your end-users, it is a good idea to also package your test runner and run
your tests using the frozen application.

This way packaging errors such as dependencies not being
included into the executable can be detected early, while also allowing you to
send test files to users so they can run them on their machines, which can be
invaluable for obtaining more information about a hard-to-reproduce bug.

Unfortunately ``cx_freeze`` can't discover pytest's internal modules
automatically because of ``pytest``'s use of dynamic module loading, so you
must declare them explicitly by using ``pytest.freeze_includes()``::

    # contents of setup.py
    from cx_Freeze import setup, Executable
    import pytest

    setup(
        name="app_main",
        executables=[Executable("app_main.py")],
        options={"build_exe": {"includes": pytest.freeze_includes()}},
        # ... other options
    )

If you don't want to ship a different executable just in order to run your tests,
you can make your program check for a certain flag and pass control
over to ``pytest`` instead. For example::

    # contents of app_main.py
    import sys

    if len(sys.argv) > 1 and sys.argv[1] == '--pytest':
        import pytest
        sys.exit(pytest.main(sys.argv[2:]))
    else:
        # normal application execution: at this point argv can be parsed
        # by your argument-parsing library of choice as usual
        ...

This makes it convenient to execute your tests from within your frozen
application, using standard ``py.test`` command-line options::

    $ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/

@@ -60,13 +60,26 @@ will be called ahead of running any tests::

If you run this without output capturing::

$ py.test -q -s test_module.py
callattr_ahead_of_alltests called
callme called!
callme other called
SomeTest callme called
test_method1 called
.test_method1 called
.test other
.test_unit1 method called
.
4 passed in 0.01 seconds
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 513, in getconftestmodules
    return self._path2confmods[path]
KeyError: local('/tmp/doc-exec-244/test_module.py')

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 537, in importconftest
    return self._conftestpath2mod[conftestpath]
KeyError: local('/tmp/doc-exec-244/conftest.py')

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 543, in importconftest
    mod = conftestpath.pyimport()
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py", line 620, in pyimport
    __import__(modname)
  File "/tmp/doc-exec-244/conftest.py", line 6
    print "callattr_ahead_of_alltests called"
                                            ^
SyntaxError: invalid syntax
ERROR: could not load /tmp/doc-exec-244/conftest.py

@@ -23,3 +23,8 @@ def test_hello5():

def test_hello6():
    pytest.xfail("reason")

@xfail(raises=IndexError)
def test_hello7():
    x = []
    x[1] = 1

@@ -76,7 +76,7 @@ marked ``smtp`` fixture function. Running the test looks like this::

$ py.test test_smtpsimple.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1
collected 1 items

test_smtpsimple.py F
@@ -84,17 +84,16 @@ marked ``smtp`` fixture function. Running the test looks like this::
================================= FAILURES =================================
________________________________ test_ehlo _________________________________

smtp = <smtplib.SMTP instance at 0x15cc0e0>
smtp = <smtplib.SMTP object at 0x2ade77b37e48>

    def test_ehlo(smtp):
        response, msg = smtp.ehlo()
        assert response == 250
        assert "merlinux" in msg
>       assert 0  # for demo purposes
E       assert 0
>       assert "merlinux" in msg
E       TypeError: Type str doesn't support the buffer API

test_smtpsimple.py:12: AssertionError
========================= 1 failed in 0.21 seconds =========================
test_smtpsimple.py:11: TypeError
========================= 1 failed in 0.18 seconds =========================

In the failure traceback we see that the test function was called with a
``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture
@@ -194,7 +193,7 @@ inspect what is going on and can now run the tests::

$ py.test test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1
collected 2 items

test_module.py FF
@@ -202,19 +201,18 @@ inspect what is going on and can now run the tests::
================================= FAILURES =================================
________________________________ test_ehlo _________________________________

smtp = <smtplib.SMTP instance at 0x237b638>
smtp = <smtplib.SMTP object at 0x2b4b07e38e48>

    def test_ehlo(smtp):
        response = smtp.ehlo()
        assert response[0] == 250
        assert "merlinux" in response[1]
>       assert 0  # for demo purposes
E       assert 0
>       assert "merlinux" in response[1]
E       TypeError: Type str doesn't support the buffer API

test_module.py:6: AssertionError
test_module.py:5: TypeError
________________________________ test_noop _________________________________

smtp = <smtplib.SMTP instance at 0x237b638>
smtp = <smtplib.SMTP object at 0x2b4b07e38e48>

    def test_noop(smtp):
        response = smtp.noop()
@@ -223,7 +221,7 @@ inspect what is going on and can now run the tests::
E       assert 0

test_module.py:11: AssertionError
========================= 2 failed in 0.23 seconds =========================
========================= 2 failed in 0.18 seconds =========================

You see the two ``assert 0`` failing and more importantly you can also see
that the same (module-scoped) ``smtp`` object was passed into the two
@@ -271,7 +269,7 @@ Let's execute it::
$ py.test -s -q --tb=no
FFteardown smtp

2 failed in 0.21 seconds
2 failed in 0.16 seconds

We see that the ``smtp`` instance is finalized after the two
tests finished execution. Note that if we decorated our fixture
@@ -312,7 +310,7 @@ again, nothing much has changed::

$ py.test -s -q --tb=no
FF
2 failed in 0.59 seconds
2 failed in 0.17 seconds

Let's quickly create another test module that actually sets the
server URL in its module namespace::
@@ -331,8 +329,8 @@ Running it::
================================= FAILURES =================================
______________________________ test_showhelo _______________________________
test_anothersmtp.py:5: in test_showhelo
>   assert 0, smtp.helo()
E   AssertionError: (250, 'mail.python.org')
    assert 0, smtp.helo()
E   AssertionError: (250, b'mail.python.org')

voila! The ``smtp`` fixture function picked up our mail server name
from the module namespace.
@@ -379,19 +377,18 @@ So let's just do another run::
================================= FAILURES =================================
__________________________ test_ehlo[merlinux.eu] __________________________

smtp = <smtplib.SMTP instance at 0x21f3e60>
smtp = <smtplib.SMTP object at 0x2b824acf3e80>

    def test_ehlo(smtp):
        response = smtp.ehlo()
        assert response[0] == 250
        assert "merlinux" in response[1]
>       assert 0  # for demo purposes
E       assert 0
>       assert "merlinux" in response[1]
E       TypeError: Type str doesn't support the buffer API

test_module.py:6: AssertionError
test_module.py:5: TypeError
__________________________ test_noop[merlinux.eu] __________________________

smtp = <smtplib.SMTP instance at 0x21f3e60>
smtp = <smtplib.SMTP object at 0x2b824acf3e80>

    def test_noop(smtp):
        response = smtp.noop()
@@ -402,20 +399,20 @@ So let's just do another run::
test_module.py:11: AssertionError
________________________ test_ehlo[mail.python.org] ________________________

smtp = <smtplib.SMTP instance at 0x22047e8>
smtp = <smtplib.SMTP object at 0x2b824b19fb38>

    def test_ehlo(smtp):
        response = smtp.ehlo()
        assert response[0] == 250
>       assert "merlinux" in response[1]
E       assert 'merlinux' in 'mail.python.org\nSIZE 25600000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN'
E       TypeError: Type str doesn't support the buffer API

test_module.py:5: AssertionError
----------------------------- Captured stdout ------------------------------
finalizing <smtplib.SMTP instance at 0x21f3e60>
test_module.py:5: TypeError
-------------------------- Captured stdout setup ---------------------------
finalizing <smtplib.SMTP object at 0x2b824acf3e80>
________________________ test_noop[mail.python.org] ________________________

smtp = <smtplib.SMTP instance at 0x22047e8>
smtp = <smtplib.SMTP object at 0x2b824b19fb38>

    def test_noop(smtp):
        response = smtp.noop()
@@ -424,7 +421,7 @@ So let's just do another run::
E       assert 0

test_module.py:11: AssertionError
4 failed in 6.06 seconds
4 failed in 6.37 seconds
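
The fixture these runs exercise is defined in a conftest.py outside the
hunks. A sketch consistent with the two server names and the ``finalizing``
line captured above::

    # content of conftest.py (sketch)
    import smtplib
    import pytest

    @pytest.fixture(scope="module", params=["merlinux.eu", "mail.python.org"])
    def smtp(request):
        smtp = smtplib.SMTP(request.param)
        def fin():
            print ("finalizing %s" % smtp)
            smtp.close()
        request.addfinalizer(fin)
        return smtp
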
We see that our two test functions each ran twice, against the different
``smtp`` instances. Note also, that with the ``mail.python.org``
@@ -464,13 +461,13 @@ Here we declare an ``app`` fixture which receives the previously defined

$ py.test -v test_appsetup.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
collecting ... collected 2 items

test_appsetup.py:12: test_smtp_exists[merlinux.eu] PASSED
test_appsetup.py:12: test_smtp_exists[mail.python.org] PASSED
test_appsetup.py::test_smtp_exists[merlinux.eu] PASSED
test_appsetup.py::test_smtp_exists[mail.python.org] PASSED

========================= 2 passed in 6.42 seconds =========================
========================= 2 passed in 6.11 seconds =========================

Due to the parametrization of ``smtp`` the test will run twice with two
different ``App`` instances and respective smtp servers. There is no
@@ -508,7 +505,7 @@ to show the setup/teardown flow::

    @pytest.fixture(scope="module", params=["mod1", "mod2"])
    def modarg(request):
        param = request.param
        print "create", param
        print ("create", param)
        def fin():
            print ("fin %s" % param)
        return param
@@ -518,36 +515,36 @@ to show the setup/teardown flow::
        return request.param

    def test_0(otherarg):
        print (" test0", otherarg)
    def test_1(modarg):
        print (" test1", modarg)
    def test_2(otherarg, modarg):
        print (" test2", otherarg, modarg)

Let's run the tests in verbose mode, looking at the print output::

$ py.test -v -s test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
collecting ... collected 8 items

test_module.py:15: test_0[1]  test0 1
test_module.py::test_0[1]  test0 1
PASSED
test_module.py:15: test_0[2]  test0 2
test_module.py::test_0[2]  test0 2
PASSED
test_module.py:17: test_1[mod1] create mod1
test_module.py::test_1[mod1] create mod1
 test1 mod1
PASSED
test_module.py:19: test_2[1-mod1]  test2 1 mod1
test_module.py::test_2[1-mod1]  test2 1 mod1
PASSED
test_module.py:19: test_2[2-mod1]  test2 2 mod1
test_module.py::test_2[2-mod1]  test2 2 mod1
PASSED
test_module.py:17: test_1[mod2] create mod2
test_module.py::test_1[mod2] create mod2
 test1 mod2
PASSED
test_module.py:19: test_2[1-mod2]  test2 1 mod2
test_module.py::test_2[1-mod2]  test2 1 mod2
PASSED
test_module.py:19: test_2[2-mod2]  test2 2 mod2
test_module.py::test_2[2-mod2]  test2 2 mod2
PASSED

========================= 8 passed in 0.01 seconds =========================
@@ -1,12 +1,16 @@
Installation and Getting Started
===================================

**Pythons**: Python 2.5-3.3, Jython, PyPy
**Pythons**: Python 2.6-3.4, Jython, PyPy-2.3

**Platforms**: Unix/Posix and Windows

**PyPI package name**: `pytest <http://pypi.python.org/pypi/pytest>`_

**dependencies**: `py <http://pypi.python.org/pypi/py>`_,
`colorama (Windows) <http://pypi.python.org/pypi/colorama>`_,
`argparse (py26) <http://pypi.python.org/pypi/argparse>`_.

**documentation as PDF**: `download latest <http://pytest.org/latest/pytest.pdf>`_

.. _`getstarted`:
@@ -23,7 +27,7 @@ Installation options::
To check your installation has installed the correct version::

$ py.test --version
This is pytest version 2.5.2, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc
This is pytest version 2.6.1, imported from /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/pytest.py

If you get an error checkout :ref:`installation issues`.
@@ -45,19 +49,19 @@ That's it. You can execute the test function now::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 1 items

test_sample.py F

================================= FAILURES =================================
_______________________________ test_answer ________________________________

    def test_answer():
>       assert func(3) == 5
E       assert 4 == 5
E        +  where 4 = func(3)

test_sample.py:5: AssertionError
========================= 1 failed in 0.01 seconds =========================
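
The module under test is defined earlier on the page and elided from the
hunk; for reference, a sketch matching the failure output above::

    # content of test_sample.py (sketch)
    def func(x):
        return x + 1

    def test_answer():
        assert func(3) == 5   # fails, since func(3) == 4
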
@@ -93,7 +97,7 @@ Running it with, this time in "quiet" reporting mode::

$ py.test -q test_sysexit.py
.
1 passed in 0.01 seconds
1 passed in 0.00 seconds
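
The module for this run is likewise elided; a sketch matching the passing
quiet-mode output::

    # content of test_sysexit.py (sketch)
    import pytest

    def f():
        raise SystemExit(1)

    def test_mytest():
        # passes because the expected exception is raised
        with pytest.raises(SystemExit):
            f()
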
.. todo:: For further ways to assert exceptions see the `raises`

@@ -122,14 +126,14 @@ run the module by passing its filename::
.F
================================= FAILURES =================================
____________________________ TestClass.test_two ____________________________

self = <test_class.TestClass instance at 0x255a0e0>
self = <test_class.TestClass object at 0x2ad4b005b710>

    def test_two(self):
        x = "hello"
>       assert hasattr(x, 'check')
E       assert hasattr('hello', 'check')

test_class.py:8: AssertionError
1 failed, 1 passed in 0.01 seconds
@@ -147,7 +151,7 @@ resources, for example a unique temporary directory::

    # content of test_tmpdir.py
    def test_needsfiles(tmpdir):
        print tmpdir
        print (tmpdir)
        assert 0

We list the name ``tmpdir`` in the test function signature and
@@ -155,21 +159,18 @@ We list the name ``tmpdir`` in the test function signature and
before performing the test function call. Let's just run it::

$ py.test -q test_tmpdir.py
F
================================= FAILURES =================================
_____________________________ test_needsfiles ______________________________

tmpdir = local('/tmp/pytest-1008/test_needsfiles0')

    def test_needsfiles(tmpdir):
        print tmpdir
>       assert 0
E       assert 0

test_tmpdir.py:3: AssertionError
----------------------------- Captured stdout ------------------------------
/tmp/pytest-1008/test_needsfiles0
1 failed in 0.01 seconds

================================== ERRORS ==================================
_____________________ ERROR collecting test_tmpdir.py ______________________
/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:463: in _importtestmodule
    mod = self.fspath.pyimport(ensuresyspath=True)
/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py:620: in pyimport
    __import__(modname)
E     File "/tmp/doc-exec-187/test_tmpdir.py", line 2
E       print tmpdir
E             ^
E   SyntaxError: invalid syntax
1 error in 0.03 seconds

Before the test runs, a unique-per-test-invocation temporary directory
was created. More info at :ref:`tmpdir handling`.
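
``tmpdir`` is a ``py.path.local`` object; a short sketch of typical use (the
file name is illustrative)::

    def test_create_file(tmpdir):
        p = tmpdir.join("hello.txt")    # a path inside the per-test directory
        p.write("content")
        assert p.read() == "content"
        assert len(tmpdir.listdir()) == 1
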
@@ -1,4 +1,3 @@

.. highlightlang:: python
.. _`goodpractises`:

@@ -69,7 +68,7 @@ Important notes relating to both schemes:

- **avoid "__init__.py" files in your test directories**.
  This way your tests can run easily against an installed version
  of ``mypkg``, independently from if the installed package contains
  of ``mypkg``, independently from the installed package if it contains
  the tests or not.

- With inlined tests you might put ``__init__.py`` into test
@@ -190,12 +189,16 @@ this to your ``setup.py`` file::

        user_options = []
        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            import sys, subprocess
            errno = subprocess.call([sys.executable, 'runtests.py'])
            raise SystemExit(errno)


    setup(
        #...,
        cmdclass = {'test': PyTest},
@@ -220,20 +223,30 @@ Setuptools supports writing our own Test command for invoking pytest.
Most often it is better to use tox_ instead, but here is how you can
get started with setuptools integration::

    from setuptools.command.test import test as TestCommand
    import sys

    from setuptools.command.test import test as TestCommand


    class PyTest(TestCommand):
        user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

        def initialize_options(self):
            TestCommand.initialize_options(self)
            self.pytest_args = None

        def finalize_options(self):
            TestCommand.finalize_options(self)
            self.test_args = []
            self.test_suite = True

        def run_tests(self):
            # import here, cause outside the eggs aren't loaded
            import pytest
            errno = pytest.main(self.test_args)
            errno = pytest.main(self.pytest_args)
            sys.exit(errno)


    setup(
        #...,
        tests_require=['pytest'],
@@ -245,7 +258,12 @@ Now if you run::

    python setup.py test

this will download ``pytest`` if needed and then run your tests
as you would expect it to.
as you would expect it to. You can pass a single string of arguments
using the ``--pytest-args`` or ``-a`` command-line option. For example::

    python setup.py test -a "--durations=5"

is equivalent to running ``py.test --durations=5``.

.. _`test discovery`:
.. _`Python test discovery`:

BIN  doc/en/img/pytest1.png  (new file, 5.9 KiB; binary file not shown)
BIN  doc/en/img/pytest1favi.ico  (new file, 3.7 KiB; binary file not shown)
@@ -1,24 +1,27 @@

.. _features:

.. second training: `professional testing with Python <http://www.python-academy.com/courses/specialtopics/python_course_testing.html>`_ , 25-27th November 2013, Leipzig.
.. note::

   next training: `professional testing with pytest and tox <http://www.python-academy.com/courses/specialtopics/python_course_testing.html>`_, 24-26th November 2014, Freiburg, Germany


pytest: helps you write better programs
=============================================

**a mature full-featured Python testing tool**

- runs on Posix/Windows, Python 2.5-3.3, PyPy and Jython-2.5.1
- **zero-reported-bugs** policy with >1000 tests against itself
- runs on Posix/Windows, Python 2.6-3.4, PyPy and (possibly still) Jython-2.5.1
- **well tested** with more than a thousand tests against itself
- **strict backward compatibility policy** for safe pytest upgrades
- :ref:`comprehensive online <toc>` and `PDF documentation <pytest.pdf>`_
- many :ref:`third party plugins <extplugins>` and :ref:`builtin helpers <pytest helpers>`,
- used in :ref:`many small and large projects and organisations <projects>`
- comes with many :ref:`tested examples <examples>`

**provides easy no-boilerplate testing**

- makes it :ref:`easy to get started <getstarted>`,
  has many :ref:`usage options <usage>`
- :ref:`assert with the assert statement`
- helpful :ref:`traceback and failing assertion reporting <tbreportdemo>`
@@ -38,7 +41,7 @@ pytest: helps you write better programs

**integrates with other testing methods and tools**:

- multi-paradigm: pytest can run ``nose``, ``unittest`` and
  ``doctest`` style test suites, including running testcases made for
  Django and trial
- supports :ref:`good integration practises <goodpractises>`

@@ -48,7 +48,7 @@ requests in all your tests, you can do::

    import pytest

    @pytest.fixture(autouse=True)
    def no_requests(monkeypatch):
        monkeypatch.delattr("requests.session.Session.request")
        monkeypatch.delattr("requests.sessions.Session.request")

This autouse fixture will be executed for each test function and it
will delete the method ``request.session.Session.request``

@@ -25,6 +25,7 @@ Supported nose Idioms
* SkipTest exceptions and markers
* setup/teardown decorators
* yield-based tests and their setup
* ``__test__`` attribute on modules/classes/functions
* general usage of nose utilities

Unsupported idioms / known issues

@@ -53,7 +53,7 @@ them in turn::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 3 items

test_expectation.py ..F
@@ -100,7 +100,7 @@ Let's run this::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 3 items

test_expectation.py ..x
@@ -170,8 +170,8 @@ Let's also run with a stringinput that will lead to a failing test::

    def test_valid_string(stringinput):
>       assert stringinput.isalpha()
E       assert <built-in method isalpha of str object at 0x2b869b32b148>()
E        +  where <built-in method isalpha of str object at 0x2b869b32b148> = '!'.isalpha
E       assert <built-in method isalpha of str object at 0x2ab7463a6b58>()
E        +  where <built-in method isalpha of str object at 0x2ab7463a6b58> = '!'.isalpha

test_strings.py:3: AssertionError
1 failed in 0.01 seconds
@@ -185,7 +185,7 @@ listlist::
$ py.test -q -rs test_strings.py
s
========================= short test summary info ==========================
SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:1110: got empty parameter set, function test_valid_string at /tmp/doc-exec-24/test_strings.py:1
SKIP [1] /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1139: got empty parameter set, function test_valid_string at /tmp/doc-exec-195/test_strings.py:1
1 skipped in 0.01 seconds

For further examples, you might want to look at :ref:`more

@@ -64,9 +64,10 @@ tool, for example::

    pip uninstall pytest-NAME

If a plugin is installed, ``pytest`` automatically finds and integrates it,
there is no need to activate it. We have a :doc:`beta page listing
all 3rd party plugins and their status <plugins_index/index>` and here
is a little annotated list for some popular plugins:
there is no need to activate it. We have a :doc:`page listing
all 3rd party plugins and their status against the latest py.test version
<plugins_index/index>` and here is a little annotated list
for some popular plugins:

.. _`django`: https://www.djangoproject.com/

@@ -109,7 +110,11 @@ is a little annotated list for some popular plugins:
* `oejskit <http://pypi.python.org/pypi/oejskit>`_:
  a plugin to run javascript unittests in life browsers

You may discover more plugins through a `pytest- pypi.python.org search`_.
To see a complete list of all plugins with their latest testing
status against different py.test and Python versions, please visit
`pytest-plugs <http://pytest-plugs.herokuapp.com/>`_.

You may also discover more plugins through a `pytest- pypi.python.org search`_.

.. _`available installable plugins`:
.. _`pytest- pypi.python.org search`: http://pypi.python.org/pypi?%3Aaction=search&term=pytest-&submit=search
@@ -304,6 +309,7 @@ Initialization, command line and configuration hooks

.. currentmodule:: _pytest.hookspec

.. autofunction:: pytest_load_initial_conftests
.. autofunction:: pytest_cmdline_preparse
.. autofunction:: pytest_cmdline_parse
.. autofunction:: pytest_namespace
@@ -315,7 +321,7 @@ Initialization, command line and configuration hooks
Generic "runtest" hooks
------------------------------

All all runtest related hooks receive a :py:class:`pytest.Item` object.
All runtest related hooks receive a :py:class:`pytest.Item` object.

.. autofunction:: pytest_runtest_protocol
.. autofunction:: pytest_runtest_setup
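
To make the hook list concrete, a minimal conftest.py implementing one of
these hooks (the printed message is illustrative)::

    # content of conftest.py (sketch)
    def pytest_runtest_setup(item):
        # called before each test item runs; item is a pytest.Item
        print ("setting up %s" % item.nodeid)
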
BIN  doc/en/plugins_index/bitbucket.png  (new file, 679 B; binary file not shown)
BIN  doc/en/plugins_index/github.png  (new file, 734 B; binary file not shown)
@@ -3,110 +3,163 @@

List of Third-Party Plugins
===========================

(Name, repository and summary for each plugin release; the rendered table also
carries Py27/Py33 compatibility badges served from pytest-plugs.herokuapp.com.)

* `pytest-bdd-0.6.8 <http://pypi.python.org/pypi/pytest-bdd/0.6.8>`_ (https://github.com/olegpidsadnyi/pytest-bdd): BDD for pytest
* `pytest-bdd-splinter-0.5.98 <http://pypi.python.org/pypi/pytest-bdd-splinter/0.5.98>`_ (https://github.com/olegpidsadnyi/pytest-bdd-splinter): Splinter subplugin for Pytest BDD plugin
* `pytest-bench-0.2.5 <http://pypi.python.org/pypi/pytest-bench/0.2.5>`_ (http://github.com/concordusapps/pytest-bench): Benchmark utility that plugs into pytest.
* `pytest-blockage-0.1 <http://pypi.python.org/pypi/pytest-blockage/0.1>`_ (https://github.com/rob-b/pytest-blockage): Disable network requests during a test run.
* `pytest-browsermob-proxy-0.1 <http://pypi.python.org/pypi/pytest-browsermob-proxy/0.1>`_ (https://github.com/davehunt/pytest-browsermob-proxy): BrowserMob proxy plugin for py.test.
* `pytest-bugzilla-0.2 <http://pypi.python.org/pypi/pytest-bugzilla/0.2>`_ (http://github.com/nibrahim/pytest_bugzilla): py.test bugzilla integration plugin
* `pytest-cache-1.0 <http://pypi.python.org/pypi/pytest-cache/1.0>`_ (http://bitbucket.org/hpk42/pytest-cache/): pytest plugin with mechanisms for caching across test runs
* `pytest-capturelog-0.7 <http://pypi.python.org/pypi/pytest-capturelog/0.7>`_ (http://bitbucket.org/memedough/pytest-capturelog/overview): py.test plugin to capture log messages
* `pytest-codecheckers-0.2 <http://pypi.python.org/pypi/pytest-codecheckers/0.2>`_ (http://bitbucket.org/RonnyPfannschmidt/pytest-codecheckers/): pytest plugin to add source code sanity checks (pep8 and friends)
* `pytest-contextfixture-0.1.1 <http://pypi.python.org/pypi/pytest-contextfixture/0.1.1>`_ (http://github.com/pelme/pytest-contextfixture/): Define pytest fixtures as context managers.
* `pytest-couchdbkit-0.5.1 <http://pypi.python.org/pypi/pytest-couchdbkit/0.5.1>`_ (http://bitbucket.org/RonnyPfannschmidt/pytest-couchdbkit): py.test extension for per-test couchdb databases using couchdbkit
* `pytest-cov-1.6 <http://pypi.python.org/pypi/pytest-cov/1.6>`_ (http://bitbucket.org/memedough/pytest-cov/overview): py.test plugin for coverage reporting with support for both centralised and distributed testing, including subprocesses and multiprocessing
* `pytest-dbfixtures-0.4.3 <http://pypi.python.org/pypi/pytest-dbfixtures/0.4.3>`_ (https://github.com/clearcode/pytest-dbfixtures): dbfixtures plugin for py.test.
* `pytest-django-2.5 <http://pypi.python.org/pypi/pytest-django/2.5>`_ (http://pytest-django.readthedocs.org/): A Django plugin for py.test.
* `pytest-django-lite-0.1.0 <http://pypi.python.org/pypi/pytest-django-lite/0.1.0>`_ (repository UNKNOWN): The bare minimum to integrate py.test with Django.
* `pytest-figleaf-1.0 <http://pypi.python.org/pypi/pytest-figleaf/1.0>`_ (http://bitbucket.org/hpk42/pytest-figleaf): py.test figleaf coverage plugin
* `pytest-flakes-0.2 <http://pypi.python.org/pypi/pytest-flakes/0.2>`_ (https://github.com/fschulze/pytest-flakes): pytest plugin to check source code with pyflakes
* `pytest-greendots-0.2 <http://pypi.python.org/pypi/pytest-greendots/0.2>`_ (repository UNKNOWN): Green progress dots
* `pytest-growl-0.2 <http://pypi.python.org/pypi/pytest-growl/0.2>`_ (repository UNKNOWN): Growl notifications for pytest results.
* `pytest-incremental-0.3.0 <http://pypi.python.org/pypi/pytest-incremental/0.3.0>`_ (https://bitbucket.org/schettino72/pytest-incremental): an incremental test runner (pytest plugin)
* `pytest-instafail-0.1.1 <http://pypi.python.org/pypi/pytest-instafail/0.1.1>`_ (https://github.com/jpvanhal/pytest-instafail): py.test plugin to show failures instantly
* `pytest-ipdb-0.1-prerelease <http://pypi.python.org/pypi/pytest-ipdb/0.1-prerelease>`_ (https://github.com/mverteuil/pytest-ipdb): A py.test plug-in to enable drop to ipdb debugger on test failure.
* `pytest-jira-0.01 <http://pypi.python.org/pypi/pytest-jira/0.01>`_ (http://github.com/jlaska/pytest_jira): py.test JIRA integration plugin, using markers
* `pytest-konira-0.2 <http://pypi.python.org/pypi/pytest-konira/0.2>`_ (http://github.com/alfredodeza/pytest-konira): Run Konira DSL tests with py.test
* `pytest-localserver-0.3.2 <http://pypi.python.org/pypi/pytest-localserver/0.3.2>`_ (http://bitbucket.org/basti/pytest-localserver/): py.test plugin to test server connections locally.
* `pytest-marker-bugzilla-0.06 <http://pypi.python.org/pypi/pytest-marker-bugzilla/0.06>`_ (http://github.com/eanxgeek/pytest_marker_bugzilla): py.test bugzilla integration plugin, using markers
* `pytest-markfiltration-0.8 <http://pypi.python.org/pypi/pytest-markfiltration/0.8>`_ (https://github.com/adamgoucher/pytest-markfiltration): summary UNKNOWN
* `pytest-marks-0.4 <http://pypi.python.org/pypi/pytest-marks/0.4>`_ (https://github.com/adamgoucher/pytest-marks): summary UNKNOWN
* `pytest-monkeyplus-1.1.0 <http://pypi.python.org/pypi/pytest-monkeyplus/1.1.0>`_ (http://bitbucket.org/hsoft/pytest-monkeyplus/): pytest's monkeypatch subclass with extra functionalities
* `pytest-mozwebqa-1.1.1 <http://pypi.python.org/pypi/pytest-mozwebqa/1.1.1>`_ (https://github.com/davehunt/pytest-mozwebqa): Mozilla WebQA plugin for py.test.
* `pytest-oerp-0.2.0 <http://pypi.python.org/pypi/pytest-oerp/0.2.0>`_ (http://github.com/santagada/pytest-oerp/): pytest plugin to test OpenERP modules
* `pytest-osxnotify-0.1.4 <http://pypi.python.org/pypi/pytest-osxnotify/0.1.4>`_ (https://github.com/dbader/pytest-osxnotify): OS X notifications for py.test results.
* `pytest-paste-config-0.1 <http://pypi.python.org/pypi/pytest-paste-config/0.1>`_ (repository UNKNOWN): Allow setting the path to a paste config file
* `pytest-pep8-1.0.5 <http://pypi.python.org/pypi/pytest-pep8/1.0.5>`_ (http://bitbucket.org/hpk42/pytest-pep8/): pytest plugin to check PEP8 requirements
* `pytest-poo-0.2 <http://pypi.python.org/pypi/pytest-poo/0.2>`_ (http://github.com/pelme/pytest-poo): Visualize your crappy tests
* `pytest-pydev-0.1 <http://pypi.python.org/pypi/pytest-pydev/0.1>`_ (http://bitbucket.org/basti/pytest-pydev/): py.test plugin to connect to a remote debug server with PyDev or PyCharm.
* `pytest-qt-1.0.2 <http://pypi.python.org/pypi/pytest-qt/1.0.2>`_ (http://github.com/nicoddemus/pytest-qt): pytest plugin that adds fixtures for testing Qt (PyQt and PySide) applications.
* `pytest-quickcheck-0.8 <http://pypi.python.org/pypi/pytest-quickcheck/0.8>`_ (http://bitbucket.org/t2y/pytest-quickcheck/): pytest plugin to generate random data inspired by QuickCheck
* `pytest-rage-0.1 <http://pypi.python.org/pypi/pytest-rage/0.1>`_ (http://github.com/santagada/pytest-rage/): pytest plugin to implement PEP712
* `pytest-random-0.02 <http://pypi.python.org/pypi/pytest-random/0.02>`_ (https://github.com/klrmn/pytest-random): py.test plugin to randomize tests
* `pytest-rerunfailures-0.03 <http://pypi.python.org/pypi/pytest-rerunfailures/0.03>`_ (https://github.com/klrmn/pytest-rerunfailures): py.test plugin to re-run tests to eliminate flakey failures
* `pytest-runfailed-0.3 <http://pypi.python.org/pypi/pytest-runfailed/0.3>`_ (http://github.com/dmerejkowsky/pytest-runfailed): implement a --failed option for pytest
* `pytest-runner-2.0 <http://pypi.python.org/pypi/pytest-runner/2.0>`_ (https://bitbucket.org/jaraco/pytest-runner): summary UNKNOWN
* `pytest-sugar-0.2.2 <http://pypi.python.org/pypi/pytest-sugar/0.2.2>`_ (http://pivotfinland.com/pytest-sugar/): py.test plugin that adds instafail, ETA and neat graphics
* `pytest-timeout-0.3 <http://pypi.python.org/pypi/pytest-timeout/0.3>`_ (http://bitbucket.org/flub/pytest-timeout/): pytest plugin to abort tests after a timeout
* `pytest-twisted-1.4 <http://pypi.python.org/pypi/pytest-twisted/1.4>`_ (https://github.com/schmir/pytest-twisted): A twisted plugin for py.test.
|
||||
:target: http://pytest-plugs.herokuapp.com/output/pytest-twisted-1.4?py=py27&pytest=2.5.1 :target: http://pytest-plugs.herokuapp.com/output/pytest-twisted-1.4?py=py33&pytest=2.5.1
|
||||
`pytest-xdist-1.9 <http://pypi.python.org/pypi/pytest-xdist/1.9>`_ .. image:: http://pytest-plugs.herokuapp.com/status/pytest-xdist-1.9?py=py27&pytest=2.5.1 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-xdist-1.9?py=py33&pytest=2.5.1 http://bitbucket.org/hpk42/pytest-xdist py.test xdist plugin for distributed testing and loop-on-failing modes
|
||||
:target: http://pytest-plugs.herokuapp.com/output/pytest-xdist-1.9?py=py27&pytest=2.5.1 :target: http://pytest-plugs.herokuapp.com/output/pytest-xdist-1.9?py=py33&pytest=2.5.1
|
||||
`pytest-xprocess-0.8 <http://pypi.python.org/pypi/pytest-xprocess/0.8>`_ .. image:: http://pytest-plugs.herokuapp.com/status/pytest-xprocess-0.8?py=py27&pytest=2.5.1 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-xprocess-0.8?py=py33&pytest=2.5.1 http://bitbucket.org/hpk42/pytest-xprocess/ pytest plugin to manage external processes across test runs
|
||||
:target: http://pytest-plugs.herokuapp.com/output/pytest-xprocess-0.8?py=py27&pytest=2.5.1 :target: http://pytest-plugs.herokuapp.com/output/pytest-xprocess-0.8?py=py33&pytest=2.5.1
|
||||
`pytest-yamlwsgi-0.6 <http://pypi.python.org/pypi/pytest-yamlwsgi/0.6>`_ .. image:: http://pytest-plugs.herokuapp.com/status/pytest-yamlwsgi-0.6?py=py27&pytest=2.5.1 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-yamlwsgi-0.6?py=py33&pytest=2.5.1 UNKNOWN Run tests against wsgi apps defined in yaml
|
||||
:target: http://pytest-plugs.herokuapp.com/output/pytest-yamlwsgi-0.6?py=py27&pytest=2.5.1 :target: http://pytest-plugs.herokuapp.com/output/pytest-yamlwsgi-0.6?py=py33&pytest=2.5.1
|
||||
`pytest-zap-0.1 <http://pypi.python.org/pypi/pytest-zap/0.1>`_ .. image:: http://pytest-plugs.herokuapp.com/status/pytest-zap-0.1?py=py27&pytest=2.5.1 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-zap-0.1?py=py33&pytest=2.5.1 https://github.com/davehunt/pytest-zap OWASP ZAP plugin for py.test.
|
||||
:target: http://pytest-plugs.herokuapp.com/output/pytest-zap-0.1?py=py27&pytest=2.5.1 :target: http://pytest-plugs.herokuapp.com/output/pytest-zap-0.1?py=py33&pytest=2.5.1
|
||||
*(Updated on 2014-01-15)*


The table below contains a listing of plugins found in PyPI and
their status when tested using py.test **2.6.2.dev1** and python 2.7 and
3.3.

A complete listing can also be found at
`pytest-plugs <http://pytest-plugs.herokuapp.com/>`_, which contains tests
status against other py.test releases.

Each entry below lists Name, Repo and Summary; its Py27/Py34 status badges
are served from
http://pytest-plugs.herokuapp.com/status/<name>-latest?py=py27&pytest=2.6.2.dev1
(and ``py=py34``), each linking to the matching ``/output/`` page. GitHub and
Bitbucket repositories are rendered as ``github.png``/``bitbucket.png`` icons
linking to the repository.
* `pytest-allure-adaptor-1.4.0 <http://pypi.python.org/pypi/pytest-allure-adaptor>`_ (https://github.com/allure-framework/allure-python): Plugin for py.test to generate allure xml reports
* `pytest-bdd-2.3.1 <http://pypi.python.org/pypi/pytest-bdd>`_ (https://github.com/olegpidsadnyi/pytest-bdd): BDD for pytest
* `pytest-beds-0.0.1 <http://pypi.python.org/pypi/pytest-beds>`_ (https://github.com/kaste/pytest-beds): Fixtures for testing Google Appengine (GAE) apps
* `pytest-bench-0.3.0 <http://pypi.python.org/pypi/pytest-bench>`_ (http://github.com/concordusapps/pytest-bench): Benchmark utility that plugs into pytest.
* `pytest-blockage-0.1 <http://pypi.python.org/pypi/pytest-blockage>`_ (https://github.com/rob-b/pytest-blockage): Disable network requests during a test run.
* `pytest-browsermob-proxy-0.1 <http://pypi.python.org/pypi/pytest-browsermob-proxy>`_ (https://github.com/davehunt/pytest-browsermob-proxy): BrowserMob proxy plugin for py.test.
* `pytest-bugzilla-0.2 <http://pypi.python.org/pypi/pytest-bugzilla>`_ (http://github.com/nibrahim/pytest_bugzilla): py.test bugzilla integration plugin
* `pytest-cache-1.0 <http://pypi.python.org/pypi/pytest-cache>`_ (http://bitbucket.org/hpk42/pytest-cache/): pytest plugin with mechanisms for caching across test runs
* `pytest-capturelog-0.7 <http://pypi.python.org/pypi/pytest-capturelog>`_ (http://bitbucket.org/memedough/pytest-capturelog/overview): py.test plugin to capture log messages
* `pytest-codecheckers-0.2 <http://pypi.python.org/pypi/pytest-codecheckers>`_ (http://bitbucket.org/RonnyPfannschmidt/pytest-codecheckers/): pytest plugin to add source code sanity checks (pep8 and friends)
* `pytest-config-0.0.10 <http://pypi.python.org/pypi/pytest-config>`_ (https://github.com/buzzfeed/pytest_config): Base configurations and utilities for developing your Python project test suite with pytest.
* `pytest-contextfixture-0.1.1 <http://pypi.python.org/pypi/pytest-contextfixture>`_ (http://github.com/pelme/pytest-contextfixture/): Define pytest fixtures as context managers.
* `pytest-couchdbkit-0.5.1 <http://pypi.python.org/pypi/pytest-couchdbkit>`_ (http://bitbucket.org/RonnyPfannschmidt/pytest-couchdbkit): py.test extension for per-test couchdb databases using couchdbkit
* `pytest-cov-1.8.0 <http://pypi.python.org/pypi/pytest-cov>`_ (https://github.com/schlamar/pytest-cov): py.test plugin for coverage reporting with support for both centralised and distributed testing, including subprocesses and multiprocessing
* `pytest-cpp-0.3.0 <http://pypi.python.org/pypi/pytest-cpp>`_ (http://github.com/nicoddemus/pytest-cpp): Use pytest's runner to discover and execute C++ tests
* `pytest-dbfixtures-0.5.1 <http://pypi.python.org/pypi/pytest-dbfixtures>`_ (https://github.com/ClearcodeHQ/pytest-dbfixtures): Databases fixtures plugin for py.test.
* `pytest-dbus-notification-1.0.1 <http://pypi.python.org/pypi/pytest-dbus-notification>`_ (https://github.com/bmathieu33/pytest-dbus-notification): D-BUS notifications for pytest results.
* `pytest-diffeo-0.1.8.dev1 <http://pypi.python.org/pypi/pytest-diffeo>`_ (https://github.com/diffeo/pytest-diffeo): Common py.test support for Diffeo packages
* `pytest-django-2.6.2 <http://pypi.python.org/pypi/pytest-django>`_ (http://pytest-django.readthedocs.org/): A Django plugin for py.test.
* `pytest-django-haystack-0.1.1 <http://pypi.python.org/pypi/pytest-django-haystack>`_ (http://github.com/rouge8/pytest-django-haystack): Cleanup your Haystack indexes between tests
* `pytest-django-lite-0.1.1 <http://pypi.python.org/pypi/pytest-django-lite>`_ (https://github.com/dcramer/pytest-django-lite): The bare minimum to integrate py.test with Django.
* `pytest-echo-1.3 <http://pypi.python.org/pypi/pytest-echo>`_ (http://pypi.python.org/pypi/pytest-echo/): pytest plugin with mechanisms for echoing environment variables, package version and generic attributes
* `pytest-eradicate-0.0.2 <http://pypi.python.org/pypi/pytest-eradicate>`_ (https://github.com/spil-johan/pytest-eradicate): pytest plugin to check for commented out code
* `pytest-figleaf-1.0 <http://pypi.python.org/pypi/pytest-figleaf>`_ (http://bitbucket.org/hpk42/pytest-figleaf): py.test figleaf coverage plugin
* `pytest-fixture-tools-1.0.0 <http://pypi.python.org/pypi/pytest-fixture-tools>`_ (repo unknown): Plugin for pytest which provides tools for fixtures
* `pytest-flakes-0.2 <http://pypi.python.org/pypi/pytest-flakes>`_ (https://github.com/fschulze/pytest-flakes): pytest plugin to check source code with pyflakes
* `pytest-greendots-0.3 <http://pypi.python.org/pypi/pytest-greendots>`_ (repo unknown): Green progress dots
* `pytest-growl-0.2 <http://pypi.python.org/pypi/pytest-growl>`_ (repo unknown): Growl notifications for pytest results.
* `pytest-httpbin-0.0.2 <http://pypi.python.org/pypi/pytest-httpbin>`_ (https://github.com/kevin1024/pytest-httpbin): Easily test your HTTP library against a local copy of httpbin
* `pytest-httpretty-0.2.0 <http://pypi.python.org/pypi/pytest-httpretty>`_ (http://github.com/papaeye/pytest-httpretty): A thin wrapper of HTTPretty for pytest
* `pytest-incremental-0.3.0 <http://pypi.python.org/pypi/pytest-incremental>`_ (https://bitbucket.org/schettino72/pytest-incremental): an incremental test runner (pytest plugin)
* `pytest-instafail-0.2.0 <http://pypi.python.org/pypi/pytest-instafail>`_ (https://github.com/jpvanhal/pytest-instafail): py.test plugin to show failures instantly
* `pytest-ipdb-0.1-prerelease <http://pypi.python.org/pypi/pytest-ipdb>`_ (https://github.com/mverteuil/pytest-ipdb): A py.test plug-in to enable drop to ipdb debugger on test failure.
* `pytest-jira-0.01 <http://pypi.python.org/pypi/pytest-jira>`_ (http://github.com/jlaska/pytest_jira): py.test JIRA integration plugin, using markers
* `pytest-knows-0.1.5 <http://pypi.python.org/pypi/pytest-knows>`_ (https://github.com/mapix/ptknows): A pytest plugin that can automaticly skip test case based on dependence info calculated by trace
* `pytest-konira-0.2 <http://pypi.python.org/pypi/pytest-konira>`_ (http://github.com/alfredodeza/pytest-konira): Run Konira DSL tests with py.test
* `pytest-localserver-0.3.2 <http://pypi.python.org/pypi/pytest-localserver>`_ (http://bitbucket.org/basti/pytest-localserver/): py.test plugin to test server connections locally.
* `pytest-marker-bugzilla-0.06 <http://pypi.python.org/pypi/pytest-marker-bugzilla>`_ (http://github.com/eanxgeek/pytest_marker_bugzilla): py.test bugzilla integration plugin, using markers
* `pytest-markfiltration-0.8 <http://pypi.python.org/pypi/pytest-markfiltration>`_ (https://github.com/adamgoucher/pytest-markfiltration): UNKNOWN
* `pytest-marks-0.4 <http://pypi.python.org/pypi/pytest-marks>`_ (https://github.com/adamgoucher/pytest-marks): UNKNOWN
* `pytest-mock-0.3.0 <http://pypi.python.org/pypi/pytest-mock>`_ (https://github.com/nicoddemus/pytest-mock/): Thin-wrapper around the mock package for easier use with py.test
* `pytest-monkeyplus-1.1.0 <http://pypi.python.org/pypi/pytest-monkeyplus>`_ (http://bitbucket.org/hsoft/pytest-monkeyplus/): pytest's monkeypatch subclass with extra functionalities
* `pytest-mozwebqa-1.1.1 <http://pypi.python.org/pypi/pytest-mozwebqa>`_ (https://github.com/davehunt/pytest-mozwebqa): Mozilla WebQA plugin for py.test.
* `pytest-oerp-0.2.0 <http://pypi.python.org/pypi/pytest-oerp>`_ (http://github.com/santagada/pytest-oerp/): pytest plugin to test OpenERP modules
* `pytest-ordering-0.3 <http://pypi.python.org/pypi/pytest-ordering>`_ (https://github.com/ftobia/pytest-ordering): pytest plugin to run your tests in a specific order
* `pytest-osxnotify-0.1.4 <http://pypi.python.org/pypi/pytest-osxnotify>`_ (https://github.com/dbader/pytest-osxnotify): OS X notifications for py.test results.
* `pytest-paste-config-0.1 <http://pypi.python.org/pypi/pytest-paste-config>`_ (repo unknown): Allow setting the path to a paste config file
* `pytest-pep8-1.0.6 <http://pypi.python.org/pypi/pytest-pep8>`_ (http://bitbucket.org/hpk42/pytest-pep8/): pytest plugin to check PEP8 requirements
* `pytest-pipeline-0.1.0 <http://pypi.python.org/pypi/pytest-pipeline>`_ (https://github.com/bow/pytest_pipeline): Pytest plugin for functional testing of data analysis pipelines
* `pytest-poo-0.2 <http://pypi.python.org/pypi/pytest-poo>`_ (http://github.com/pelme/pytest-poo): Visualize your crappy tests
* `pytest-pycharm-0.1.0 <http://pypi.python.org/pypi/pytest-pycharm>`_ (https://github.com/jlubcke/pytest-pycharm): Plugin for py.test to enter PyCharm debugger on uncaught exceptions
* `pytest-pydev-0.1 <http://pypi.python.org/pypi/pytest-pydev>`_ (http://bitbucket.org/basti/pytest-pydev/): py.test plugin to connect to a remote debug server with PyDev or PyCharm.
* `pytest-pythonpath-0.3 <http://pypi.python.org/pypi/pytest-pythonpath>`_ (https://github.com/bigsassy/pytest-pythonpath): pytest plugin for adding to the PYTHONPATH from command line or configs.
* `pytest-qt-1.2.0 <http://pypi.python.org/pypi/pytest-qt>`_ (http://github.com/nicoddemus/pytest-qt): pytest support for PyQt and PySide applications
* `pytest-quickcheck-0.8 <http://pypi.python.org/pypi/pytest-quickcheck>`_ (http://bitbucket.org/t2y/pytest-quickcheck/): pytest plugin to generate random data inspired by QuickCheck
* `pytest-rage-0.1 <http://pypi.python.org/pypi/pytest-rage>`_ (http://github.com/santagada/pytest-rage/): pytest plugin to implement PEP712
* `pytest-raisesregexp-1.0 <http://pypi.python.org/pypi/pytest-raisesregexp>`_ (https://github.com/Walkman/pytest_raisesregexp): Simple pytest plugin to look for regex in Exceptions
* `pytest-random-0.02 <http://pypi.python.org/pypi/pytest-random>`_ (https://github.com/klrmn/pytest-random): py.test plugin to randomize tests
* `pytest-rerunfailures-0.05 <http://pypi.python.org/pypi/pytest-rerunfailures>`_ (https://github.com/klrmn/pytest-rerunfailures): py.test plugin to re-run tests to eliminate flakey failures
* `pytest-runfailed-0.3 <http://pypi.python.org/pypi/pytest-runfailed>`_ (http://github.com/dmerejkowsky/pytest-runfailed): implement a --failed option for pytest
* `pytest-runner-2.1 <http://pypi.python.org/pypi/pytest-runner>`_ (https://bitbucket.org/jaraco/pytest-runner): UNKNOWN
* `pytest-sftpserver-1.0.2 <http://pypi.python.org/pypi/pytest-sftpserver>`_ (http://github.com/ulope/pytest-sftpserver/): py.test plugin to locally test sftp server connections.
* `pytest-spec-0.2.22 <http://pypi.python.org/pypi/pytest-spec>`_ (https://github.com/pchomik/pytest-spec): pytest plugin to display test execution output like a SPECIFICATION
* `pytest-splinter-1.0.3 <http://pypi.python.org/pypi/pytest-splinter>`_ (https://github.com/paylogic/pytest-splinter): Splinter subplugin for Pytest BDD plugin
* `pytest-stepwise-0.2 <http://pypi.python.org/pypi/pytest-stepwise>`_ (https://github.com/nip3o/pytest-stepwise): Run a test suite one failing test at a time.
* `pytest-sugar-0.3.4 <http://pypi.python.org/pypi/pytest-sugar>`_ (https://github.com/Frozenball/pytest-sugar): py.test is a plugin for py.test that changes the default look and feel of py.test (e.g. progressbar, show tests that fail instantly).
* `pytest-timeout-0.4 <http://pypi.python.org/pypi/pytest-timeout>`_ (http://bitbucket.org/flub/pytest-timeout/): py.test plugin to abort hanging tests
* `pytest-twisted-1.5 <http://pypi.python.org/pypi/pytest-twisted>`_ (https://github.com/schmir/pytest-twisted): A twisted plugin for py.test.
* `pytest-xdist-1.10 <http://pypi.python.org/pypi/pytest-xdist>`_ (http://bitbucket.org/hpk42/pytest-xdist): py.test xdist plugin for distributed testing and loop-on-failing modes
* `pytest-xprocess-0.8 <http://pypi.python.org/pypi/pytest-xprocess>`_ (http://bitbucket.org/hpk42/pytest-xprocess/): pytest plugin to manage external processes across test runs
* `pytest-yamlwsgi-0.6 <http://pypi.python.org/pypi/pytest-yamlwsgi>`_ (repo unknown): Run tests against wsgi apps defined in yaml
* `pytest-zap-0.2 <http://pypi.python.org/pypi/pytest-zap>`_ (https://github.com/davehunt/pytest-zap): OWASP ZAP plugin for py.test.

*(Updated on 2014-08-26)*

@@ -1,6 +1,13 @@
 """
 Script to generate the file `index.txt` with information about
-pytest plugins taken directly from a live PyPI server.
+pytest plugins taken directly from PyPI.
 
 Usage:
     python plugins_index.py
+
+This command will update `index.txt` in the same directory found as this script.
+This should be issued before every major documentation release to obtain latest
+versions from PyPI.
+
+Also includes plugin compatibility between different python and pytest versions,
+obtained from http://pytest-plugs.herokuapp.com.
@@ -66,9 +73,39 @@ def obtain_plugins_table(plugins, client):
     :param plugins: list of (name, version)
     :param client: ServerProxy
     """
+    def get_repo_markup(repo):
+        """
+        obtains appropriate markup for the given repository, as two lines
+        that should be output in the same table row. We use this to display an icon
+        for known repository hosts (github, etc), just a "?" char when
+        repository is not registered in pypi or a simple link otherwise.
+        """
+        target = repo
+        if 'github.com' in repo:
+            image = 'github.png'
+        elif 'bitbucket.org' in repo:
+            image = 'bitbucket.png'
+        elif repo.lower() == 'unknown':
+            return '?', ''
+        else:
+            image = None
+
+        if image is not None:
+            image_markup = '.. image:: %s' % image
+            target_markup = ' :target: %s' % repo
+            pad_right = ('%-' + str(len(target_markup)) + 's')
+            return pad_right % image_markup, target_markup
+        else:
+            return ('`%s <%s>`_' % (target, target)), ''
+
+    def sanitize_summary(summary):
+        """Make sure summaries don't break our table formatting.
+        """
+        return summary.replace('\n', ' ')
+
     rows = []
     ColumnData = namedtuple('ColumnData', 'text link')
-    headers = ['Name', 'Py27', 'Py33', 'Repository', 'Summary']
+    headers = ['Name', 'Py27', 'Py34', 'Repo', 'Summary']
     pytest_version = pytest.__version__
     repositories = obtain_override_repositories()
     print('*** pytest-{0} ***'.format(pytest_version))
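
To make the two-line markup concrete, here is a small standalone sketch of the
helper's behaviour (in the script itself it is nested inside
``obtain_plugins_table``; the ``example.org`` URL is illustrative only)::

    def get_repo_markup(repo):
        # known hosts get an icon image, unregistered repos a '?',
        # anything else a plain RST link
        if 'github.com' in repo:
            image = 'github.png'
        elif 'bitbucket.org' in repo:
            image = 'bitbucket.png'
        elif repo.lower() == 'unknown':
            return '?', ''
        else:
            image = None
        if image is not None:
            image_markup = '.. image:: %s' % image
            target_markup = ' :target: %s' % repo
            # pad the image line so both lines occupy the same column width
            pad_right = '%-' + str(len(target_markup)) + 's'
            return pad_right % image_markup, target_markup
        return '`%s <%s>`_' % (repo, repo), ''

    print(get_repo_markup('https://github.com/davehunt/pytest-zap'))
    # ('.. image:: github.png' padded to the column width,
    #  ' :target: https://github.com/davehunt/pytest-zap')
    print(get_repo_markup('UNKNOWN'))
    # ('?', '')
    print(get_repo_markup('http://example.org/some-repo'))
    # ('`http://example.org/some-repo <http://example.org/some-repo>`_', '')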
@@ -83,27 +120,29 @@ def obtain_plugins_table(plugins, client):
             name=package_name,
             version=version)
 
+        repository = repositories.get(package_name, release_data['home_page'])
+        repo_markup_1, repo_markup_2 = get_repo_markup(repository)
+
         # first row: name, images and simple links
-        url = '.. image:: {site}/status/{name}-{version}'
+        url = '.. image:: {site}/status/{name}-latest'
         image_url = url.format(**common_params)
         image_url += '?py={py}&pytest={pytest}'
         row = (
-            ColumnData(package_name + '-' + version,
-                       release_data['release_url']),
+            ColumnData(package_name + "-" + version, release_data['package_url']),
             ColumnData(image_url.format(py='py27', pytest=pytest_version),
                        None),
-            ColumnData(image_url.format(py='py33', pytest=pytest_version),
+            ColumnData(image_url.format(py='py34', pytest=pytest_version),
                        None),
-            ColumnData(
-                repositories.get(package_name, release_data['home_page']),
+            ColumnData(repo_markup_1,
                        None),
-            ColumnData(release_data['summary'], None),
+            ColumnData(sanitize_summary(release_data['summary']), None),
        )
        assert len(row) == len(headers)
        rows.append(row)
 
         # second row: links for images (they should be in their own line)
-        url = ' :target: {site}/output/{name}-{version}'
+        url = ' :target: {site}/output/{name}-latest'
         output_url = url.format(**common_params)
         output_url += '?py={py}&pytest={pytest}'
 
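
For a concrete sense of what the two rows render to, the Py27 cell generated
for, e.g., ``pytest-pep8`` under the new ``-latest`` scheme looks roughly like
this (reconstructed from the table above)::

    .. image:: http://pytest-plugs.herokuapp.com/status/pytest-pep8-latest?py=py27&pytest=2.6.2.dev1
     :target: http://pytest-plugs.herokuapp.com/output/pytest-pep8-latest?py=py27&pytest=2.6.2.dev1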
@@ -111,10 +150,11 @@ def obtain_plugins_table(plugins, client):
             ColumnData('', None),
             ColumnData(output_url.format(py='py27', pytest=pytest_version),
                        None),
-            ColumnData(output_url.format(py='py33', pytest=pytest_version),
+            ColumnData(output_url.format(py='py34', pytest=pytest_version),
                        None),
+            ColumnData(repo_markup_2, None),
             ColumnData('', None),
-            ColumnData('', None),
+
         )
         assert len(row) == len(headers)
         rows.append(row)
@@ -135,6 +175,7 @@ def obtain_override_repositories():
     return {
         'pytest-blockage': 'https://github.com/rob-b/pytest-blockage',
         'pytest-konira': 'http://github.com/alfredodeza/pytest-konira',
+        'pytest-sugar': 'https://github.com/Frozenball/pytest-sugar',
     }
 
 
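A minimal sketch of how these overrides are consumed (the lookup pattern
appears in ``obtain_plugins_table`` above; the ``release_data`` dict here is a
hypothetical stand-in for the metadata PyPI returns)::

    repositories = obtain_override_repositories()
    release_data = {'home_page': 'UNKNOWN'}  # hypothetical PyPI metadata
    # Prefer the hand-maintained override; fall back to PyPI's home_page field.
    repo = repositories.get('pytest-sugar', release_data['home_page'])
    # -> 'https://github.com/Frozenball/pytest-sugar'
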
@@ -168,11 +209,9 @@ def generate_plugins_index_from_table(filename, headers, rows):
         return ' '.join(char * length for length in column_lengths)
 
     with open(filename, 'w') as f:
-        # write welcome
-        print('.. _plugins_index:', file=f)
-        print(file=f)
-        print('List of Third-Party Plugins', file=f)
-        print('===========================', file=f)
+        # header
+        header_text = HEADER.format(pytest_version=pytest.__version__)
+        print(header_text, file=f)
         print(file=f)
 
         # table
@@ -233,5 +272,21 @@ def main(argv):
|
||||
return 0
|
||||
|
||||
|
||||
# header for the plugins_index page
|
||||
HEADER = '''.. _plugins_index:
|
||||
|
||||
List of Third-Party Plugins
|
||||
===========================
|
||||
|
||||
The table below contains a listing of plugins found in PyPI and
|
||||
their status when tested using py.test **{pytest_version}** and python 2.7 and
|
||||
3.3.
|
||||
|
||||
A complete listing can also be found at
|
||||
`pytest-plugs <http://pytest-plugs.herokuapp.com/>`_, which contains tests
|
||||
status against other py.test releases.
|
||||
'''
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main(sys.argv))
|
||||
|
||||
@@ -35,7 +35,7 @@ Here is an example of marking a test function to be skipped
|
||||
when run on a Python3.3 interpreter::
|
||||
|
||||
import sys
|
||||
@pytest.mark.skipif(sys.version_info >= (3,3),
|
||||
@pytest.mark.skipif(sys.version_info < (3,3),
|
||||
reason="requires python3.3")
|
||||
def test_function():
|
||||
...
|
||||
@@ -51,7 +51,7 @@ You can share skipif markers between modules. Consider this test module::
|
||||
# content of test_mymodule.py
|
||||
|
||||
import mymodule
|
||||
minversion = pytest.mark.skipif(mymodule.__versioninfo__ >= (1,1),
|
||||
minversion = pytest.mark.skipif(mymodule.__versioninfo__ < (1,1),
|
||||
reason="at least mymodule-1.1 required")
|
||||
@minversion
|
||||
def test_function():
|
||||
@@ -149,6 +149,11 @@ on a particular platform::
|
||||
def test_function():
|
||||
...
|
||||
|
||||
If you want to be more specific as to why the test is failing, you can specify
|
||||
a single exception, or a list of exceptions, in the ``raises`` argument. Then
|
||||
the test will be reported as a regular failure if it fails with an
|
||||
exception not mentioned in ``raises``.
|
||||
|
||||
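A minimal sketch of the ``raises`` usage described above (illustrative only, not part of the changed docs)::

    @pytest.mark.xfail(raises=IndexError)
    def test_f():
        l = []
        l[1]  # IndexError: reported as xfail; any other exception is a real failure
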
You can furthermore prevent the running of an "xfail" test or
specify a reason such as a bug ID or similar. Here is
a simple test file with the several usages:
@@ -159,10 +164,10 @@ Running it with the report-on-xfail option gives this output::

    example $ py.test -rx xfail_demo.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
    collected 6 items
    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 7 items

    xfail_demo.py xxxxxx
    xfail_demo.py xxxxxxx
    ========================= short test summary info ==========================
    XFAIL xfail_demo.py::test_hello
    XFAIL xfail_demo.py::test_hello2
@@ -175,8 +180,9 @@ Running it with the report-on-xfail option gives this output::
    condition: pytest.__version__[0] != "17"
    XFAIL xfail_demo.py::test_hello6
    reason: reason
    XFAIL xfail_demo.py::test_hello7

    ======================== 6 xfailed in 0.04 seconds =========================
    ======================== 7 xfailed in 0.05 seconds =========================

.. _`skip/xfail with parametrize`:

@@ -286,4 +292,9 @@ The equivalent with "boolean conditions" is::
    def test_function(...):
        pass

.. note::

   You cannot use ``pytest.config.getvalue()`` in code
   imported before py.test's argument parsing takes place. For example,
   ``conftest.py`` files are imported before command line parsing and thus
   ``config.getvalue()`` will not execute correctly.

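A sketch of the pitfall the note describes (hypothetical ``conftest.py``, for illustration only)::

    # conftest.py
    import pytest
    opt = pytest.config.getvalue("verbose")   # runs at import time, before
                                              # argument parsing: breaks

    def pytest_configure(config):
        opt = config.getvalue("verbose")      # fine: parsing has happened here
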
@@ -2,34 +2,50 @@
Talks and Tutorials
==========================

.. sidebar:: Next Open Trainings

   `professional testing with pytest and tox <http://www.python-academy.com/courses/specialtopics/python_course_testing.html>`_, 24-26th November 2014, Freiburg, Germany

.. _`funcargs`: funcargs.html

Tutorial examples and blog postings
Talks and blog postings
---------------------------------------------

.. _`tutorial1 repository`: http://bitbucket.org/hpk42/pytest-tutorial1/
.. _`pycon 2010 tutorial PDF`: http://bitbucket.org/hpk42/pytest-tutorial1/raw/tip/pytest-basic.pdf

Basic usage and fixtures:
- `Introduction to pytest, Andreas Pelme, EuroPython 2014
  <https://www.youtube.com/watch?v=LdVJj65ikRY>`_.

- `pytest feature and release highlights (GERMAN, October 2013)
- `Advanced Uses of py.test Fixtures, Floris Bruynooghe, EuroPython
  2014 <https://www.youtube.com/watch?v=IBC_dxr-4ps>`_.

- `Why i use py.test and maybe you should too, Andy Todd, Pycon AU 2013
  <https://www.youtube.com/watch?v=P-AhpukDIik>`_

- `3-part blog series about pytest from @pydanny alias Daniel Greenfeld (January
  2014) <http://pydanny.com/pytest-no-boilerplate-testing.html>`_

- `pytest: helps you write better Django apps, Andreas Pelme, DjangoCon
  Europe 2014 <https://www.youtube.com/watch?v=aaArYVh6XSM>`_.

- :ref:`fixtures`

- `Testing Django Applications with pytest, Andreas Pelme, EuroPython
  2013 <https://www.youtube.com/watch?v=aUf8Fkb7TaY>`_.

- `Testes pythonics com py.test, Vinicius Belchior Assef Neto, Plone
  Conf 2013, Brazil <https://www.youtube.com/watch?v=QUKoq2K7bis>`_.

- `Introduction to py.test fixtures, FOSDEM 2013, Floris Bruynooghe
  <https://www.youtube.com/watch?v=bJhRW4eZMco>`_.

- `pytest feature and release highlights, Holger Krekel (GERMAN, October 2013)
  <http://pyvideo.org/video/2429/pytest-feature-and-new-release-highlights>`_

- `pytest introduction from Brian Okken (January 2013)
  <http://pythontesting.net/framework/pytest-introduction/>`_

- `3-part blog series about pytest from Daniel Greenfeld (January
  2014) <http://pydanny.com/pytest-no-boilerplate-testing.html>`_

- `pycon australia 2012 pytest talk from Brianna Laugher
  <http://2012.pycon-au.org/schedule/52/view_talk?day=sunday>`_ (`video <http://www.youtube.com/watch?v=DTNejE9EraI>`_, `slides <http://www.slideshare.net/pfctdayelise/funcargs-other-fun-with-pytest>`_, `code <https://gist.github.com/3386951>`_)
- `pycon 2012 US talk video from Holger Krekel <http://www.youtube.com/watch?v=9LVqBQcFmyw>`_
- `pycon 2010 tutorial PDF`_ and `tutorial1 repository`_


Fixtures and Function arguments:

- :ref:`fixtures`
- `monkey patching done right`_ (blog post, consult `monkeypatch
  plugin`_ for up-to-date API)

@@ -69,6 +85,11 @@ Plugin specific examples:
Older conference talks and tutorials
----------------------------------------

- `pycon australia 2012 pytest talk from Brianna Laugher
  <http://2012.pycon-au.org/schedule/52/view_talk?day=sunday>`_ (`video <http://www.youtube.com/watch?v=DTNejE9EraI>`_, `slides <http://www.slideshare.net/pfctdayelise/funcargs-other-fun-with-pytest>`_, `code <https://gist.github.com/3386951>`_)
- `pycon 2012 US talk video from Holger Krekel <http://www.youtube.com/watch?v=9LVqBQcFmyw>`_
- `pycon 2010 tutorial PDF`_ and `tutorial1 repository`_

- `ep2009-rapidtesting.pdf`_ tutorial slides (July 2009):

  - testing terminology

@@ -29,7 +29,7 @@ Running this would result in a passed test except for the last

    $ py.test test_tmpdir.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 1 items

    test_tmpdir.py F
@@ -37,7 +37,7 @@ Running this would result in a passed test except for the last
    ================================= FAILURES =================================
    _____________________________ test_create_file _____________________________

    tmpdir = local('/tmp/pytest-1009/test_create_file0')
    tmpdir = local('/tmp/pytest-506/test_create_file0')

    def test_create_file(tmpdir):
        p = tmpdir.mkdir("sub").join("hello.txt")
@@ -48,7 +48,7 @@ Running this would result in a passed test except for the last
    E   assert 0

    test_tmpdir.py:7: AssertionError
    ========================= 1 failed in 0.01 seconds =========================
    ========================= 1 failed in 0.02 seconds =========================

.. _`base temporary directory`:


@@ -88,7 +88,7 @@ the ``self.db`` values in the traceback::

    $ py.test test_unittest_db.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
    collected 2 items

    test_unittest_db.py FF
@@ -101,7 +101,7 @@ the ``self.db`` values in the traceback::
    def test_method1(self):
        assert hasattr(self, "db")
    >   assert 0, self.db # fail for demo purposes
    E   AssertionError: <conftest.DummyDB instance at 0x12124d0>
    E   AssertionError: <conftest.db_class.<locals>.DummyDB object at 0x2b12849f90b8>

    test_unittest_db.py:9: AssertionError
    ___________________________ MyTest.test_method2 ____________________________
@@ -110,10 +110,10 @@ the ``self.db`` values in the traceback::

    def test_method2(self):
    >   assert 0, self.db # fail for demo purposes
    E   AssertionError: <conftest.DummyDB instance at 0x12124d0>
    E   AssertionError: <conftest.db_class.<locals>.DummyDB object at 0x2b12849f90b8>

    test_unittest_db.py:12: AssertionError
    ========================= 2 failed in 0.01 seconds =========================
    ========================= 2 failed in 0.05 seconds =========================

This default pytest traceback shows that the two test methods
share the same ``self.db`` instance which was our intention
@@ -160,7 +160,7 @@ Running this test module ...::

    $ py.test -q test_unittest_cleandir.py
    .
    1 passed in 0.01 seconds
    1 passed in 0.05 seconds

... gives us one passed test because the ``initdir`` fixture function
was executed ahead of the ``test_method``.

@@ -49,6 +49,9 @@ Several test run options::
                           # the "string expression", e.g. "MyClass and not method"
                           # will select TestMyClass.test_something
                           # but not TestMyClass.test_method_simple
    py.test test_mod.py::test_func  # only run tests that match the "node ID",
                           # e.g "test_mod.py::test_func" will select
                           # only test_func in test_mod.py

Import 'pkg' and use its filesystem location to find and run tests::

@@ -67,13 +70,13 @@ Examples for modifying traceback printing::
    py.test --tb=short  # a shorter traceback format
    py.test --tb=line   # only one line per failure

Dropping to PDB (Python Debugger) on failures
----------------------------------------------
Dropping to PDB_ (Python Debugger) on failures
-----------------------------------------------

.. _PDB: http://docs.python.org/library/pdb.html

Python comes with a builtin Python debugger called PDB_. ``pytest``
allows one to drop into the PDB prompt via a command line option::
allows one to drop into the PDB_ prompt via a command line option::

    py.test --pdb

@@ -82,7 +85,7 @@ only want to do this for the first failing test to understand a certain
failure situation::

    py.test -x --pdb            # drop to PDB on first failure, then end test session
    py.test --pdb --maxfail=3   # drop to PDB for the first three failures
    py.test --pdb --maxfail=3   # drop to PDB for first three failures


Setting a breakpoint / aka ``set_trace()``
@@ -98,8 +101,24 @@ can use a helper::

.. versionadded: 2.0.0

In previous versions you could only enter PDB tracing if
you disabled capturing on the command line via ``py.test -s``.
Prior to pytest version 2.0.0 you could only enter PDB_ tracing if you disabled
capturing on the command line via ``py.test -s``. In later versions, pytest
automatically disables its output capture when you enter PDB_ tracing:

* Output capture in other tests is not affected.
* Any prior test output that has already been captured and will be processed as
  such.
* Any later output produced within the same test will not be captured and will
  instead get sent directly to ``sys.stdout``. Note that this holds true even
  for test output occuring after you exit the interactive PDB_ tracing session
  and continue with the regular test run.

.. versionadded: 2.4.0

Since pytest version 2.4.0 you can also use the native Python
``import pdb;pdb.set_trace()`` call to enter PDB_ tracing without having to use
the ``pytest.set_trace()`` wrapper or explicitly disable pytest's output
capturing via ``py.test -s``.

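An illustrative use of the native call mentioned above (a sketch; ``compute()`` is a made-up stand-in, not from the diff)::

    def test_something():
        value = compute()
        import pdb; pdb.set_trace()   # pytest 2.4.0+ suspends capturing here
        assert value == 42
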
.. _durations:


@@ -1,4 +1,3 @@

.. _yieldfixture:

Fixture functions using "yield" / context manager integration
@@ -52,9 +51,9 @@ Let's run it with output capturing disabled::
    test called
    .teardown after yield

    1 passed in 0.00 seconds
    1 passed in 0.01 seconds

We can also seemlessly use the new syntax with ``with`` statements.
We can also seamlessly use the new syntax with ``with`` statements.
Let's simplify the above ``passwd`` fixture::

    # content of test_yield2.py

runtox.py
@@ -1,10 +1,8 @@
#!/usr/bin/env python

import subprocess
import sys

if __name__ == "__main__":
    subprocess.call(["tox",
import subprocess
import sys
subprocess.call([sys.executable, "-m", "tox",
    "-i", "ALL=https://devpi.net/hpk/dev/",
    "--develop",] + sys.argv[1:])

    "--develop"] + sys.argv[1:])

setup.py
@@ -1,41 +1,41 @@
import os, sys
from setuptools import setup, Command

classifiers=['Development Status :: 6 - Mature',
             'Intended Audience :: Developers',
             'License :: OSI Approved :: MIT License',
             'Operating System :: POSIX',
             'Operating System :: Microsoft :: Windows',
             'Operating System :: MacOS :: MacOS X',
             'Topic :: Software Development :: Testing',
             'Topic :: Software Development :: Libraries',
             'Topic :: Utilities',
             'Programming Language :: Python :: 2',
             'Programming Language :: Python :: 3'] + [
    ("Programming Language :: Python :: %s" % x) for x in
    "2.6 2.7 3.0 3.1 3.2 3.3".split()]
classifiers = ['Development Status :: 6 - Mature',
               'Intended Audience :: Developers',
               'License :: OSI Approved :: MIT License',
               'Operating System :: POSIX',
               'Operating System :: Microsoft :: Windows',
               'Operating System :: MacOS :: MacOS X',
               'Topic :: Software Development :: Testing',
               'Topic :: Software Development :: Libraries',
               'Topic :: Utilities'] + [
    ('Programming Language :: Python :: %s' % x) for x in
    '2 2.6 2.7 3 3.2 3.3 3.4'.split()]

long_description = open('README.rst').read()


long_description = open("README.rst").read()
def main():
    install_requires = ["py>=1.4.20"]
    if sys.version_info < (2,7):
        install_requires.append("argparse")
    if sys.platform == "win32":
        install_requires.append("colorama")
    install_requires = ['py>=1.4.24']
    if sys.version_info < (2, 7) or (3,) <= sys.version_info < (3, 2):
        install_requires.append('argparse')
    if sys.platform == 'win32':
        install_requires.append('colorama')

    setup(
        name='pytest',
        description='pytest: simple powerful testing with Python',
        long_description = long_description,
        version='2.5.2',
        long_description=long_description,
        version='2.6.2',
        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
        author='Holger Krekel, Benjamin Peterson, Ronny Pfannschmidt, Floris Bruynooghe and others',
        author_email='holger at merlinux.eu',
        entry_points= make_entry_points(),
        entry_points=make_entry_points(),
        classifiers=classifiers,
        cmdclass = {'test': PyTest},
        cmdclass={'test': PyTest},
        # the following should be enabled for release
        install_requires=install_requires,
        packages=['_pytest', '_pytest.assertion'],
@@ -43,24 +43,26 @@ def main():
        zip_safe=False,
    )


def cmdline_entrypoints(versioninfo, platform, basename):
    target = 'pytest:main'
    if platform.startswith('java'):
        points = {'py.test-jython': target}
    else:
        if basename.startswith("pypy"):
        if basename.startswith('pypy'):
            points = {'py.test-%s' % basename: target}
        else: # cpython
            points = {'py.test-%s.%s' % versioninfo[:2] : target,}
            points = {'py.test-%s.%s' % versioninfo[:2] : target}
        points['py.test'] = target
    return points


def make_entry_points():
    basename = os.path.basename(sys.executable)
    points = cmdline_entrypoints(sys.version_info, sys.platform, basename)
    keys = list(points.keys())
    keys.sort()
    l = ["%s = %s" % (x, points[x]) for x in keys]
    l = ['%s = %s' % (x, points[x]) for x in keys]
    return {'console_scripts': l}


@@ -71,12 +73,13 @@ class PyTest(Command):
    def finalize_options(self):
        pass
    def run(self):
        import sys,subprocess
        PPATH=[x for x in os.environ.get("PYTHONPATH", "").split(":") if x]
        import subprocess
        PPATH = [x for x in os.environ.get('PYTHONPATH', '').split(':') if x]
        PPATH.insert(0, os.getcwd())
        os.environ["PYTHONPATH"] = ":".join(PPATH)
        errno = subprocess.call([sys.executable, 'pytest.py'])
        os.environ['PYTHONPATH'] = ':'.join(PPATH)
        errno = subprocess.call([sys.executable, 'pytest.py', '--ignore=doc'])
        raise SystemExit(errno)


if __name__ == '__main__':
    main()

@@ -124,6 +124,18 @@ class TestGeneralUsage:
            "*ERROR: not found:*%s" %(p2.basename,)
        ])

    def test_issue486_better_reporting_on_conftest_load_failure(self, testdir):
        testdir.makepyfile("")
        testdir.makeconftest("import qwerty")
        result = testdir.runpytest("--help")
        result.stdout.fnmatch_lines("""
            *--version*
            *warning*conftest.py*
        """)
        result = testdir.runpytest()
        result.stderr.fnmatch_lines("""
            *ERROR*could not load*conftest.py*
        """)


    def test_early_skip(self, testdir):
@@ -335,6 +347,12 @@ class TestGeneralUsage:
        res = testdir.runpytest(p.basename)
        assert res.ret == 0

    def test_unknown_option(self, testdir):
        result = testdir.runpytest("--qwlkej")
        result.stderr.fnmatch_lines("""
            *unrecognized*
        """)


class TestInvocationVariants:
    def test_earlyinit(self, testdir):

@@ -4,48 +4,71 @@ import sys
pytest_plugins = "pytester",

import os, py
pid = os.getpid()

class LsofFdLeakChecker(object):
    def get_open_files(self):
        out = self._exec_lsof()
        open_files = self._parse_lsof_output(out)
        return open_files

    def _exec_lsof(self):
        pid = os.getpid()
        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)

    def _parse_lsof_output(self, out):
        def isopen(line):
            return line.startswith('f') and (
                "deleted" not in line and 'mem' not in line and "txt" not in line and 'cwd' not in line)

        open_files = []

        for line in out.split("\n"):
            if isopen(line):
                fields = line.split('\0')
                fd = fields[0][1:]
                filename = fields[1][1:]
                if filename.startswith('/'):
                    open_files.append((fd, filename))

        return open_files


def pytest_addoption(parser):
    parser.addoption('--lsof',
        action="store_true", dest="lsof", default=False,
        help=("run FD checks if lsof is available"))

def pytest_configure(config):
def pytest_runtest_setup(item):
    config = item.config
    config._basedir = py.path.local()
    if config.getvalue("lsof"):
        try:
            out = py.process.cmdexec("lsof -p %d" % pid)
            config._fd_leak_checker = LsofFdLeakChecker()
            config._openfiles = config._fd_leak_checker.get_open_files()
        except py.process.cmdexec.Error:
            pass
        else:
            config._numfiles = len(getopenfiles(out))

#def pytest_report_header():
#    return "pid: %s" % os.getpid()

def getopenfiles(out):
    def isopen(line):
        return ("REG" in line or "CHR" in line) and (
            "deleted" not in line and 'mem' not in line and "txt" not in line)
    return [x for x in out.split("\n") if isopen(x)]

def check_open_files(config):
    out2 = py.process.cmdexec("lsof -p %d" % pid)
    lines2 = getopenfiles(out2)
    if len(lines2) > config._numfiles + 3:
    lines2 = config._fd_leak_checker.get_open_files()
    new_fds = set([t[0] for t in lines2]) - set([t[0] for t in config._openfiles])
    open_files = [t for t in lines2 if t[0] in new_fds]
    if open_files:
        error = []
        error.append("***** %s FD leackage detected" %
            (len(lines2)-config._numfiles))
        error.extend(lines2)
        error.append("***** %s FD leakage detected" % len(open_files))
        error.extend([str(f) for f in open_files])
        error.append("*** Before:")
        error.extend([str(f) for f in config._openfiles])
        error.append("*** After:")
        error.extend([str(f) for f in lines2])
        error.append(error[0])
        # update numfile so that the overall test run continuess
        config._numfiles = len(lines2)
        raise AssertionError("\n".join(error))

def pytest_runtest_teardown(item, __multicall__):
    item.config._basedir.chdir()
    if hasattr(item.config, '_numfiles'):
    if hasattr(item.config, '_openfiles'):
        x = __multicall__.execute()
        check_open_files(item.config)
        return x
@@ -54,12 +77,11 @@ def pytest_runtest_teardown(item, __multicall__):
winpymap = {
    'python2.7': r'C:\Python27\python.exe',
    'python2.6': r'C:\Python26\python.exe',
    'python2.5': r'C:\Python25\python.exe',
    'python2.4': r'C:\Python24\python.exe',
    'python3.1': r'C:\Python31\python.exe',
    'python3.2': r'C:\Python32\python.exe',
    'python3.3': r'C:\Python33\python.exe',
    'python3.4': r'C:\Python34\python.exe',
    'python3.5': r'C:\Python35\python.exe',
}

def getexecutable(name, cache={}):
@@ -80,9 +102,8 @@ def getexecutable(name, cache={}):
    cache[name] = executable
    return executable

@pytest.fixture(params=['python2.5', 'python2.6',
                        'python2.7', 'python3.2', "python3.3",
                        'pypy', 'jython'])
@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
                        'pypy', 'pypy3', 'jython'])
def anypython(request):
    name = request.param
    executable = getexecutable(name)

testing/cx_freeze/runtests_script.py (new file)
@@ -0,0 +1,9 @@
"""
This is the script that is actually frozen into an executable: simply executes
py.test main().
"""

if __name__ == '__main__':
    import sys
    import pytest
    sys.exit(pytest.main())
testing/cx_freeze/runtests_setup.py (new file)
@@ -0,0 +1,15 @@
"""
Sample setup.py script that generates an executable with pytest runner embedded.
"""
if __name__ == '__main__':
    from cx_Freeze import setup, Executable
    import pytest

    setup(
        name="runtests",
        version="0.1",
        description="exemple of how embedding py.test into an executable using cx_freeze",
        executables=[Executable("runtests_script.py")],
        options={"build_exe": {'includes': pytest.freeze_includes()}},
    )

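A typical way to exercise this sample would be cx_Freeze's standard ``build`` command (a sketch; the exact ``build/`` subdirectory name varies by platform and Python version)::

    python runtests_setup.py build
    build/exe.linux-x86_64-3.4/runtests_script tests/
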
testing/cx_freeze/tests/test_doctest.txt (new file)
@@ -0,0 +1,6 @@


Testing doctest::

    >>> 1 + 1
    2
testing/cx_freeze/tests/test_trivial.py (new file)
@@ -0,0 +1,6 @@

def test_upper():
    assert 'foo'.upper() == 'FOO'

def test_lower():
    assert 'FOO'.lower() == 'foo'
testing/cx_freeze/tox_run.py (new file)
@@ -0,0 +1,15 @@
"""
Called by tox.ini: uses the generated executable to run the tests in ./tests/
directory.

.. note:: somehow calling "build/runtests_script" directly from tox doesn't
    seem to work (at least on Windows).
"""
if __name__ == '__main__':
    import os
    import sys

    executable = os.path.join(os.getcwd(), 'build', 'runtests_script')
    if sys.platform.startswith('win'):
        executable += '.exe'
    sys.exit(os.system('%s tests' % executable))
@@ -33,8 +33,8 @@ class TestModule:
        pytest.raises(ImportError, lambda: modcol.obj)

class TestClass:
    def test_class_with_init_skip_collect(self, testdir):
        modcol = testdir.getmodulecol("""
    def test_class_with_init_warning(self, testdir):
        testdir.makepyfile("""
            class TestClass1:
                def __init__(self):
                    pass
@@ -42,11 +42,11 @@ class TestClass:
                def __init__(self):
                    pass
        """)
        l = modcol.collect()
        assert len(l) == 2

        for classcol in l:
            pytest.raises(pytest.skip.Exception, classcol.collect)
        result = testdir.runpytest("-rw")
        result.stdout.fnmatch_lines("""
            WC1*test_class_with_init_warning.py*__init__*
            *2 warnings*
        """)

    def test_class_subclassobject(self, testdir):
        testdir.getmodulecol("""
@@ -276,6 +276,17 @@ class TestFunction:
        assert isinstance(modcol, pytest.Module)
        assert hasattr(modcol.obj, 'test_func')

    def test_function_as_object_instance_ignored(self, testdir):
        testdir.makepyfile("""
            class A:
                def __call__(self, tmpdir):
                    0/0

            test_a = A()
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome()

    def test_function_equality(self, testdir, tmpdir):
        from _pytest.python import FixtureManager
        config = testdir.parseconfigure()
@@ -630,7 +641,7 @@ class TestTracebackCutting:
        assert "x = 1" not in out
        assert "x = 2" not in out
        result.stdout.fnmatch_lines([
            ">*asd*",
            " *asd*",
            "E*NameError*",
        ])
        result = testdir.runpytest("--fulltrace")

@@ -1430,6 +1430,78 @@ class TestFixtureMarker:
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=3)

    def test_scope_session_exc(self, testdir):
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(scope="session")
            def fix():
                l.append(1)
                pytest.skip('skipping')

            def test_1(fix):
                pass
            def test_2(fix):
                pass
            def test_last():
                assert l == [1]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(skipped=2, passed=1)

    def test_scope_session_exc_two_fix(self, testdir):
        testdir.makepyfile("""
            import pytest
            l = []
            m = []
            @pytest.fixture(scope="session")
            def a():
                l.append(1)
                pytest.skip('skipping')
            @pytest.fixture(scope="session")
            def b(a):
                m.append(1)

            def test_1(b):
                pass
            def test_2(b):
                pass
            def test_last():
                assert l == [1]
                assert m == []
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(skipped=2, passed=1)

    def test_scope_exc(self, testdir):
        testdir.makepyfile(
            test_foo="""
                def test_foo(fix):
                    pass
            """,
            test_bar="""
                def test_bar(fix):
                    pass
            """,
            conftest="""
                import pytest
                reqs = []
                @pytest.fixture(scope="session")
                def fix(request):
                    reqs.append(1)
                    pytest.skip()
                @pytest.fixture
                def req_list():
                    return reqs
            """,
            test_real="""
                def test_last(req_list):
                    assert req_list == [1]
            """
        )
        reprec = testdir.inline_run()
        reprec.assertoutcome(skipped=2, passed=1)

    def test_scope_module_uses_session(self, testdir):
        testdir.makepyfile("""
            import pytest
@@ -1620,22 +1692,22 @@ class TestFixtureMarker:
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines("""
            test_mod1.py:1: test_func[s1] PASSED
            test_mod2.py:1: test_func2[s1] PASSED
            test_mod2.py:3: test_func3[s1-m1] PASSED
            test_mod2.py:5: test_func3b[s1-m1] PASSED
            test_mod2.py:3: test_func3[s1-m2] PASSED
            test_mod2.py:5: test_func3b[s1-m2] PASSED
            test_mod1.py:1: test_func[s2] PASSED
            test_mod2.py:1: test_func2[s2] PASSED
            test_mod2.py:3: test_func3[s2-m1] PASSED
            test_mod2.py:5: test_func3b[s2-m1] PASSED
            test_mod2.py:7: test_func4[m1] PASSED
            test_mod2.py:3: test_func3[s2-m2] PASSED
            test_mod2.py:5: test_func3b[s2-m2] PASSED
            test_mod2.py:7: test_func4[m2] PASSED
            test_mod1.py:3: test_func1[m1] PASSED
            test_mod1.py:3: test_func1[m2] PASSED
            test_mod1.py::test_func[s1] PASSED
            test_mod2.py::test_func2[s1] PASSED
            test_mod2.py::test_func3[s1-m1] PASSED
            test_mod2.py::test_func3b[s1-m1] PASSED
            test_mod2.py::test_func3[s1-m2] PASSED
            test_mod2.py::test_func3b[s1-m2] PASSED
            test_mod1.py::test_func[s2] PASSED
            test_mod2.py::test_func2[s2] PASSED
            test_mod2.py::test_func3[s2-m1] PASSED
            test_mod2.py::test_func3b[s2-m1] PASSED
            test_mod2.py::test_func4[m1] PASSED
            test_mod2.py::test_func3[s2-m2] PASSED
            test_mod2.py::test_func3b[s2-m2] PASSED
            test_mod2.py::test_func4[m2] PASSED
            test_mod1.py::test_func1[m1] PASSED
            test_mod1.py::test_func1[m2] PASSED
        """)

    def test_class_ordering(self, testdir):
@@ -1672,18 +1744,18 @@ class TestFixtureMarker:
        """)
        result = testdir.runpytest("-vs")
        result.stdout.fnmatch_lines("""
            test_class_ordering.py:4: TestClass2.test_1[1-a] PASSED
            test_class_ordering.py:4: TestClass2.test_1[2-a] PASSED
            test_class_ordering.py:6: TestClass2.test_2[1-a] PASSED
            test_class_ordering.py:6: TestClass2.test_2[2-a] PASSED
            test_class_ordering.py:4: TestClass2.test_1[1-b] PASSED
            test_class_ordering.py:4: TestClass2.test_1[2-b] PASSED
            test_class_ordering.py:6: TestClass2.test_2[1-b] PASSED
            test_class_ordering.py:6: TestClass2.test_2[2-b] PASSED
            test_class_ordering.py:9: TestClass.test_3[1-a] PASSED
            test_class_ordering.py:9: TestClass.test_3[2-a] PASSED
            test_class_ordering.py:9: TestClass.test_3[1-b] PASSED
            test_class_ordering.py:9: TestClass.test_3[2-b] PASSED
            test_class_ordering.py::TestClass2::test_1[1-a] PASSED
            test_class_ordering.py::TestClass2::test_1[2-a] PASSED
            test_class_ordering.py::TestClass2::test_2[1-a] PASSED
            test_class_ordering.py::TestClass2::test_2[2-a] PASSED
            test_class_ordering.py::TestClass2::test_1[1-b] PASSED
            test_class_ordering.py::TestClass2::test_1[2-b] PASSED
            test_class_ordering.py::TestClass2::test_2[1-b] PASSED
            test_class_ordering.py::TestClass2::test_2[2-b] PASSED
            test_class_ordering.py::TestClass::test_3[1-a] PASSED
            test_class_ordering.py::TestClass::test_3[2-a] PASSED
            test_class_ordering.py::TestClass::test_3[1-b] PASSED
            test_class_ordering.py::TestClass::test_3[2-b] PASSED
        """)

    def test_parametrize_separated_order_higher_scope_first(self, testdir):
@@ -2087,6 +2159,35 @@ class TestErrors:
            "*1 error*",
        ])

    def test_issue498_fixture_finalizer_failing(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def fix1(request):
                def f():
                    raise KeyError
                request.addfinalizer(f)
                return object()

            l = []
            def test_1(fix1):
                l.append(fix1)
            def test_2(fix1):
                l.append(fix1)
            def test_3():
                assert l[0] != l[1]
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("""
            *ERROR*teardown*test_1*
            *KeyError*
            *ERROR*teardown*test_2*
            *KeyError*
            *3 pass*2 error*
        """)



    def test_setupfunc_missing_funcarg(self, testdir):
        testdir.makepyfile("""
            import pytest

@@ -112,6 +112,26 @@ class TestMockDecoration:
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_unittest_mock_and_fixture(self, testdir):
        pytest.importorskip("unittest.mock")
        testdir.makepyfile("""
            import os.path
            import unittest.mock
            import pytest

            @pytest.fixture
            def inject_me():
                pass

            @unittest.mock.patch.object(os.path, "abspath",
                                        new=unittest.mock.MagicMock)
            def test_hello(inject_me):
                import os
                os.path.abspath("hello")
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_mock(self, testdir):
        pytest.importorskip("mock", "1.0.1")
        testdir.makepyfile("""
@@ -124,12 +144,16 @@ class TestMockDecoration:
            def test_hello(self, abspath):
                os.path.abspath("hello")
                abspath.assert_any_call("hello")
            def mock_basename(path):
                return "mock_basename"
            @mock.patch("os.path.abspath")
            @mock.patch("os.path.normpath")
            @mock.patch("os.path.basename", new=mock_basename)
            def test_someting(normpath, abspath, tmpdir):
                abspath.return_value = "this"
                os.path.normpath(os.path.abspath("hello"))
                normpath.assert_any_call("this")
                assert os.path.basename("123") == "mock_basename"
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)
@@ -160,6 +184,22 @@ class TestMockDecoration:
        names = [x.nodeid.split("::")[-1] for x in calls]
        assert names == ["test_one", "test_two", "test_three"]

    def test_mock_double_patch_issue473(self, testdir):
        pytest.importorskip("mock", "1.0.1")
        testdir.makepyfile("""
            from mock import patch
            from pytest import mark

            @patch('os.getcwd')
            @patch('os.path')
            @mark.slow
            class TestSimple:
                def test_simple_thing(self, mock_path, mock_getcwd):
                    pass
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)


class TestReRunTests:
    def test_rerun(self, testdir):
@@ -195,3 +235,52 @@ class TestReRunTests:
def test_pytestconfig_is_session_scoped():
    from _pytest.python import pytestconfig
    assert pytestconfig._pytestfixturefunction.scope == "session"


class TestNoselikeTestAttribute:
    def test_module(self, testdir):
        testdir.makepyfile("""
            __test__ = False
            def test_hello():
                pass
        """)
        reprec = testdir.inline_run()
        assert not reprec.getfailedcollections()
        calls = reprec.getreports("pytest_runtest_logreport")
        assert not calls

    def test_class_and_method(self, testdir):
        testdir.makepyfile("""
            __test__ = True
            def test_func():
                pass
            test_func.__test__ = False

            class TestSome:
                __test__ = False
                def test_method(self):
                    pass
        """)
        reprec = testdir.inline_run()
        assert not reprec.getfailedcollections()
        calls = reprec.getreports("pytest_runtest_logreport")
        assert not calls

    def test_unittest_class(self, testdir):
        testdir.makepyfile("""
            import unittest
            class TC(unittest.TestCase):
                def test_1(self):
                    pass
            class TC2(unittest.TestCase):
                __test__ = False
                def test_2(self):
                    pass
        """)
        reprec = testdir.inline_run()
        assert not reprec.getfailedcollections()
        call = reprec.getcalls("pytest_collection_modifyitems")[0]
        assert len(call.items) == 1
        assert call.items[0].cls.__name__ == "TC"


@@ -62,3 +62,10 @@ class TestRaises:
            '*3 passed*',
        ])

    def test_noclass(self):
        with pytest.raises(TypeError):
            pytest.raises('wrong', lambda: None)

    def test_tuple(self):
        with pytest.raises((KeyError, ValueError)):
            raise KeyError('oops')

@@ -131,6 +131,18 @@ def test_assert_keyword_arg():
    e = exvalue()
    assert "x=5" in e.msg

def test_private_class_variable():
    class X:
        def __init__(self):
            self.__v = 41
        def m(self):
            assert self.__v == 42
    try:
        X().m()
    except AssertionError:
        e = exvalue()
        assert "== 42" in e.msg

# These tests should both fail, but should fail nicely...
class WeirdRepr:
    def __repr__(self):

@@ -199,6 +199,21 @@ class TestAssert_reprcompare:
        assert msg


class TestFormatExplanation:

    def test_speical_chars_full(self, testdir):
        # Issue 453, for the bug this would raise IndexError
        testdir.makepyfile("""
            def test_foo():
                assert '\\n}' == ''
        """)
        result = testdir.runpytest()
        assert result.ret == 1
        result.stdout.fnmatch_lines([
            "*AssertionError*",
        ])


def test_python25_compile_issue257(testdir):
    testdir.makepyfile("""
        def test_rewritten():
@@ -354,7 +369,7 @@ def test_traceback_failure(testdir):
        def test_onefails():
            f(3)
    """)
    result = testdir.runpytest(p1)
    result = testdir.runpytest(p1, "--tb=long")
    result.stdout.fnmatch_lines([
        "*test_traceback_failure.py F",
        "====* FAILURES *====",
@@ -374,6 +389,25 @@ def test_traceback_failure(testdir):
        "*test_traceback_failure.py:4: AssertionError"
    ])

    result = testdir.runpytest(p1)  # "auto"
    result.stdout.fnmatch_lines([
        "*test_traceback_failure.py F",
        "====* FAILURES *====",
        "____*____",
        "",
        " def test_onefails():",
        "> f(3)",
        "",
        "*test_*.py:6: ",
        "",
        " def f(x):",
        "> assert x == g()",
        "E assert 3 == 2",
        "E + where 2 = g()",
        "",
        "*test_traceback_failure.py:4: AssertionError"
    ])

@pytest.mark.skipif("sys.version_info < (2,5) or '__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" )
def test_warn_missing(testdir):
    testdir.makepyfile("")

@@ -313,6 +313,17 @@ class TestAssertionRewrite:
            assert "%test" == "test"
        assert getmsg(f).startswith("assert '%test' == 'test'")

    def test_custom_repr(self):
        def f():
            class Foo(object):
                a = 1

                def __repr__(self):
                    return "\n{ \n~ \n}"
            f = Foo()
            assert 0 == f.a
        assert r"where 1 = \n{ \n~ \n}.a" in util._format_lines([getmsg(f)])[0]


class TestRewriteOnImport:

@@ -458,28 +469,35 @@ class TestAssertionRewriteHookDetails(object):

    @pytest.mark.skipif("sys.version_info[0] >= 3")
    def test_detect_coding_cookie(self, testdir):
        testdir.tmpdir.join("test_cookie.py").write("""# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
    assert "@py_builtins" in globals()""", "wb")
        testdir.makepyfile(test_cookie="""
            # -*- coding: utf-8 -*-
            u"St\xc3\xa4d"
            def test_rewritten():
                assert "@py_builtins" in globals()""")
        assert testdir.runpytest().ret == 0

    @pytest.mark.skipif("sys.version_info[0] >= 3")
    def test_detect_coding_cookie_second_line(self, testdir):
        testdir.tmpdir.join("test_cookie.py").write("""#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
    assert "@py_builtins" in globals()""", "wb")
        testdir.makepyfile(test_cookie="""
            # -*- coding: utf-8 -*-
            u"St\xc3\xa4d"
            def test_rewritten():
                assert "@py_builtins" in globals()""")
        assert testdir.runpytest().ret == 0

    @pytest.mark.skipif("sys.version_info[0] >= 3")
    def test_detect_coding_cookie_crlf(self, testdir):
        testdir.tmpdir.join("test_cookie.py").write("""#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
    assert "@py_builtins" in globals()""".replace("\n", "\r\n"), "wb")
        testdir.makepyfile(test_cookie="""
            # -*- coding: utf-8 -*-
            u"St\xc3\xa4d"
            def test_rewritten():
                assert "@py_builtins" in globals()""")
        assert testdir.runpytest().ret == 0

    def test_sys_meta_path_munged(self, testdir):
        testdir.makepyfile("""
            def test_meta_path():
                import sys; sys.meta_path = []""")
        assert testdir.runpytest().ret == 0

    def test_write_pyc(self, testdir, tmpdir, monkeypatch):
@@ -493,13 +511,13 @@ def test_rewritten():
        state = AssertionState(config, "rewrite")
        source_path = tmpdir.ensure("source.py")
        pycpath = tmpdir.join("pyc").strpath
        assert _write_pyc(state, [1], source_path, pycpath)
        assert _write_pyc(state, [1], source_path.stat(), pycpath)
        def open(*args):
            e = IOError()
            e.errno = 10
            raise e
        monkeypatch.setattr(b, "open", open)
        assert not _write_pyc(state, [1], source_path, pycpath)
        assert not _write_pyc(state, [1], source_path.stat(), pycpath)

    def test_resources_provider_for_loader(self, testdir):
        """
@@ -532,3 +550,25 @@ def test_rewritten():
        result.stdout.fnmatch_lines([
            '* 1 passed*',
        ])

    def test_read_pyc(self, tmpdir):
        """
        Ensure that the `_read_pyc` can properly deal with corrupted pyc files.
        In those circumstances it should just give up instead of generating
        an exception that is propagated to the caller.
        """
        import py_compile
        from _pytest.assertion.rewrite import _read_pyc

        source = tmpdir.join('source.py')
        pyc = source + 'c'

        source.write('def test(): pass')
        py_compile.compile(str(source), str(pyc))

        contents = pyc.read(mode='rb')
        strip_bytes = 20  # header is around 8 bytes, strip a little more
        assert len(contents) > strip_bytes
        pyc.write(contents[:strip_bytes], mode='wb')

        assert _read_pyc(source, str(pyc)) is None  # no error

@@ -44,82 +44,64 @@ def oswritebytes(fd, obj):



def StdCaptureFD(out=True, err=True, in_=True):
    return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)

def StdCapture(out=True, err=True, in_=True):
    return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)


class TestCaptureManager:
    def test_getmethod_default_no_fd(self, testdir, monkeypatch):
        config = testdir.parseconfig(testdir.tmpdir)
        assert config.getvalue("capture") is None
        capman = CaptureManager()
    def test_getmethod_default_no_fd(self, monkeypatch):
        from _pytest.capture import pytest_addoption
        from _pytest.config import Parser
        parser = Parser()
        pytest_addoption(parser)
        default = parser._groups[0].options[0].default
        assert default == "fd" if hasattr(os, "dup") else "sys"
        parser = Parser()
        monkeypatch.delattr(os, 'dup', raising=False)
        try:
            assert capman._getmethod(config, None) == "sys"
        finally:
            monkeypatch.undo()

    @pytest.mark.parametrize("mode", "no fd sys".split())
    def test_configure_per_fspath(self, testdir, mode):
        config = testdir.parseconfig(testdir.tmpdir)
        capman = CaptureManager()
        hasfd = hasattr(os, 'dup')
        if hasfd:
            assert capman._getmethod(config, None) == "fd"
        else:
            assert capman._getmethod(config, None) == "sys"

        if not hasfd and mode == 'fd':
            return
        sub = testdir.tmpdir.mkdir("dir" + mode)
        sub.ensure("__init__.py")
        sub.join("conftest.py").write('option_capture = %r' % mode)
        assert capman._getmethod(config, sub.join("test_hello.py")) == mode
        pytest_addoption(parser)
        assert parser._groups[0].options[0].default == "sys"

    @needsosdup
    @pytest.mark.parametrize("method", ['no', 'fd', 'sys'])
    @pytest.mark.parametrize("method",
        ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
    def test_capturing_basic_api(self, method):
        capouter = capture.StdCaptureFD()
        capouter = StdCaptureFD()
        old = sys.stdout, sys.stderr, sys.stdin
        try:
            capman = CaptureManager()
            # call suspend without resume or start
            outerr = capman.suspendcapture()
            capman = CaptureManager(method)
            capman.init_capturings()
            outerr = capman.suspendcapture()
            assert outerr == ("", "")
            outerr = capman.suspendcapture()
            assert outerr == ("", "")
            capman.resumecapture(method)
            print ("hello")
            out, err = capman.suspendcapture()
            if method == "no":
                assert old == (sys.stdout, sys.stderr, sys.stdin)
            else:
                assert out == "hello\n"
            capman.resumecapture(method)
            assert not out
            capman.resumecapture()
            print ("hello")
            out, err = capman.suspendcapture()
            assert not out and not err
            if method != "no":
                assert out == "hello\n"
            capman.reset_capturings()
        finally:
            capouter.reset()
            capouter.stop_capturing()

    @needsosdup
    def test_juggle_capturings(self, testdir):
        capouter = capture.StdCaptureFD()
    def test_init_capturing(self):
        capouter = StdCaptureFD()
        try:
            #config = testdir.parseconfig(testdir.tmpdir)
            capman = CaptureManager()
            try:
                capman.resumecapture("fd")
                pytest.raises(ValueError, 'capman.resumecapture("fd")')
                pytest.raises(ValueError, 'capman.resumecapture("sys")')
                os.write(1, "hello\n".encode('ascii'))
                out, err = capman.suspendcapture()
                assert out == "hello\n"
                capman.resumecapture("sys")
                os.write(1, "hello\n".encode('ascii'))
                py.builtin.print_("world", file=sys.stderr)
                out, err = capman.suspendcapture()
                assert not out
                assert err == "world\n"
            finally:
                capman.reset_capturings()
            capman = CaptureManager("fd")
            capman.init_capturings()
            pytest.raises(AssertionError, "capman.init_capturings()")
            capman.reset_capturings()
        finally:
            capouter.reset()
            capouter.stop_capturing()


@pytest.mark.parametrize("method", ['fd', 'sys'])
@@ -282,9 +264,9 @@ class TestPerTestCapturing:
            "====* FAILURES *====",
            "____*____",
            "*test_capturing_outerr.py:8: ValueError",
            "*--- Captured stdout ---*",
            "*--- Captured stdout *call*",
            "1",
            "*--- Captured stderr ---*",
            "*--- Captured stderr *call*",
            "2",
        ])

@@ -410,13 +392,14 @@ class TestLoggingInteraction:
|
||||
|
||||
|
||||
class TestCaptureFixture:
|
||||
def test_std_functional(self, testdir):
|
||||
@pytest.mark.parametrize("opt", [[], ["-s"]])
|
||||
def test_std_functional(self, testdir, opt):
|
||||
reprec = testdir.inline_runsource("""
|
||||
def test_hello(capsys):
|
||||
print (42)
|
||||
out, err = capsys.readouterr()
|
||||
assert out.startswith("42")
|
||||
""")
|
||||
""", *opt)
|
||||
reprec.assertoutcome(passed=1)
|
||||
|
||||
def test_capsyscapfd(self, testdir):
|
||||
@@ -533,6 +516,24 @@ def test_capture_conftest_runtest_setup(testdir):
|
||||
assert 'hello19' not in result.stdout.str()
|
||||
|
||||
|
||||
def test_capture_badoutput_issue412(testdir):
|
||||
testdir.makepyfile("""
|
||||
import os
|
||||
|
||||
def test_func():
|
||||
omg = bytearray([1,129,1])
|
||||
os.write(1, omg)
|
||||
assert 0
|
||||
""")
|
||||
result = testdir.runpytest('--cap=fd')
|
||||
result.stdout.fnmatch_lines('''
|
||||
*def test_func*
|
||||
*assert 0*
|
||||
*Captured*
|
||||
*1 failed*
|
||||
''')
|
||||
|
||||
|
||||
def test_capture_early_option_parsing(testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_runtest_setup():
|
||||
@@ -544,8 +545,6 @@ def test_capture_early_option_parsing(testdir):
|
||||
assert 'hello19' in result.stdout.str()
|
||||
|
||||
|
||||
@pytest.mark.xfail(sys.version_info >= (3, 0), reason='encoding issues')
|
||||
@pytest.mark.xfail(sys.version_info < (2, 6), reason='test not run on py25')
|
||||
def test_capture_binary_output(testdir):
|
||||
testdir.makepyfile(r"""
|
||||
import pytest
|
||||
@@ -590,7 +589,7 @@ class TestTextIO:
|
||||
|
||||
|
||||
def test_bytes_io():
|
||||
f = capture.BytesIO()
|
||||
f = py.io.BytesIO()
|
||||
f.write(tobytes("hello"))
|
||||
pytest.raises(TypeError, "f.write(totext('hello'))")
|
||||
s = f.getvalue()
|
||||
@@ -619,7 +618,7 @@ def tmpfile(testdir):
|
||||
def test_dupfile(tmpfile):
|
||||
flist = []
|
||||
for i in range(5):
|
||||
nf = capture.dupfile(tmpfile, encoding="utf-8")
|
||||
nf = capture.safe_text_dupfile(tmpfile, "wb")
|
||||
assert nf != tmpfile
|
||||
assert nf.fileno() != tmpfile.fileno()
|
||||
assert nf not in flist
|
||||
@@ -633,19 +632,17 @@ def test_dupfile(tmpfile):
|
||||
assert "01234" in repr(s)
|
||||
tmpfile.close()
|
||||
|
||||
def test_dupfile_on_bytesio():
|
||||
io = py.io.BytesIO()
|
||||
f = capture.safe_text_dupfile(io, "wb")
|
||||
f.write("hello")
|
||||
assert io.getvalue() == b"hello"
|
||||
|
||||
def test_dupfile_no_mode():
|
||||
"""
|
||||
dupfile should trap an AttributeError and return f if no mode is supplied.
|
||||
"""
|
||||
class SomeFileWrapper(object):
|
||||
"An object with a fileno method but no mode attribute"
|
||||
def fileno(self):
|
||||
return 1
|
||||
tmpfile = SomeFileWrapper()
|
||||
assert capture.dupfile(tmpfile) is tmpfile
|
||||
with pytest.raises(AttributeError):
|
||||
capture.dupfile(tmpfile, raising=True)
|
||||
def test_dupfile_on_textio():
|
||||
io = py.io.TextIO()
|
||||
f = capture.safe_text_dupfile(io, "wb")
|
||||
f.write("hello")
|
||||
assert io.getvalue() == "hello"
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
@@ -670,17 +667,15 @@ class TestFDCapture:
|
||||
cap = capture.FDCapture(fd)
|
||||
data = tobytes("hello")
|
||||
os.write(fd, data)
|
||||
f = cap.done()
|
||||
s = f.read()
|
||||
f.close()
|
||||
s = cap.snap()
|
||||
cap.done()
|
||||
assert not s
|
||||
cap = capture.FDCapture(fd)
|
||||
cap.start()
|
||||
os.write(fd, data)
|
||||
f = cap.done()
|
||||
s = f.read()
|
||||
s = cap.snap()
|
||||
cap.done()
|
||||
assert s == "hello"
|
||||
f.close()
|
||||
|
||||
def test_simple_many(self, tmpfile):
|
||||
for i in range(10):
|
||||
@@ -694,155 +689,177 @@ class TestFDCapture:
|
||||
def test_simple_fail_second_start(self, tmpfile):
|
||||
fd = tmpfile.fileno()
|
||||
cap = capture.FDCapture(fd)
|
||||
f = cap.done()
|
||||
cap.done()
|
||||
pytest.raises(ValueError, cap.start)
|
||||
f.close()
|
||||
|
||||
def test_stderr(self):
|
||||
cap = capture.FDCapture(2, patchsys=True)
|
||||
cap = capture.FDCapture(2)
|
||||
cap.start()
|
||||
print_("hello", file=sys.stderr)
|
||||
f = cap.done()
|
||||
s = f.read()
|
||||
s = cap.snap()
|
||||
cap.done()
|
||||
assert s == "hello\n"
|
||||
|
||||
def test_stdin(self, tmpfile):
|
||||
tmpfile.write(tobytes("3"))
|
||||
tmpfile.seek(0)
|
||||
cap = capture.FDCapture(0, tmpfile=tmpfile)
|
||||
cap = capture.FDCapture(0)
|
||||
cap.start()
|
||||
# check with os.read() directly instead of raw_input(), because
|
||||
# sys.stdin itself may be redirected (as pytest now does by default)
|
||||
x = os.read(0, 100).strip()
|
||||
cap.done()
|
||||
assert x == tobytes("3")
|
||||
assert x == tobytes('')
|
||||
|
||||
def test_writeorg(self, tmpfile):
|
||||
data1, data2 = tobytes("foo"), tobytes("bar")
|
||||
try:
|
||||
cap = capture.FDCapture(tmpfile.fileno())
|
||||
cap.start()
|
||||
tmpfile.write(data1)
|
||||
cap.writeorg(data2)
|
||||
finally:
|
||||
tmpfile.close()
|
||||
f = cap.done()
|
||||
scap = f.read()
|
||||
cap = capture.FDCapture(tmpfile.fileno())
|
||||
cap.start()
|
||||
tmpfile.write(data1)
|
||||
tmpfile.flush()
|
||||
cap.writeorg(data2)
|
||||
scap = cap.snap()
|
||||
cap.done()
|
||||
assert scap == totext(data1)
|
||||
stmp = open(tmpfile.name, 'rb').read()
|
||||
assert stmp == data2
|
||||
with open(tmpfile.name, 'rb') as stmp_file:
|
||||
stmp = stmp_file.read()
|
||||
assert stmp == data2
|
||||
|
||||
def test_simple_resume_suspend(self, tmpfile):
|
||||
with saved_fd(1):
|
||||
cap = capture.FDCapture(1)
|
||||
cap.start()
|
||||
data = tobytes("hello")
|
||||
os.write(1, data)
|
||||
sys.stdout.write("whatever")
|
||||
s = cap.snap()
|
||||
assert s == "hellowhatever"
|
||||
cap.suspend()
|
||||
os.write(1, tobytes("world"))
|
||||
sys.stdout.write("qlwkej")
|
||||
assert not cap.snap()
|
||||
cap.resume()
|
||||
os.write(1, tobytes("but now"))
|
||||
sys.stdout.write(" yes\n")
|
||||
s = cap.snap()
|
||||
assert s == "but now yes\n"
|
||||
cap.suspend()
|
||||
cap.done()
|
||||
pytest.raises(AttributeError, cap.suspend)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def saved_fd(fd):
|
||||
new_fd = os.dup(fd)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
os.dup2(new_fd, fd)
|
||||
|
||||
|
||||
class TestStdCapture:
|
||||
captureclass = staticmethod(StdCapture)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def getcapture(self, **kw):
|
||||
cap = capture.StdCapture(**kw)
|
||||
cap.startall()
|
||||
return cap
|
||||
cap = self.__class__.captureclass(**kw)
|
||||
cap.start_capturing()
|
||||
try:
|
||||
yield cap
|
||||
finally:
|
||||
cap.stop_capturing()
|
||||
|
||||
def test_capturing_done_simple(self):
|
||||
cap = self.getcapture()
|
||||
sys.stdout.write("hello")
|
||||
sys.stderr.write("world")
|
||||
outfile, errfile = cap.done()
|
||||
s = outfile.read()
|
||||
assert s == "hello"
|
||||
s = errfile.read()
|
||||
assert s == "world"
|
||||
with self.getcapture() as cap:
|
||||
sys.stdout.write("hello")
|
||||
sys.stderr.write("world")
|
||||
out, err = cap.readouterr()
|
||||
assert out == "hello"
|
||||
assert err == "world"
|
||||
|
     def test_capturing_reset_simple(self):
-        cap = self.getcapture()
-        print("hello world")
-        sys.stderr.write("hello error\n")
-        out, err = cap.reset()
+        with self.getcapture() as cap:
+            print("hello world")
+            sys.stderr.write("hello error\n")
+            out, err = cap.readouterr()
         assert out == "hello world\n"
         assert err == "hello error\n"

     def test_capturing_readouterr(self):
-        cap = self.getcapture()
-        try:
+        with self.getcapture() as cap:
             print ("hello world")
             sys.stderr.write("hello error\n")
             out, err = cap.readouterr()
             assert out == "hello world\n"
             assert err == "hello error\n"
             sys.stderr.write("error2")
-        finally:
-            out, err = cap.reset()
+            out, err = cap.readouterr()
         assert err == "error2"

     def test_capturing_readouterr_unicode(self):
-        cap = self.getcapture()
-        try:
+        with self.getcapture() as cap:
             print ("hx\xc4\x85\xc4\x87")
             out, err = cap.readouterr()
-        finally:
-            cap.reset()
         assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")

     @pytest.mark.skipif('sys.version_info >= (3,)',
                         reason='text output different for bytes on python3')
     def test_capturing_readouterr_decode_error_handling(self):
-        cap = self.getcapture()
-        # triggered an internal error in pytest
-        print('\xa6')
-        out, err = cap.readouterr()
+        with self.getcapture() as cap:
+            # triggered an internal error in pytest
+            print('\xa6')
+            out, err = cap.readouterr()
         assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')

     def test_reset_twice_error(self):
-        cap = self.getcapture()
-        print ("hello")
-        out, err = cap.reset()
-        pytest.raises(ValueError, cap.reset)
+        with self.getcapture() as cap:
+            print ("hello")
+            out, err = cap.readouterr()
+        pytest.raises(ValueError, cap.stop_capturing)
         assert out == "hello\n"
         assert not err

     def test_capturing_modify_sysouterr_in_between(self):
         oldout = sys.stdout
         olderr = sys.stderr
-        cap = self.getcapture()
-        sys.stdout.write("hello")
-        sys.stderr.write("world")
-        sys.stdout = capture.TextIO()
-        sys.stderr = capture.TextIO()
-        print ("not seen")
-        sys.stderr.write("not seen\n")
-        out, err = cap.reset()
+        with self.getcapture() as cap:
+            sys.stdout.write("hello")
+            sys.stderr.write("world")
+            sys.stdout = capture.TextIO()
+            sys.stderr = capture.TextIO()
+            print ("not seen")
+            sys.stderr.write("not seen\n")
+            out, err = cap.readouterr()
         assert out == "hello"
         assert err == "world"
         assert sys.stdout == oldout
         assert sys.stderr == olderr

     def test_capturing_error_recursive(self):
-        cap1 = self.getcapture()
-        print ("cap1")
-        cap2 = self.getcapture()
-        print ("cap2")
-        out2, err2 = cap2.reset()
-        out1, err1 = cap1.reset()
+        with self.getcapture() as cap1:
+            print ("cap1")
+            with self.getcapture() as cap2:
+                print ("cap2")
+                out2, err2 = cap2.readouterr()
+            out1, err1 = cap1.readouterr()
         assert out1 == "cap1\n"
         assert out2 == "cap2\n"

     def test_just_out_capture(self):
-        cap = self.getcapture(out=True, err=False)
-        sys.stdout.write("hello")
-        sys.stderr.write("world")
-        out, err = cap.reset()
+        with self.getcapture(out=True, err=False) as cap:
+            sys.stdout.write("hello")
+            sys.stderr.write("world")
+            out, err = cap.readouterr()
         assert out == "hello"
         assert not err

     def test_just_err_capture(self):
-        cap = self.getcapture(out=False, err=True)
-        sys.stdout.write("hello")
-        sys.stderr.write("world")
-        out, err = cap.reset()
+        with self.getcapture(out=False, err=True) as cap:
+            sys.stdout.write("hello")
+            sys.stderr.write("world")
+            out, err = cap.readouterr()
         assert err == "world"
         assert not out

     def test_stdin_restored(self):
         old = sys.stdin
-        cap = self.getcapture(in_=True)
-        newstdin = sys.stdin
-        out, err = cap.reset()
+        with self.getcapture(in_=True):
+            newstdin = sys.stdin
         assert newstdin != sys.stdin
         assert sys.stdin is old

@@ -850,68 +867,47 @@ class TestStdCapture:
         print ("XXX this test may well hang instead of crashing")
         print ("XXX which indicates an error in the underlying capturing")
         print ("XXX mechanisms")
-        cap = self.getcapture()
-        pytest.raises(IOError, "sys.stdin.read()")
-        out, err = cap.reset()
-
-    def test_suspend_resume(self):
-        cap = self.getcapture(out=True, err=False, in_=False)
-        try:
-            print ("hello")
-            sys.stderr.write("error\n")
-            out, err = cap.suspend()
-            assert out == "hello\n"
-            assert not err
-            print ("in between")
-            sys.stderr.write("in between\n")
-            cap.resume()
-            print ("after")
-            sys.stderr.write("error_after\n")
-        finally:
-            out, err = cap.reset()
-        assert out == "after\n"
-        assert not err
+        with self.getcapture():
+            pytest.raises(IOError, "sys.stdin.read()")


 class TestStdCaptureFD(TestStdCapture):
     pytestmark = needsosdup
+    captureclass = staticmethod(StdCaptureFD)

-    def getcapture(self, **kw):
-        cap = capture.StdCaptureFD(**kw)
-        cap.startall()
-        return cap
     def test_simple_only_fd(self, testdir):
         testdir.makepyfile("""
             import os
             def test_x():
                 os.write(1, "hello\\n".encode("ascii"))
                 assert 0
         """)
         result = testdir.runpytest()
         result.stdout.fnmatch_lines("""
             *test_x*
             *assert 0*
             *Captured stdout*
         """)

     def test_intermingling(self):
-        cap = self.getcapture()
-        oswritebytes(1, "1")
-        sys.stdout.write(str(2))
-        sys.stdout.flush()
-        oswritebytes(1, "3")
-        oswritebytes(2, "a")
-        sys.stderr.write("b")
-        sys.stderr.flush()
-        oswritebytes(2, "c")
-        out, err = cap.reset()
+        with self.getcapture() as cap:
+            oswritebytes(1, "1")
+            sys.stdout.write(str(2))
+            sys.stdout.flush()
+            oswritebytes(1, "3")
+            oswritebytes(2, "a")
+            sys.stderr.write("b")
+            sys.stderr.flush()
+            oswritebytes(2, "c")
+            out, err = cap.readouterr()
         assert out == "123"
         assert err == "abc"

     def test_many(self, capfd):
         with lsof_check():
             for i in range(10):
-                cap = capture.StdCaptureFD()
-                cap.reset()
-
-
-@needsosdup
-def test_stdcapture_fd_tmpfile(tmpfile):
-    capfd = capture.StdCaptureFD(out=tmpfile)
-    try:
-        os.write(1, "hello".encode("ascii"))
-        os.write(2, "world".encode("ascii"))
-        outf, errf = capfd.done()
-    finally:
-        capfd.reset()
-    assert outf == tmpfile
+                cap = StdCaptureFD()
+                cap.stop_capturing()


 class TestStdCaptureFDinvalidFD:
@@ -920,19 +916,22 @@ class TestStdCaptureFDinvalidFD:
     def test_stdcapture_fd_invalid_fd(self, testdir):
         testdir.makepyfile("""
             import os
-            from _pytest.capture import StdCaptureFD
+            from _pytest import capture
+            def StdCaptureFD(out=True, err=True, in_=True):
+                return capture.MultiCapture(out, err, in_,
+                                            Capture=capture.FDCapture)
             def test_stdout():
                 os.close(1)
                 cap = StdCaptureFD(out=True, err=False, in_=False)
-                cap.done()
+                cap.stop_capturing()
            def test_stderr():
                 os.close(2)
                 cap = StdCaptureFD(out=False, err=True, in_=False)
-                cap.done()
+                cap.stop_capturing()
            def test_stdin():
                 os.close(0)
                 cap = StdCaptureFD(out=False, err=False, in_=True)
-                cap.done()
+                cap.stop_capturing()
         """)
         result = testdir.runpytest("--capture=fd")
         assert result.ret == 0
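
The inline StdCaptureFD shim in this test shows the new composition scheme: instead of a dedicated StdCaptureFD class, a MultiCapture object bundles per-stream Capture instances (FDCapture at the descriptor level, SysCapture at the sys.stdout/sys.stderr level). A simplified sketch of that shape, with stand-in classes rather than the real _pytest.capture code:

    class FakeCapture:
        # stand-in for FDCapture / SysCapture; handles one stream
        def __init__(self, targetfd):
            self.targetfd = targetfd
        def start(self):
            pass  # the real class would redirect self.targetfd here
        def done(self):
            pass  # the real class would restore self.targetfd here

    class MultiCaptureSketch:
        # one start/stop interface over optional out/err/in captures
        def __init__(self, out=True, err=True, in_=True, Capture=FakeCapture):
            self.out = Capture(1) if out else None
            self.err = Capture(2) if err else None
            self.in_ = Capture(0) if in_ else None
        def start_capturing(self):
            for cap in (self.in_, self.out, self.err):
                if cap is not None:
                    cap.start()
        def stop_capturing(self):
            for cap in (self.in_, self.out, self.err):
                if cap is not None:
                    cap.done()

    caps = MultiCaptureSketch(out=True, err=True, in_=False)
    caps.start_capturing()
    caps.stop_capturing()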
@@ -940,27 +939,8 @@ class TestStdCaptureFDinvalidFD:


 def test_capture_not_started_but_reset():
-    capsys = capture.StdCapture()
-    capsys.done()
-    capsys.done()
-    capsys.reset()
-
-
-@needsosdup
-def test_capture_no_sys():
-    capsys = capture.StdCapture()
-    try:
-        cap = capture.StdCaptureFD(patchsys=False)
-        cap.startall()
-        sys.stdout.write("hello")
-        sys.stderr.write("world")
-        oswritebytes(1, "1")
-        oswritebytes(2, "2")
-        out, err = cap.reset()
-        assert out == "1"
-        assert err == "2"
-    finally:
-        capsys.reset()
+    capsys = StdCapture()
+    capsys.stop_capturing()


 @needsosdup
@@ -968,19 +948,37 @@ def test_capture_no_sys():
 def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
     if not use:
         tmpfile = True
-    cap = capture.StdCaptureFD(out=False, err=tmpfile)
+    cap = StdCaptureFD(out=False, err=tmpfile)
     try:
-        cap.startall()
+        cap.start_capturing()
         capfile = cap.err.tmpfile
-        cap.suspend()
-        cap.resume()
+        cap.readouterr()
     finally:
-        cap.reset()
+        cap.stop_capturing()
     capfile2 = cap.err.tmpfile
     assert capfile2 == capfile

+@needsosdup
+def test_close_and_capture_again(testdir):
+    testdir.makepyfile("""
+        import os
+        def test_close():
+            os.close(1)
+        def test_capture_again():
+            os.write(1, b"hello\\n")
+            assert 0
+    """)
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines("""
+        *test_capture_again*
+        *assert 0*
+        *stdout*
+        *hello*
+    """)

-@pytest.mark.parametrize('method', ['StdCapture', 'StdCaptureFD'])
+@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
 def test_capturing_and_logging_fundamentals(testdir, method):
     if method == "StdCaptureFD" and not hasattr(os, 'dup'):
         pytest.skip("need os.dup")
@@ -989,23 +987,38 @@ def test_capturing_and_logging_fundamentals(testdir, method):
         import sys, os
         import py, logging
         from _pytest import capture
-        cap = capture.%s(out=False, in_=False)
-        cap.startall()
+        cap = capture.MultiCapture(out=False, in_=False,
+                                   Capture=capture.%s)
+        cap.start_capturing()

         logging.warn("hello1")
-        outerr = cap.suspend()
+        outerr = cap.readouterr()
         print ("suspend, captured %%s" %%(outerr,))
         logging.warn("hello2")

-        cap.resume()
+        cap.pop_outerr_to_orig()
         logging.warn("hello3")

-        outerr = cap.suspend()
+        outerr = cap.readouterr()
         print ("suspend2, captured %%s" %% (outerr,))
     """ % (method,))
     result = testdir.runpython(p)
-    result.stdout.fnmatch_lines([
-        "suspend, captured*hello1*",
-        "suspend2, captured*hello2*WARNING:root:hello3*",
-    ])
+    result.stdout.fnmatch_lines("""
+        suspend, captured*hello1*
+        suspend2, captured*WARNING:root:hello3*
+    """)
+    result.stderr.fnmatch_lines("""
+        WARNING:root:hello2
+    """)
     assert "atexit" not in result.stderr.str()


+def test_error_attribute_issue555(testdir):
+    testdir.makepyfile("""
+        import sys
+        def test_capattr():
+            assert sys.stdout.errors == "strict"
+            assert sys.stderr.errors == "strict"
+    """)
+    reprec = testdir.inline_run()
+    reprec.assertoutcome(passed=1)

@@ -79,6 +79,21 @@ class TestConfigCmdlineParsing:
         config = testdir.parseconfig()
         pytest.raises(AssertionError, lambda: config.parse([]))

+    def test_explicitly_specified_config_file_is_loaded(self, testdir):
+        testdir.makeconftest("""
+            def pytest_addoption(parser):
+                parser.addini("custom", "")
+        """)
+        testdir.makeini("""
+            [pytest]
+            custom = 0
+        """)
+        testdir.makefile(".cfg", custom="""
+            [pytest]
+            custom = 1
+        """)
+        config = testdir.parseconfig("-c", "custom.cfg")
+        assert config.getini("custom") == "1"

 class TestConfigAPI:
     def test_config_trace(self, testdir):
@@ -89,20 +104,6 @@ class TestConfigAPI:
         assert len(l) == 1
         assert l[0] == "hello [config]\n"

-    def test_config_getvalue_honours_conftest(self, testdir):
-        testdir.makepyfile(conftest="x=1")
-        testdir.mkdir("sub").join("conftest.py").write("x=2 ; y = 3")
-        config = testdir.parseconfig()
-        o = testdir.tmpdir
-        assert config.getvalue("x") == 1
-        assert config.getvalue("x", o.join('sub')) == 2
-        pytest.raises(KeyError, "config.getvalue('y')")
-        config = testdir.parseconfigure(str(o.join('sub')))
-        assert config.getvalue("x") == 2
-        assert config.getvalue("y") == 3
-        assert config.getvalue("x", o) == 1
-        pytest.raises(KeyError, 'config.getvalue("y", o)')
-
     def test_config_getoption(self, testdir):
         testdir.makeconftest("""
             def pytest_addoption(parser):
@@ -130,34 +131,29 @@ class TestConfigAPI:
             "config.getvalueorskip('hello')")
         verbose = config.getvalueorskip("verbose")
         assert verbose == config.option.verbose
-        config.option.hello = None
-        try:
-            config.getvalueorskip('hello')
-        except KeyboardInterrupt:
-            raise
-        except:
-            excinfo = py.code.ExceptionInfo()
-            frame = excinfo.traceback[-2].frame
-            assert frame.code.name == "getvalueorskip"
-            assert frame.eval("__tracebackhide__")
-
-    def test_config_overwrite(self, testdir):
-        o = testdir.tmpdir
-        o.ensure("conftest.py").write("x=1")
-        config = testdir.parseconfig(str(o))
-        assert config.getvalue('x') == 1
-        config.option.x = 2
-        assert config.getvalue('x') == 2
-        config = testdir.parseconfig([str(o)])
-        assert config.getvalue('x') == 1
+
+    def test_config_getvalueorskip_None(self, testdir):
+        testdir.makeconftest("""
+            def pytest_addoption(parser):
+                parser.addoption("--hello")
+        """)
+        config = testdir.parseconfig()
+        with pytest.raises(pytest.skip.Exception):
+            config.getvalueorskip('hello')
+
+    def test_getoption(self, testdir):
+        config = testdir.parseconfig()
+        with pytest.raises(ValueError):
+            config.getvalue('x')
+        assert config.getoption("x", 1) == 1

     def test_getconftest_pathlist(self, testdir, tmpdir):
         somepath = tmpdir.join("x", "y", "z")
         p = tmpdir.join("conftest.py")
         p.write("pathlist = ['.', %r]" % str(somepath))
         config = testdir.parseconfigure(p)
-        assert config._getconftest_pathlist('notexist') is None
-        pl = config._getconftest_pathlist('pathlist')
+        assert config._getconftest_pathlist('notexist', path=tmpdir) is None
+        pl = config._getconftest_pathlist('pathlist', path=tmpdir)
         print(pl)
         assert len(pl) == 2
         assert pl[0] == tmpdir
@@ -360,4 +356,43 @@ def test_load_initial_conftest_last_ordering(testdir):
     assert l[-2] == m.pytest_load_initial_conftests
     assert l[-3].__module__ == "_pytest.config"

+class TestWarning:
+    def test_warn_config(self, testdir):
+        testdir.makeconftest("""
+            l = []
+            def pytest_configure(config):
+                config.warn("C1", "hello")
+            def pytest_logwarning(code, message):
+                assert code == "C1"
+                assert message == "hello"
+                l.append(1)
+        """)
+        testdir.makepyfile("""
+            def test_proper(pytestconfig):
+                import conftest
+                assert conftest.l == [1]
+        """)
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=1)
+
+    def test_warn_on_test_item_from_request(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+
+            @pytest.fixture
+            def fix(request):
+                request.node.warn("T1", "hello")
+            def test_hello(fix):
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines("""
+            *1 warning*
+        """)
+        assert "hello" not in result.stdout.str()
+        result = testdir.runpytest("-rw")
+        result.stdout.fnmatch_lines("""
+            ===*warning summary*===
+            *WT1*test_warn_on_test_item*:5*hello*
+            *1 warning*
+        """)

@@ -1,42 +1,44 @@
 import py, pytest
 from _pytest.config import Conftest

-def pytest_generate_tests(metafunc):
-    if "basedir" in metafunc.fixturenames:
-        metafunc.addcall(param="global")
-        metafunc.addcall(param="inpackage")
-
-def pytest_funcarg__basedir(request):
-    def basedirmaker(request):
-        d = request.getfuncargvalue("tmpdir")
-        d.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
-        d.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
-        if request.param == "inpackage":
-            d.ensure("adir/__init__.py")
-            d.ensure("adir/b/__init__.py")
-        return d
-    return request.cached_setup(
-        lambda: basedirmaker(request), extrakey=request.param)
+@pytest.fixture(scope="module", params=["global", "inpackage"])
+def basedir(request):
+    from _pytest.tmpdir import tmpdir
+    tmpdir = tmpdir(request)
+    tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
+    tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
+    if request.param == "inpackage":
+        tmpdir.ensure("adir/__init__.py")
+        tmpdir.ensure("adir/b/__init__.py")
+    return tmpdir

 def ConftestWithSetinitial(path):
     conftest = Conftest()
-    conftest.setinitial([path])
+    conftest_setinitial(conftest, [path])
     return conftest

+def conftest_setinitial(conftest, args, confcutdir=None):
+    class Namespace:
+        def __init__(self):
+            self.file_or_dir = args
+            self.confcutdir = str(confcutdir)
+    conftest.setinitial(Namespace())
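
The new conftest_setinitial helper exists because Conftest.setinitial now expects a parsed-options object carrying file_or_dir and confcutdir attributes rather than a raw argument list, so the tests fabricate a minimal namespace. The equivalent using argparse.Namespace directly (a hypothetical illustration, not code from the diff):

    import argparse

    # what conftest_setinitial builds by hand:
    ns = argparse.Namespace(file_or_dir=["adir"], confcutdir="/tmp/project")
    # conftest.setinitial(ns) then collects conftest.py files for "adir",
    # never importing any conftest above confcutdir.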

 class TestConftestValueAccessGlobal:
     def test_basic_init(self, basedir):
         conftest = Conftest()
-        conftest.setinitial([basedir.join("adir")])
-        assert conftest.rget("a") == 1
+        p = basedir.join("adir")
+        assert conftest.rget_with_confmod("a", p)[1] == 1

     def test_onimport(self, basedir):
         l = []
         conftest = Conftest(onimport=l.append)
-        conftest.setinitial([basedir.join("adir"),
-                             '--confcutdir=%s' % basedir])
+        adir = basedir.join("adir")
+        conftest_setinitial(conftest, [adir], confcutdir=basedir)
         assert len(l) == 1
-        assert conftest.rget("a") == 1
-        assert conftest.rget("b", basedir.join("adir", "b")) == 2
+        assert conftest.rget_with_confmod("a", adir)[1] == 1
+        assert conftest.rget_with_confmod("b", adir.join("b"))[1] == 2
         assert len(l) == 2

     def test_immediate_initialiation_and_incremental_are_the_same(self, basedir):
@@ -50,37 +52,16 @@ class TestConftestValueAccessGlobal:
         conftest.getconftestmodules(basedir.join('b'))
         assert len(conftest._path2confmods) == snap1 + 2

-    def test_default_has_lower_prio(self, basedir):
-        conftest = ConftestWithSetinitial(basedir.join("adir"))
-        assert conftest.rget('Directory') == 3
-        #assert conftest.lget('Directory') == pytest.Directory
-
     def test_value_access_not_existing(self, basedir):
         conftest = ConftestWithSetinitial(basedir)
-        pytest.raises(KeyError, lambda: conftest.rget('a'))
-        #pytest.raises(KeyError, "conftest.lget('a')")
+        with pytest.raises(KeyError):
+            conftest.rget_with_confmod('a', basedir)

     def test_value_access_by_path(self, basedir):
         conftest = ConftestWithSetinitial(basedir)
-        assert conftest.rget("a", basedir.join('adir')) == 1
-        #assert conftest.lget("a", basedir.join('adir')) == 1
-        assert conftest.rget("a", basedir.join('adir', 'b')) == 1.5
-        #assert conftest.lget("a", basedir.join('adir', 'b')) == 1
-        #assert conftest.lget("b", basedir.join('adir', 'b')) == 2
-        #assert pytest.raises(KeyError,
-        #    'conftest.lget("b", basedir.join("a"))'
-        #)
-
-    def test_value_access_with_init_one_conftest(self, basedir):
-        conftest = ConftestWithSetinitial(basedir.join('adir'))
-        assert conftest.rget("a") == 1
-        #assert conftest.lget("a") == 1
-
-    def test_value_access_with_init_two_conftests(self, basedir):
-        conftest = ConftestWithSetinitial(basedir.join("adir", "b"))
-        conftest.rget("a") == 1.5
-        #conftest.lget("a") == 1
-        #conftest.lget("b") == 1
+        adir = basedir.join("adir")
+        assert conftest.rget_with_confmod("a", adir)[1] == 1
+        assert conftest.rget_with_confmod("a", adir.join("b"))[1] == 1.5

     def test_value_access_with_confmod(self, basedir):
         startdir = basedir.join("adir", "b")
@@ -99,13 +80,13 @@ def test_conftest_in_nonpkg_with_init(tmpdir):
     tmpdir.ensure("adir-1.0/__init__.py")
     ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))

-def test_doubledash_not_considered(testdir):
+def test_doubledash_considered(testdir):
     conf = testdir.mkdir("--option")
     conf.join("conftest.py").ensure()
     conftest = Conftest()
-    conftest.setinitial([conf.basename, conf.basename])
-    l = conftest.getconftestmodules(None)
-    assert len(l) == 0
+    conftest_setinitial(conftest, [conf.basename, conf.basename])
+    l = conftest.getconftestmodules(conf)
+    assert len(l) == 1

 def test_issue151_load_all_conftests(testdir):
     names = "code proj src".split()
@@ -114,7 +95,7 @@ def test_issue151_load_all_conftests(testdir):
         p.ensure("conftest.py")

     conftest = Conftest()
-    conftest.setinitial(names)
+    conftest_setinitial(conftest, names)
     d = list(conftest._conftestpath2mod.values())
     assert len(d) == len(names)

@@ -142,8 +123,8 @@ def test_conftest_global_import(testdir):
 def test_conftestcutdir(testdir):
     conf = testdir.makeconftest("")
     p = testdir.mkdir("x")
-    conftest = Conftest(confcutdir=p)
-    conftest.setinitial([testdir.tmpdir])
+    conftest = Conftest()
+    conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)
     l = conftest.getconftestmodules(p)
     assert len(l) == 0
     l = conftest.getconftestmodules(conf.dirpath())
@@ -160,34 +141,18 @@ def test_conftestcutdir(testdir):

 def test_conftestcutdir_inplace_considered(testdir):
     conf = testdir.makeconftest("")
-    conftest = Conftest(confcutdir=conf.dirpath())
-    conftest.setinitial([conf.dirpath()])
+    conftest = Conftest()
+    conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
     l = conftest.getconftestmodules(conf.dirpath())
     assert len(l) == 1
     assert l[0].__file__.startswith(str(conf))

-def test_setinitial_confcut(testdir):
-    conf = testdir.makeconftest("")
-    sub = testdir.mkdir("sub")
-    sub.chdir()
-    for opts in (["--confcutdir=%s" % sub, sub],
-                 [sub, "--confcutdir=%s" % sub],
-                 ["--confcutdir=.", sub],
-                 [sub, "--confcutdir", sub],
-                 [str(sub), "--confcutdir", "."],
-                 ):
-        conftest = Conftest()
-        conftest.setinitial(opts)
-        assert conftest._confcutdir == sub
-        assert conftest.getconftestmodules(sub) == []
-        assert conftest.getconftestmodules(conf.dirpath()) == []
-
 @pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split())
 def test_setinitial_conftest_subdirs(testdir, name):
     sub = testdir.mkdir(name)
     subconftest = sub.ensure("conftest.py")
     conftest = Conftest()
-    conftest.setinitial([sub.dirpath(), '--confcutdir=%s' % testdir.tmpdir])
+    conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
     if name not in ('whatever', '.dotdir'):
         assert subconftest in conftest._conftestpath2mod
         assert len(conftest._conftestpath2mod) == 1
@@ -205,6 +170,26 @@ def test_conftest_confcutdir(testdir):
     result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
     result.stdout.fnmatch_lines(["*--xyz*"])

+def test_conftest_existing_resultlog(testdir):
+    x = testdir.mkdir("tests")
+    x.join("conftest.py").write(py.code.Source("""
+        def pytest_addoption(parser):
+            parser.addoption("--xyz", action="store_true")
+    """))
+    testdir.makefile(ext=".log", result="")  # Writes result.log
+    result = testdir.runpytest("-h", "--resultlog", "result.log")
+    result.stdout.fnmatch_lines(["*--xyz*"])
+
+def test_conftest_existing_junitxml(testdir):
+    x = testdir.mkdir("tests")
+    x.join("conftest.py").write(py.code.Source("""
+        def pytest_addoption(parser):
+            parser.addoption("--xyz", action="store_true")
+    """))
+    testdir.makefile(ext=".xml", junit="")  # Writes junit.xml
+    result = testdir.runpytest("-h", "--junitxml", "junit.xml")
+    result.stdout.fnmatch_lines(["*--xyz*"])

 def test_conftest_import_order(testdir, monkeypatch):
     ct1 = testdir.makeconftest("")
     sub = testdir.mkdir("sub")
@@ -252,3 +237,21 @@ def test_fixture_dependency(testdir, monkeypatch):
     """))
     result = testdir.runpytest("sub")
     result.stdout.fnmatch_lines(["*1 passed*"])
+
+
+def test_conftest_found_with_double_dash(testdir):
+    sub = testdir.mkdir("sub")
+    sub.join("conftest.py").write(py.std.textwrap.dedent("""
+        def pytest_addoption(parser):
+            parser.addoption("--hello-world", action="store_true")
+    """))
+    p = sub.join("test_hello.py")
+    p.write(py.std.textwrap.dedent("""
+        import pytest
+        def test_hello(found):
+            assert found == 1
+    """))
+    result = testdir.runpytest(str(p) + "::test_hello", "-h")
+    result.stdout.fnmatch_lines("""
+        *--hello-world*
+    """)

@@ -43,11 +43,11 @@ class TestBootstrapping:
         """)
         p.copy(p.dirpath("skipping2.py"))
         monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
-        result = testdir.runpytest("-p", "skipping1", "--traceconfig")
+        result = testdir.runpytest("-rw", "-p", "skipping1", "--traceconfig")
         assert result.ret == 0
         result.stdout.fnmatch_lines([
-            "*hint*skipping1*hello*",
-            "*hint*skipping2*hello*",
+            "WI1*skipped plugin*skipping1*hello*",
+            "WI1*skipped plugin*skipping2*hello*",
         ])

     def test_consider_env_plugin_instantiation(self, testdir, monkeypatch):
@@ -523,6 +523,95 @@ class TestMultiCall:
         res = MultiCall([m1, m2], {}).execute()
         assert res == [1]

+    def test_hookwrapper(self):
+        l = []
+        def m1():
+            l.append("m1 init")
+            yield None
+            l.append("m1 finish")
+        m1.hookwrapper = True
+
+        def m2():
+            l.append("m2")
+            return 2
+        res = MultiCall([m2, m1], {}).execute()
+        assert res == [2]
+        assert l == ["m1 init", "m2", "m1 finish"]
+        l[:] = []
+        res = MultiCall([m2, m1], {}, firstresult=True).execute()
+        assert res == 2
+        assert l == ["m1 init", "m2", "m1 finish"]
+
+    def test_hookwrapper_order(self):
+        l = []
+        def m1():
+            l.append("m1 init")
+            yield 1
+            l.append("m1 finish")
+        m1.hookwrapper = True
+
+        def m2():
+            l.append("m2 init")
+            yield 2
+            l.append("m2 finish")
+        m2.hookwrapper = True
+        res = MultiCall([m2, m1], {}).execute()
+        assert res == [1, 2]
+        assert l == ["m1 init", "m2 init", "m2 finish", "m1 finish"]
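
These two tests pin down the hookwrapper protocol: a wrapper is a generator that must yield exactly once, its pre-yield code runs before the plain hook implementations, its post-yield code runs after them, and multiple wrappers unwind in reverse order like nested context managers. A simplified driver showing that call order (a sketch of the idea, not the real MultiCall, which also handles call arguments, firstresult, and error propagation):

    def execute_sketch(methods):
        # hooks run last-registered-first; wrappers bracket the plain hooks
        wrappers, results = [], []
        for m in reversed(methods):
            if getattr(m, "hookwrapper", False):
                gen = m()
                val = next(gen)          # wrapper setup, up to its single yield
                if val is not None:
                    results.append(val)
                wrappers.append(gen)
            else:
                val = m()
                if val is not None:
                    results.append(val)
        for gen in reversed(wrappers):   # teardown in reverse setup order
            try:
                next(gen)                # resume after the yield
            except StopIteration:
                pass
        return results

With m1 and m2 defined as in test_hookwrapper_order above, execute_sketch([m2, m1]) reproduces both the [1, 2] result list and the "m1 init", "m2 init", "m2 finish", "m1 finish" ordering.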

+    def test_listattr_hookwrapper_ordering(self):
+        class P1:
+            @pytest.mark.hookwrapper
+            def m(self):
+                return 17
+
+        class P2:
+            def m(self):
+                return 23
+
+        class P3:
+            @pytest.mark.tryfirst
+            def m(self):
+                return 19
+
+        pluginmanager = PluginManager()
+        p1 = P1()
+        p2 = P2()
+        p3 = P3()
+        pluginmanager.register(p1)
+        pluginmanager.register(p2)
+        pluginmanager.register(p3)
+        methods = pluginmanager.listattr('m')
+        assert methods == [p2.m, p3.m, p1.m]
+        ## listattr keeps a cache and deleting
+        ## a function attribute requires clearing it
+        #pluginmanager._listattrcache.clear()
+        #del P1.m.__dict__['tryfirst']
+
+    def test_hookwrapper_not_yield(self):
+        def m1():
+            pass
+        m1.hookwrapper = True
+
+        mc = MultiCall([m1], {})
+        with pytest.raises(mc.WrongHookWrapper) as ex:
+            mc.execute()
+        assert ex.value.func == m1
+        assert ex.value.message
+
+    def test_hookwrapper_too_many_yield(self):
+        def m1():
+            yield 1
+            yield 2
+        m1.hookwrapper = True
+
+        mc = MultiCall([m1], {})
+        with pytest.raises(mc.WrongHookWrapper) as ex:
+            mc.execute()
+        assert ex.value.func == m1
+        assert ex.value.message


 class TestHookRelay:
     def test_happypath(self):
         pm = PluginManager()

@@ -80,6 +80,8 @@ class TestDoctests:
         assert isinstance(items[0].parent, DoctestModule)
         assert items[0].parent is items[1].parent

+    @pytest.mark.xfail('hasattr(sys, "pypy_version_info")', reason=
+        "pypy leaks one FD")
     def test_simple_doctestfile(self, testdir):
         p = testdir.maketxtfile(test_doc="""
             >>> x = 1

Some files were not shown because too many files have changed in this diff.