Compare commits
132 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
69fc6987ad | ||
|
|
0790f7a75f | ||
|
|
db8fbe7661 | ||
|
|
91c41cd6b3 | ||
|
|
1bf1cfd07a | ||
|
|
51d94a4a6e | ||
|
|
e18abfd013 | ||
|
|
6c7ea8191f | ||
|
|
329dca42a7 | ||
|
|
0362aaba5a | ||
|
|
948dea8bb4 | ||
|
|
6155e9139d | ||
|
|
6dd8405aed | ||
|
|
c076f4e789 | ||
|
|
d32a132b51 | ||
|
|
0e3779b14f | ||
|
|
fe1c35f8d0 | ||
|
|
b4588f1798 | ||
|
|
64c7c1be15 | ||
|
|
1c817aa7bd | ||
|
|
d02eaa8881 | ||
|
|
b92176024c | ||
|
|
1c746e0819 | ||
|
|
166aae4418 | ||
|
|
58933aac2a | ||
|
|
45aa4e5229 | ||
|
|
e643e99586 | ||
|
|
9f6d6f630d | ||
|
|
812ba87f37 | ||
|
|
2b0887fa5f | ||
|
|
ee8d2f9950 | ||
|
|
51d29cf4c6 | ||
|
|
e378496b24 | ||
|
|
4d21274a29 | ||
|
|
705442cf4e | ||
|
|
87b4cb283f | ||
|
|
83505b790d | ||
|
|
2ca6d9f039 | ||
|
|
87b8769680 | ||
|
|
78e7d7aed0 | ||
|
|
68b353be0d | ||
|
|
a756dc8106 | ||
|
|
604e27658c | ||
|
|
dfa273dc25 | ||
|
|
5263656df6 | ||
|
|
d88fe07377 | ||
|
|
2e23057804 | ||
|
|
303f49a5ad | ||
|
|
adbbd164ff | ||
|
|
93424b0f9c | ||
|
|
fb7706d4c7 | ||
|
|
4131923c0f | ||
|
|
7b95af2400 | ||
|
|
eb6481c663 | ||
|
|
c126cac98d | ||
|
|
e3a8b1e062 | ||
|
|
fa6d5bd15b | ||
|
|
f2c8a837af | ||
|
|
ccc1b21ebd | ||
|
|
85f2a78005 | ||
|
|
e21202b730 | ||
|
|
dc0535f7d5 | ||
|
|
f2791988f9 | ||
|
|
8e83af1c33 | ||
|
|
268c051eba | ||
|
|
03cb37b1eb | ||
|
|
d5c3265763 | ||
|
|
13e0340350 | ||
|
|
5093d8b925 | ||
|
|
40187ec9bb | ||
|
|
f5f8695587 | ||
|
|
27f5213718 | ||
|
|
b83a3bcc80 | ||
|
|
3a3f69372f | ||
|
|
4a08ee2b74 | ||
|
|
82ba764bb6 | ||
|
|
94e31e414a | ||
|
|
a94a6b4282 | ||
|
|
af0edf0d10 | ||
|
|
8307270cec | ||
|
|
c4fe622b82 | ||
|
|
b28977fbaf | ||
|
|
96cb1208d3 | ||
|
|
0c8e71faa5 | ||
|
|
d965101f6a | ||
|
|
d15ee2fb87 | ||
|
|
826d1e6153 | ||
|
|
50c9e3f654 | ||
|
|
59b8ea1746 | ||
|
|
cf02fb60c1 | ||
|
|
679d72eedf | ||
|
|
03b23e2587 | ||
|
|
48e6823c7a | ||
|
|
6b4e6eee09 | ||
|
|
f7648e11d8 | ||
|
|
7bb7d1205c | ||
|
|
a1d41c6811 | ||
|
|
58e0301f87 | ||
|
|
a5e7b2760d | ||
|
|
efe438d3e8 | ||
|
|
ec0565fac5 | ||
|
|
48a6a504b6 | ||
|
|
8f55425898 | ||
|
|
a51e52aee3 | ||
|
|
69dfc75572 | ||
|
|
9d3e51af9f | ||
|
|
f7c1b9087a | ||
|
|
36c42b5c15 | ||
|
|
bc8ee95e72 | ||
|
|
979dfd20f2 | ||
|
|
67fbd24ebf | ||
|
|
7f7589afa9 | ||
|
|
4f01cda2a7 | ||
|
|
bd296c796f | ||
|
|
7144cec580 | ||
|
|
99a1188287 | ||
|
|
0b18b6094e | ||
|
|
ae53d04780 | ||
|
|
a324826dfd | ||
|
|
29bf205f3a | ||
|
|
3b9fd3abd8 | ||
|
|
974e4e3a9d | ||
|
|
369b7709f7 | ||
|
|
78438db752 | ||
|
|
a2f4a11301 | ||
|
|
077c468589 | ||
|
|
d4fe273b2f | ||
|
|
761a95e542 | ||
|
|
5ae04397bd | ||
|
|
2c230f910d | ||
|
|
ae54151467 | ||
|
|
05af53d160 |
5
.hgtags
5
.hgtags
@@ -43,3 +43,8 @@ c777dcad166548b7499564cb49ae5c8b4b07f935 2.0.3
|
||||
363e5a5a59c803e6bc176a6f9cc4bf1a1ca2dab0 2.0.3
|
||||
e5e1746a197f0398356a43fbe2eebac9690f795d 2.1.0
|
||||
5864412c6f3c903384243bd315639d101d7ebc67 2.1.2
|
||||
12a05d59249f80276e25fd8b96e8e545b1332b7a 2.1.3
|
||||
1522710369337d96bf9568569d5f0ca9b38a74e0 2.2.0
|
||||
3da8cec6c5326ed27c144c9b6d7a64a648370005 2.2.1
|
||||
92b916483c1e65a80dc80e3f7816b39e84b36a4d 2.2.2
|
||||
3c11c5c9776f3c678719161e96cc0a08169c1cb8 2.2.3
|
||||
|
||||
1
AUTHORS
1
AUTHORS
@@ -22,3 +22,4 @@ Jan Balster
|
||||
Grig Gheorghiu
|
||||
Bob Ippolito
|
||||
Christian Tismer
|
||||
Daniel Nuri
|
||||
|
||||
94
CHANGELOG
94
CHANGELOG
@@ -1,3 +1,97 @@
|
||||
Changes between 2.2.3 and 2.2.4
|
||||
-----------------------------------
|
||||
|
||||
- fix error message for rewritten assertions involving the % operator
|
||||
- fix issue 126: correctly match all invalid xml characters for junitxml
|
||||
binary escape
|
||||
- fix issue with unittest: now @unittest.expectedFailure markers should
|
||||
be processed correctly (you can also use @pytest.mark markers)
|
||||
- document integration with the extended distribute/setuptools test commands
|
||||
- fix issue 140: propperly get the real functions
|
||||
of bound classmethods for setup/teardown_class
|
||||
- fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net
|
||||
- fix issue #143: call unconfigure/sessionfinish always when
|
||||
configure/sessionstart where called
|
||||
- fix issue #144: better mangle test ids to junitxml classnames
|
||||
- upgrade distribute_setup.py to 0.6.27
|
||||
|
||||
Changes between 2.2.2 and 2.2.3
|
||||
----------------------------------------
|
||||
|
||||
- fix uploaded package to only include neccesary files
|
||||
|
||||
Changes between 2.2.1 and 2.2.2
|
||||
----------------------------------------
|
||||
|
||||
- fix issue101: wrong args to unittest.TestCase test function now
|
||||
produce better output
|
||||
- fix issue102: report more useful errors and hints for when a
|
||||
test directory was renamed and some pyc/__pycache__ remain
|
||||
- fix issue106: allow parametrize to be applied multiple times
|
||||
e.g. from module, class and at function level.
|
||||
- fix issue107: actually perform session scope finalization
|
||||
- don't check in parametrize if indirect parameters are funcarg names
|
||||
- add chdir method to monkeypatch funcarg
|
||||
- fix crash resulting from calling monkeypatch undo a second time
|
||||
- fix issue115: make --collectonly robust against early failure
|
||||
(missing files/directories)
|
||||
- "-qq --collectonly" now shows only files and the number of tests in them
|
||||
- "-q --collectonly" now shows test ids
|
||||
- allow adding of attributes to test reports such that it also works
|
||||
with distributed testing (no upgrade of pytest-xdist needed)
|
||||
|
||||
Changes between 2.2.0 and 2.2.1
|
||||
----------------------------------------
|
||||
|
||||
- fix issue99 (in pytest and py) internallerrors with resultlog now
|
||||
produce better output - fixed by normalizing pytest_internalerror
|
||||
input arguments.
|
||||
- fix issue97 / traceback issues (in pytest and py) improve traceback output
|
||||
in conjunction with jinja2 and cython which hack tracebacks
|
||||
- fix issue93 (in pytest and pytest-xdist) avoid "delayed teardowns":
|
||||
the final test in a test node will now run its teardown directly
|
||||
instead of waiting for the end of the session. Thanks Dave Hunt for
|
||||
the good reporting and feedback. The pytest_runtest_protocol as well
|
||||
as the pytest_runtest_teardown hooks now have "nextitem" available
|
||||
which will be None indicating the end of the test run.
|
||||
- fix collection crash due to unknown-source collected items, thanks
|
||||
to Ralf Schmitt (fixed by depending on a more recent pylib)
|
||||
|
||||
Changes between 2.1.3 and 2.2.0
|
||||
----------------------------------------
|
||||
|
||||
- fix issue90: introduce eager tearing down of test items so that
|
||||
teardown function are called earlier.
|
||||
- add an all-powerful metafunc.parametrize function which allows to
|
||||
parametrize test function arguments in multiple steps and therefore
|
||||
from indepdenent plugins and palces.
|
||||
- add a @pytest.mark.parametrize helper which allows to easily
|
||||
call a test function with different argument values
|
||||
- Add examples to the "parametrize" example page, including a quick port
|
||||
of Test scenarios and the new parametrize function and decorator.
|
||||
- introduce registration for "pytest.mark.*" helpers via ini-files
|
||||
or through plugin hooks. Also introduce a "--strict" option which
|
||||
will treat unregistered markers as errors
|
||||
allowing to avoid typos and maintain a well described set of markers
|
||||
for your test suite. See exaples at http://pytest.org/latest/mark.html
|
||||
and its links.
|
||||
- issue50: introduce "-m marker" option to select tests based on markers
|
||||
(this is a stricter and more predictable version of '-k' in that "-m"
|
||||
only matches complete markers and has more obvious rules for and/or
|
||||
semantics.
|
||||
- new feature to help optimizing the speed of your tests:
|
||||
--durations=N option for displaying N slowest test calls
|
||||
and setup/teardown methods.
|
||||
- fix issue87: --pastebin now works with python3
|
||||
- fix issue89: --pdb with unexpected exceptions in doctest work more sensibly
|
||||
- fix and cleanup pytest's own test suite to not leak FDs
|
||||
- fix issue83: link to generated funcarg list
|
||||
- fix issue74: pyarg module names are now checked against imp.find_module false positives
|
||||
- fix compatibility with twisted/trial-11.1.0 use cases
|
||||
- simplify Node.listchain
|
||||
- simplify junitxml output code by relying on py.xml
|
||||
- add support for skip properties on unittest classes and functions
|
||||
|
||||
Changes between 2.1.2 and 2.1.3
|
||||
----------------------------------------
|
||||
|
||||
|
||||
241
ISSUES.txt
241
ISSUES.txt
@@ -1,3 +1,125 @@
|
||||
|
||||
improve / add to dependency/test resource injection
|
||||
-------------------------------------------------------------
|
||||
tags: wish feature docs
|
||||
|
||||
write up better examples showing the connection between
|
||||
the two.
|
||||
|
||||
refine parametrize API
|
||||
-------------------------------------------------------------
|
||||
tags: critical feature
|
||||
|
||||
extend metafunc.parametrize to directly support indirection, example:
|
||||
|
||||
def setupdb(request, config):
|
||||
# setup "resource" based on test request and the values passed
|
||||
# in to parametrize. setupfunc is called for each such value.
|
||||
# you may use request.addfinalizer() or request.cached_setup ...
|
||||
return dynamic_setup_database(val)
|
||||
|
||||
@pytest.mark.parametrize("db", ["pg", "mysql"], setupfunc=setupdb)
|
||||
def test_heavy_functional_test(db):
|
||||
...
|
||||
|
||||
There would be no need to write or explain funcarg factories and
|
||||
their special __ syntax.
|
||||
|
||||
The examples and improvements should also show how to put the parametrize
|
||||
decorator to a class, to a module or even to a directory. For the directory
|
||||
part a conftest.py content like this::
|
||||
|
||||
pytestmark = [
|
||||
@pytest.mark.parametrize_setup("db", ...),
|
||||
]
|
||||
|
||||
probably makes sense in order to keep the declarative nature. This mirrors
|
||||
the marker-mechanism with respect to a test module but puts it to a directory
|
||||
scale.
|
||||
|
||||
When doing larger scoped parametrization it probably becomes neccessary
|
||||
to allow parametrization to be ignored if the according parameter is not
|
||||
used (currently any parametrized argument that is not present in a function will cause a ValueError). Example:
|
||||
|
||||
@pytest.mark.parametrize("db", ..., mustmatch=False)
|
||||
|
||||
means to not raise an error but simply ignore the parametrization
|
||||
if the signature of a decorated function does not match. XXX is it
|
||||
not sufficient to always allow non-matches?
|
||||
|
||||
|
||||
unify item/request classes, generalize items
|
||||
---------------------------------------------------------------
|
||||
tags: 2.4 wish
|
||||
|
||||
in lieu of extended parametrization and the new way to specify resource
|
||||
factories in terms of the parametrize decorator, consider unification
|
||||
of the item and request class. This also is connected with allowing
|
||||
funcargs in setup functions. Example of new item API:
|
||||
|
||||
item.getresource("db") # alias for request.getfuncargvalue
|
||||
item.addfinalizer(...)
|
||||
item.cached_setup(...)
|
||||
item.applymarker(...)
|
||||
|
||||
test classes/modules could then use this api via::
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
use item API ...
|
||||
|
||||
introduction of this new method needs to be _fully_ backward compatible -
|
||||
and the documentation needs to change along to mention this new way of
|
||||
doing things.
|
||||
|
||||
impl note: probably Request._fillfuncargs would be called from the
|
||||
python plugins own pytest_runtest_setup(item) and would call
|
||||
item.getresource(X) for all X in the funcargs of a function.
|
||||
|
||||
XXX is it possible to even put the above item API to Nodes, i.e. also
|
||||
to Directorty/module/file/class collectors? Problem is that current
|
||||
funcarg factories presume they are called with a per-function (even
|
||||
per-funcarg-per-function) scope. Could there be small tweaks to the new
|
||||
API that lift this restriction?
|
||||
|
||||
consider::
|
||||
|
||||
def setup_class(cls, tmpdir):
|
||||
# would get a per-class tmpdir because tmpdir parametrization
|
||||
# would know that it is called with a class scope
|
||||
#
|
||||
#
|
||||
#
|
||||
this looks very difficult because those setup functions are also used
|
||||
by nose etc. Rather consider introduction of a new setup hook:
|
||||
|
||||
def setup_test(self, item):
|
||||
self.db = item.cached_setup(..., scope='class')
|
||||
self.tmpdir = item.getresource("tmpdir")
|
||||
|
||||
this should be compatible to unittest/nose and provide much of what
|
||||
"testresources" provide. XXX This would not allow full parametrization
|
||||
such that test function could be run multiple times with different
|
||||
values. See "parametrized attributes" issue.
|
||||
|
||||
allow parametrized attributes on classes
|
||||
--------------------------------------------------
|
||||
|
||||
tags: wish 2.4
|
||||
|
||||
example:
|
||||
|
||||
@pytest.mark.parametrize_attr("db", setupfunc, [1,2,3], scope="class")
|
||||
@pytest.mark.parametrize_attr("tmp", setupfunc, scope="...")
|
||||
class TestMe:
|
||||
def test_hello(self):
|
||||
access self.db ...
|
||||
|
||||
this would run the test_hello() function three times with three
|
||||
different values for self.db. This could also work with unittest/nose
|
||||
style tests, i.e. it leverages existing test suites without needing
|
||||
to rewrite them. Together with the previously mentioned setup_test()
|
||||
maybe the setupfunc could be ommitted?
|
||||
|
||||
checks / deprecations for next release
|
||||
---------------------------------------------------------------
|
||||
tags: bug 2.4 core xdist
|
||||
@@ -25,21 +147,9 @@ appropriately to avoid this issue. Moreover/Alternatively, we could
|
||||
record which implementations of a hook succeeded and only call their
|
||||
teardown.
|
||||
|
||||
do early-teardown of test modules
|
||||
-----------------------------------------
|
||||
tags: feature 2.2
|
||||
|
||||
currently teardowns are called when the next tests is setup
|
||||
except for the function/method level where interally
|
||||
"teardown_exact" tears down immediately. Generalize
|
||||
this to perform the "neccessary" teardown compared to
|
||||
the "next" test item during teardown - this should
|
||||
get rid of some irritations because otherwise e.g.
|
||||
prints of teardown-code appear in the setup of the next test.
|
||||
|
||||
consider and document __init__ file usage in test directories
|
||||
---------------------------------------------------------------
|
||||
tags: bug 2.2 core
|
||||
tags: bug core
|
||||
|
||||
Currently, a test module is imported with its fully qualified
|
||||
package path, determined by checking __init__ files upwards.
|
||||
@@ -54,7 +164,7 @@ certain scenarios makes sense.
|
||||
|
||||
relax requirement to have tests/testing contain an __init__
|
||||
----------------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
bb: http://bitbucket.org/hpk42/py-trunk/issue/64
|
||||
|
||||
A local test run of a "tests" directory may work
|
||||
@@ -65,7 +175,7 @@ i.e. port the nose-logic of unloading a test module.
|
||||
|
||||
customize test function collection
|
||||
-------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
- introduce py.test.mark.nocollect for not considering a function for
|
||||
test collection at all. maybe also introduce a py.test.mark.test to
|
||||
@@ -74,7 +184,7 @@ tags: feature 2.2
|
||||
|
||||
introduce pytest.mark.importorskip
|
||||
-------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
in addition to the imperative pytest.importorskip also introduce
|
||||
a pytest.mark.importorskip so that the test count is more correct.
|
||||
@@ -82,7 +192,7 @@ a pytest.mark.importorskip so that the test count is more correct.
|
||||
|
||||
introduce py.test.mark.platform
|
||||
-------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
Introduce nice-to-spell platform-skipping, examples:
|
||||
|
||||
@@ -99,44 +209,36 @@ interpreter versions.
|
||||
|
||||
pytest.mark.xfail signature change
|
||||
-------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
change to pytest.mark.xfail(reason, (optional)condition)
|
||||
to better implement the word meaning. It also signals
|
||||
better that we always have some kind of an implementation
|
||||
reason that can be formualated.
|
||||
Compatibility? Maybe rename to "pytest.mark.xfail"?
|
||||
|
||||
introduce py.test.mark registration
|
||||
-----------------------------------------
|
||||
tags: feature 2.2
|
||||
|
||||
introduce a hook that allows to register a named mark decorator
|
||||
with documentation and add "py.test --marks" to get
|
||||
a list of available marks. Deprecate "dynamic" mark
|
||||
definitions.
|
||||
Compatibility? how to introduce a new name/keep compat?
|
||||
|
||||
allow to non-intrusively apply skipfs/xfail/marks
|
||||
---------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
use case: mark a module or directory structures
|
||||
to be skipped on certain platforms (i.e. no import
|
||||
attempt will be made).
|
||||
|
||||
consider introducing a hook/mechanism that allows to apply marks
|
||||
from conftests or plugins.
|
||||
from conftests or plugins. (See extended parametrization)
|
||||
|
||||
|
||||
explicit referencing of conftest.py files
|
||||
-----------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
allow to name conftest.py files (in sub directories) that should
|
||||
be imported early, as to include command line options.
|
||||
|
||||
improve central py.test ini file
|
||||
----------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
introduce more declarative configuration options:
|
||||
- (to-be-collected test directories)
|
||||
@@ -147,49 +249,24 @@ introduce more declarative configuration options:
|
||||
|
||||
new documentation
|
||||
----------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
- logo py.test
|
||||
- examples for unittest or functional testing
|
||||
- resource management for functional testing
|
||||
- patterns: page object
|
||||
- parametrized testing
|
||||
- better / more integrated plugin docs
|
||||
|
||||
generalize parametrized testing to generate combinations
|
||||
-------------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
|
||||
think about extending metafunc.addcall or add a new method to allow to
|
||||
generate tests with combinations of all generated versions - what to do
|
||||
about "id" and "param" in such combinations though?
|
||||
|
||||
introduce py.test.mark.multi
|
||||
-----------------------------------------
|
||||
tags: feature 1.3
|
||||
|
||||
introduce py.test.mark.multi to specify a number
|
||||
of values for a given function argument.
|
||||
|
||||
have imported module mismatch honour relative paths
|
||||
--------------------------------------------------------
|
||||
tags: bug 2.2
|
||||
tags: bug
|
||||
|
||||
With 1.1.1 py.test fails at least on windows if an import
|
||||
is relative and compared against an absolute conftest.py
|
||||
path. Normalize.
|
||||
|
||||
call termination with small timeout
|
||||
-------------------------------------------------
|
||||
tags: feature 2.2
|
||||
test: testing/pytest/dist/test_dsession.py - test_terminate_on_hanging_node
|
||||
|
||||
Call gateway group termination with a small timeout if available.
|
||||
Should make dist-testing less likely to leave lost processes.
|
||||
|
||||
consider globals: py.test.ensuretemp and config
|
||||
--------------------------------------------------------------
|
||||
tags: experimental-wish 2.2
|
||||
tags: experimental-wish
|
||||
|
||||
consider deprecating py.test.ensuretemp and py.test.config
|
||||
to further reduce py.test globality. Also consider
|
||||
@@ -198,7 +275,7 @@ a plugin rather than being there from the start.
|
||||
|
||||
consider allowing funcargs for setup methods
|
||||
--------------------------------------------------------------
|
||||
tags: experimental-wish 2.2
|
||||
tags: experimental-wish
|
||||
|
||||
Users have expressed the wish to have funcargs available to setup
|
||||
functions. Experiment with allowing funcargs there - it might
|
||||
@@ -208,13 +285,20 @@ factories with a request object that not have a cls/function
|
||||
attributes. However, how to handle parametrized test functions
|
||||
and funcargs?
|
||||
|
||||
setup_function -> request can be like it is now
|
||||
setup_class -> request has no request.function
|
||||
setup_module -> request has no request.cls
|
||||
maybe introduce a setup method like:
|
||||
|
||||
setup_invocation(self, request)
|
||||
|
||||
which has full access to the test invocation through "request"
|
||||
through which you can get funcargvalues, use cached_setup etc.
|
||||
Therefore, the access to funcargs would be indirect but it
|
||||
could be consistently implemented. setup_invocation() would
|
||||
be a "glue" function for bringing together the xUnit and funcargs
|
||||
world.
|
||||
|
||||
consider pytest_addsyspath hook
|
||||
-----------------------------------------
|
||||
tags: 2.2
|
||||
tags:
|
||||
|
||||
py.test could call a new pytest_addsyspath() in order to systematically
|
||||
allow manipulation of sys.path and to inhibit it via --no-addsyspath
|
||||
@@ -226,7 +310,7 @@ and pytest_configure.
|
||||
|
||||
show plugin information in test header
|
||||
----------------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
Now that external plugins are becoming more numerous
|
||||
it would be useful to have external plugins along with
|
||||
@@ -234,7 +318,7 @@ their versions displayed as a header line.
|
||||
|
||||
deprecate global py.test.config usage
|
||||
----------------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
py.test.ensuretemp and py.test.config are probably the last
|
||||
objects containing global state. Often using them is not
|
||||
@@ -244,9 +328,30 @@ as others.
|
||||
|
||||
remove deprecated bits in collect.py
|
||||
-------------------------------------------------------------------
|
||||
tags: feature 2.2
|
||||
tags: feature
|
||||
|
||||
In an effort to further simplify code, review and remove deprecated bits
|
||||
in collect.py. Probably good:
|
||||
- inline consider_file/dir methods, no need to have them
|
||||
subclass-overridable because of hooks
|
||||
|
||||
implement fslayout decorator
|
||||
---------------------------------
|
||||
tags: feature
|
||||
|
||||
Improve the way how tests can work with pre-made examples,
|
||||
keeping the layout close to the test function:
|
||||
|
||||
@pytest.mark.fslayout("""
|
||||
conftest.py:
|
||||
# empty
|
||||
tests/
|
||||
test_%(NAME)s: # becomes test_run1.py
|
||||
def test_function(self):
|
||||
pass
|
||||
""")
|
||||
def test_run(pytester, fslayout):
|
||||
p = fslayout.findone("test_*.py")
|
||||
result = pytester.runpytest(p)
|
||||
assert result.ret == 0
|
||||
assert result.passed == 1
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
#
|
||||
__version__ = '2.1.3'
|
||||
__version__ = '2.2.4'
|
||||
|
||||
@@ -50,7 +50,7 @@ def pytest_configure(config):
|
||||
hook = None
|
||||
if mode == "rewrite":
|
||||
hook = rewrite.AssertionRewritingHook()
|
||||
sys.meta_path.append(hook)
|
||||
sys.meta_path.insert(0, hook)
|
||||
warn_about_missing_assertion(mode)
|
||||
config._assertstate = AssertionState(config, mode)
|
||||
config._assertstate.hook = hook
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import py
|
||||
import sys, inspect
|
||||
from compiler import parse, ast, pycodegen
|
||||
from _pytest.assertion.util import format_explanation
|
||||
from _pytest.assertion.reinterpret import BuiltinAssertionError
|
||||
from _pytest.assertion.util import format_explanation, BuiltinAssertionError
|
||||
|
||||
passthroughex = py.builtin._sysex
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import sys
|
||||
import py
|
||||
|
||||
BuiltinAssertionError = py.builtin.builtins.AssertionError
|
||||
from _pytest.assertion.util import BuiltinAssertionError
|
||||
|
||||
class AssertionError(BuiltinAssertionError):
|
||||
def __init__(self, *args):
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
"""Rewrite assertion AST to produce nice error messages"""
|
||||
|
||||
import ast
|
||||
import collections
|
||||
import errno
|
||||
import itertools
|
||||
import imp
|
||||
@@ -298,7 +297,7 @@ binop_map = {
|
||||
ast.Mult : "*",
|
||||
ast.Div : "/",
|
||||
ast.FloorDiv : "//",
|
||||
ast.Mod : "%",
|
||||
ast.Mod : "%%", # escaped for string formatting
|
||||
ast.Eq : "==",
|
||||
ast.NotEq : "!=",
|
||||
ast.Lt : "<",
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
import py
|
||||
|
||||
BuiltinAssertionError = py.builtin.builtins.AssertionError
|
||||
|
||||
# The _reprcompare attribute on the util module is used by the new assertion
|
||||
# interpretation code and assertion rewriter to detect this plugin was
|
||||
|
||||
@@ -11,20 +11,23 @@ def pytest_addoption(parser):
|
||||
group._addoption('-s', action="store_const", const="no", dest="capture",
|
||||
help="shortcut for --capture=no.")
|
||||
|
||||
@pytest.mark.tryfirst
|
||||
def pytest_cmdline_parse(pluginmanager, args):
|
||||
# we want to perform capturing already for plugin/conftest loading
|
||||
if '-s' in args or "--capture=no" in args:
|
||||
method = "no"
|
||||
elif hasattr(os, 'dup') and '--capture=sys' not in args:
|
||||
method = "fd"
|
||||
else:
|
||||
method = "sys"
|
||||
capman = CaptureManager(method)
|
||||
pluginmanager.register(capman, "capturemanager")
|
||||
|
||||
def addouterr(rep, outerr):
|
||||
for secname, content in zip(["out", "err"], outerr):
|
||||
if content:
|
||||
rep.sections.append(("Captured std%s" % secname, content))
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
# registered in config.py during early conftest.py loading
|
||||
capman = config.pluginmanager.getplugin('capturemanager')
|
||||
while capman._method2capture:
|
||||
name, cap = capman._method2capture.popitem()
|
||||
# XXX logging module may wants to close it itself on process exit
|
||||
# otherwise we could do finalization here and call "reset()".
|
||||
cap.suspend()
|
||||
|
||||
class NoCapture:
|
||||
def startall(self):
|
||||
pass
|
||||
@@ -36,8 +39,9 @@ class NoCapture:
|
||||
return "", ""
|
||||
|
||||
class CaptureManager:
|
||||
def __init__(self):
|
||||
def __init__(self, defaultmethod=None):
|
||||
self._method2capture = {}
|
||||
self._defaultmethod = defaultmethod
|
||||
|
||||
def _maketempfile(self):
|
||||
f = py.std.tempfile.TemporaryFile()
|
||||
@@ -62,14 +66,6 @@ class CaptureManager:
|
||||
else:
|
||||
raise ValueError("unknown capturing method: %r" % method)
|
||||
|
||||
def _getmethod_preoptionparse(self, args):
|
||||
if '-s' in args or "--capture=no" in args:
|
||||
return "no"
|
||||
elif hasattr(os, 'dup') and '--capture=sys' not in args:
|
||||
return "fd"
|
||||
else:
|
||||
return "sys"
|
||||
|
||||
def _getmethod(self, config, fspath):
|
||||
if config.option.capture:
|
||||
method = config.option.capture
|
||||
@@ -82,16 +78,22 @@ class CaptureManager:
|
||||
method = "sys"
|
||||
return method
|
||||
|
||||
def reset_capturings(self):
|
||||
for name, cap in self._method2capture.items():
|
||||
cap.reset()
|
||||
|
||||
def resumecapture_item(self, item):
|
||||
method = self._getmethod(item.config, item.fspath)
|
||||
if not hasattr(item, 'outerr'):
|
||||
item.outerr = ('', '') # we accumulate outerr on the item
|
||||
return self.resumecapture(method)
|
||||
|
||||
def resumecapture(self, method):
|
||||
def resumecapture(self, method=None):
|
||||
if hasattr(self, '_capturing'):
|
||||
raise ValueError("cannot resume, already capturing with %r" %
|
||||
(self._capturing,))
|
||||
if method is None:
|
||||
method = self._defaultmethod
|
||||
cap = self._method2capture.get(method)
|
||||
self._capturing = method
|
||||
if cap is None:
|
||||
@@ -161,17 +163,6 @@ class CaptureManager:
|
||||
def pytest_runtest_teardown(self, item):
|
||||
self.resumecapture_item(item)
|
||||
|
||||
def pytest__teardown_final(self, __multicall__, session):
|
||||
method = self._getmethod(session.config, None)
|
||||
self.resumecapture(method)
|
||||
try:
|
||||
rep = __multicall__.execute()
|
||||
finally:
|
||||
outerr = self.suspendcapture()
|
||||
if rep:
|
||||
addouterr(rep, outerr)
|
||||
return rep
|
||||
|
||||
def pytest_keyboard_interrupt(self, excinfo):
|
||||
if hasattr(self, '_capturing'):
|
||||
self.suspendcapture()
|
||||
|
||||
@@ -11,8 +11,12 @@ def pytest_cmdline_parse(pluginmanager, args):
|
||||
return config
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
for func in config._cleanup:
|
||||
func()
|
||||
while 1:
|
||||
try:
|
||||
fin = config._cleanup.pop()
|
||||
except IndexError:
|
||||
break
|
||||
fin()
|
||||
|
||||
class Parser:
|
||||
""" Parser for command line arguments. """
|
||||
@@ -79,6 +83,7 @@ class Parser:
|
||||
self._inidict[name] = (help, type, default)
|
||||
self._ininames.append(name)
|
||||
|
||||
|
||||
class OptionGroup:
|
||||
def __init__(self, name, description="", parser=None):
|
||||
self.name = name
|
||||
@@ -254,11 +259,14 @@ class Config(object):
|
||||
self.hook = self.pluginmanager.hook
|
||||
self._inicache = {}
|
||||
self._cleanup = []
|
||||
|
||||
|
||||
@classmethod
|
||||
def fromdictargs(cls, option_dict, args):
|
||||
""" constructor useable for subprocesses. """
|
||||
config = cls()
|
||||
# XXX slightly crude way to initialize capturing
|
||||
import _pytest.capture
|
||||
_pytest.capture.pytest_cmdline_parse(config.pluginmanager, args)
|
||||
config._preparse(args, addopts=False)
|
||||
config.option.__dict__.update(option_dict)
|
||||
for x in config.option.plugins:
|
||||
@@ -283,11 +291,10 @@ class Config(object):
|
||||
|
||||
def _setinitialconftest(self, args):
|
||||
# capture output during conftest init (#issue93)
|
||||
from _pytest.capture import CaptureManager
|
||||
capman = CaptureManager()
|
||||
self.pluginmanager.register(capman, 'capturemanager')
|
||||
# will be unregistered in capture.py's unconfigure()
|
||||
capman.resumecapture(capman._getmethod_preoptionparse(args))
|
||||
# XXX introduce load_conftest hook to avoid needing to know
|
||||
# about capturing plugin here
|
||||
capman = self.pluginmanager.getplugin("capturemanager")
|
||||
capman.resumecapture()
|
||||
try:
|
||||
try:
|
||||
self._conftest.setinitial(args)
|
||||
@@ -340,6 +347,14 @@ class Config(object):
|
||||
args.append(py.std.os.getcwd())
|
||||
self.args = args
|
||||
|
||||
def addinivalue_line(self, name, line):
|
||||
""" add a line to an ini-file option. The option must have been
|
||||
declared but might not yet be set in which case the line becomes the
|
||||
the first line in its value. """
|
||||
x = self.getini(name)
|
||||
assert isinstance(x, list)
|
||||
x.append(line) # modifies the cached list inline
|
||||
|
||||
def getini(self, name):
|
||||
""" return configuration value from an ini file. If the
|
||||
specified name hasn't been registered through a prior ``parse.addini``
|
||||
|
||||
@@ -211,6 +211,14 @@ class PluginManager(object):
|
||||
self.register(mod, modname)
|
||||
self.consider_module(mod)
|
||||
|
||||
def pytest_configure(self, config):
|
||||
config.addinivalue_line("markers",
|
||||
"tryfirst: mark a hook implementation function such that the "
|
||||
"plugin machinery will try to call it first/as early as possible.")
|
||||
config.addinivalue_line("markers",
|
||||
"trylast: mark a hook implementation function such that the "
|
||||
"plugin machinery will try to call it last/as late as possible.")
|
||||
|
||||
def pytest_plugin_registered(self, plugin):
|
||||
import pytest
|
||||
dic = self.call_plugin(plugin, "pytest_namespace", {}) or {}
|
||||
@@ -313,13 +321,15 @@ def importplugin(importspec):
|
||||
name = importspec
|
||||
try:
|
||||
mod = "_pytest." + name
|
||||
return __import__(mod, None, None, '__doc__')
|
||||
__import__(mod)
|
||||
return sys.modules[mod]
|
||||
except ImportError:
|
||||
#e = py.std.sys.exc_info()[1]
|
||||
#if str(e).find(name) == -1:
|
||||
# raise
|
||||
pass #
|
||||
return __import__(importspec, None, None, '__doc__')
|
||||
__import__(importspec)
|
||||
return sys.modules[importspec]
|
||||
|
||||
class MultiCall:
|
||||
""" execute a call into multiple python functions/methods. """
|
||||
@@ -431,10 +441,7 @@ _preinit = []
|
||||
def _preloadplugins():
|
||||
_preinit.append(PluginManager(load=True))
|
||||
|
||||
def main(args=None, plugins=None):
|
||||
""" returned exit code integer, after an in-process testing run
|
||||
with the given command line arguments, preloading an optional list
|
||||
of passed in plugin objects. """
|
||||
def _prepareconfig(args=None, plugins=None):
|
||||
if args is None:
|
||||
args = sys.argv[1:]
|
||||
elif isinstance(args, py.path.local):
|
||||
@@ -448,17 +455,18 @@ def main(args=None, plugins=None):
|
||||
else: # subsequent calls to main will create a fresh instance
|
||||
_pluginmanager = PluginManager(load=True)
|
||||
hook = _pluginmanager.hook
|
||||
try:
|
||||
if plugins:
|
||||
for plugin in plugins:
|
||||
_pluginmanager.register(plugin)
|
||||
config = hook.pytest_cmdline_parse(
|
||||
pluginmanager=_pluginmanager, args=args)
|
||||
exitstatus = hook.pytest_cmdline_main(config=config)
|
||||
except UsageError:
|
||||
e = sys.exc_info()[1]
|
||||
sys.stderr.write("ERROR: %s\n" %(e.args[0],))
|
||||
exitstatus = 3
|
||||
if plugins:
|
||||
for plugin in plugins:
|
||||
_pluginmanager.register(plugin)
|
||||
return hook.pytest_cmdline_parse(
|
||||
pluginmanager=_pluginmanager, args=args)
|
||||
|
||||
def main(args=None, plugins=None):
|
||||
""" returned exit code integer, after an in-process testing run
|
||||
with the given command line arguments, preloading an optional list
|
||||
of passed in plugin objects. """
|
||||
config = _prepareconfig(args, plugins)
|
||||
exitstatus = config.hook.pytest_cmdline_main(config=config)
|
||||
return exitstatus
|
||||
|
||||
class UsageError(Exception):
|
||||
|
||||
@@ -56,6 +56,7 @@ def pytest_cmdline_main(config):
|
||||
elif config.option.help:
|
||||
config.pluginmanager.do_configure(config)
|
||||
showhelp(config)
|
||||
config.pluginmanager.do_unconfigure(config)
|
||||
return 0
|
||||
|
||||
def showhelp(config):
|
||||
@@ -113,7 +114,7 @@ def pytest_report_header(config):
|
||||
verinfo = getpluginversioninfo(config)
|
||||
if verinfo:
|
||||
lines.extend(verinfo)
|
||||
|
||||
|
||||
if config.option.traceconfig:
|
||||
lines.append("active plugins:")
|
||||
plugins = []
|
||||
|
||||
@@ -121,16 +121,23 @@ def pytest_generate_tests(metafunc):
|
||||
def pytest_itemstart(item, node=None):
|
||||
""" (deprecated, use pytest_runtest_logstart). """
|
||||
|
||||
def pytest_runtest_protocol(item):
|
||||
""" implements the standard runtest_setup/call/teardown protocol including
|
||||
capturing exceptions and calling reporting hooks on the results accordingly.
|
||||
def pytest_runtest_protocol(item, nextitem):
|
||||
""" implements the runtest_setup/call/teardown protocol for
|
||||
the given test item, including capturing exceptions and calling
|
||||
reporting hooks.
|
||||
|
||||
:arg item: test item for which the runtest protocol is performed.
|
||||
|
||||
:arg nexitem: the scheduled-to-be-next test item (or None if this
|
||||
is the end my friend). This argument is passed on to
|
||||
:py:func:`pytest_runtest_teardown`.
|
||||
|
||||
:return boolean: True if no further hook implementations should be invoked.
|
||||
"""
|
||||
pytest_runtest_protocol.firstresult = True
|
||||
|
||||
def pytest_runtest_logstart(nodeid, location):
|
||||
""" signal the start of a test run. """
|
||||
""" signal the start of running a single test item. """
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
""" called before ``pytest_runtest_call(item)``. """
|
||||
@@ -138,8 +145,14 @@ def pytest_runtest_setup(item):
|
||||
def pytest_runtest_call(item):
|
||||
""" called to execute the test ``item``. """
|
||||
|
||||
def pytest_runtest_teardown(item):
|
||||
""" called after ``pytest_runtest_call``. """
|
||||
def pytest_runtest_teardown(item, nextitem):
|
||||
""" called after ``pytest_runtest_call``.
|
||||
|
||||
:arg nexitem: the scheduled-to-be-next test item (None if no further
|
||||
test item is scheduled). This argument can be used to
|
||||
perform exact teardowns, i.e. calling just enough finalizers
|
||||
so that nextitem only needs to call setup-functions.
|
||||
"""
|
||||
|
||||
def pytest_runtest_makereport(item, call):
|
||||
""" return a :py:class:`_pytest.runner.TestReport` object
|
||||
@@ -149,15 +162,8 @@ def pytest_runtest_makereport(item, call):
|
||||
pytest_runtest_makereport.firstresult = True
|
||||
|
||||
def pytest_runtest_logreport(report):
|
||||
""" process item test report. """
|
||||
|
||||
# special handling for final teardown - somewhat internal for now
|
||||
def pytest__teardown_final(session):
|
||||
""" called before test session finishes. """
|
||||
pytest__teardown_final.firstresult = True
|
||||
|
||||
def pytest__teardown_final_logerror(report, session):
|
||||
""" called if runtest_teardown_final failed. """
|
||||
""" process a test setup/call/teardown report relating to
|
||||
the respective phase of executing a test. """
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# test session related hooks
|
||||
|
||||
@@ -25,21 +25,39 @@ except NameError:
|
||||
long = int
|
||||
|
||||
|
||||
class Junit(py.xml.Namespace):
|
||||
pass
|
||||
|
||||
|
||||
# We need to get the subset of the invalid unicode ranges according to
|
||||
# XML 1.0 which are valid in this python build. Hence we calculate
|
||||
# this dynamically instead of hardcoding it. The spec range of valid
|
||||
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
|
||||
# | [#x10000-#x10FFFF]
|
||||
_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19),
|
||||
(0xD800, 0xDFFF), (0xFDD0, 0xFFFF)]
|
||||
_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high))
|
||||
for (low, high) in _illegal_unichrs
|
||||
_legal_chars = (0x09, 0x0A, 0x0d)
|
||||
_legal_ranges = (
|
||||
(0x20, 0xD7FF),
|
||||
(0xE000, 0xFFFD),
|
||||
(0x10000, 0x10FFFF),
|
||||
)
|
||||
_legal_xml_re = [unicode("%s-%s") % (unichr(low), unichr(high))
|
||||
for (low, high) in _legal_ranges
|
||||
if low < sys.maxunicode]
|
||||
illegal_xml_re = re.compile(unicode('[%s]') %
|
||||
unicode('').join(_illegal_ranges))
|
||||
del _illegal_unichrs
|
||||
del _illegal_ranges
|
||||
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
|
||||
illegal_xml_re = re.compile(unicode('[^%s]') %
|
||||
unicode('').join(_legal_xml_re))
|
||||
del _legal_chars
|
||||
del _legal_ranges
|
||||
del _legal_xml_re
|
||||
|
||||
def bin_xml_escape(arg):
|
||||
def repl(matchobj):
|
||||
i = ord(matchobj.group())
|
||||
if i <= 0xFF:
|
||||
return unicode('#x%02X') % i
|
||||
else:
|
||||
return unicode('#x%04X') % i
|
||||
return illegal_xml_re.sub(repl, py.xml.escape(arg))
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting")
|
||||
@@ -63,120 +81,105 @@ def pytest_unconfigure(config):
|
||||
config.pluginmanager.unregister(xml)
|
||||
|
||||
|
||||
def mangle_testnames(names):
|
||||
names = [x.replace(".py", "") for x in names if x != '()']
|
||||
names[0] = names[0].replace("/", '.')
|
||||
return names
|
||||
|
||||
class LogXML(object):
|
||||
def __init__(self, logfile, prefix):
|
||||
logfile = os.path.expanduser(os.path.expandvars(logfile))
|
||||
self.logfile = os.path.normpath(logfile)
|
||||
self.prefix = prefix
|
||||
self.test_logs = []
|
||||
self.tests = []
|
||||
self.passed = self.skipped = 0
|
||||
self.failed = self.errors = 0
|
||||
|
||||
def _opentestcase(self, report):
|
||||
names = report.nodeid.split("::")
|
||||
names[0] = names[0].replace("/", '.')
|
||||
names = tuple(names)
|
||||
d = {'time': getattr(report, 'duration', 0)}
|
||||
names = [x.replace(".py", "") for x in names if x != "()"]
|
||||
names = mangle_testnames(report.nodeid.split("::"))
|
||||
classnames = names[:-1]
|
||||
if self.prefix:
|
||||
classnames.insert(0, self.prefix)
|
||||
d['classname'] = ".".join(classnames)
|
||||
d['name'] = py.xml.escape(names[-1])
|
||||
attrs = ['%s="%s"' % item for item in sorted(d.items())]
|
||||
self.test_logs.append("\n<testcase %s>" % " ".join(attrs))
|
||||
self.tests.append(Junit.testcase(
|
||||
classname=".".join(classnames),
|
||||
name=names[-1],
|
||||
time=getattr(report, 'duration', 0)
|
||||
))
|
||||
|
||||
def _closetestcase(self):
|
||||
self.test_logs.append("</testcase>")
|
||||
|
||||
def appendlog(self, fmt, *args):
|
||||
def repl(matchobj):
|
||||
i = ord(matchobj.group())
|
||||
if i <= 0xFF:
|
||||
return unicode('#x%02X') % i
|
||||
else:
|
||||
return unicode('#x%04X') % i
|
||||
args = tuple([illegal_xml_re.sub(repl, py.xml.escape(arg))
|
||||
for arg in args])
|
||||
self.test_logs.append(fmt % args)
|
||||
def append(self, obj):
|
||||
self.tests[-1].append(obj)
|
||||
|
||||
def append_pass(self, report):
|
||||
self.passed += 1
|
||||
self._opentestcase(report)
|
||||
self._closetestcase()
|
||||
|
||||
def append_failure(self, report):
|
||||
self._opentestcase(report)
|
||||
#msg = str(report.longrepr.reprtraceback.extraline)
|
||||
if "xfail" in report.keywords:
|
||||
self.appendlog(
|
||||
'<skipped message="xfail-marked test passes unexpectedly"/>')
|
||||
self.append(
|
||||
Junit.skipped(message="xfail-marked test passes unexpectedly"))
|
||||
self.skipped += 1
|
||||
else:
|
||||
sec = dict(report.sections)
|
||||
self.appendlog('<failure message="test failure">%s</failure>',
|
||||
report.longrepr)
|
||||
fail = Junit.failure(message="test failure")
|
||||
fail.append(str(report.longrepr))
|
||||
self.append(fail)
|
||||
for name in ('out', 'err'):
|
||||
content = sec.get("Captured std%s" % name)
|
||||
if content:
|
||||
self.appendlog(
|
||||
"<system-%s>%%s</system-%s>" % (name, name), content)
|
||||
tag = getattr(Junit, 'system-'+name)
|
||||
self.append(tag(bin_xml_escape(content)))
|
||||
self.failed += 1
|
||||
self._closetestcase()
|
||||
|
||||
def append_collect_failure(self, report):
|
||||
self._opentestcase(report)
|
||||
#msg = str(report.longrepr.reprtraceback.extraline)
|
||||
self.appendlog('<failure message="collection failure">%s</failure>',
|
||||
report.longrepr)
|
||||
self._closetestcase()
|
||||
self.append(Junit.failure(str(report.longrepr),
|
||||
message="collection failure"))
|
||||
self.errors += 1
|
||||
|
||||
def append_collect_skipped(self, report):
|
||||
self._opentestcase(report)
|
||||
#msg = str(report.longrepr.reprtraceback.extraline)
|
||||
self.appendlog('<skipped message="collection skipped">%s</skipped>',
|
||||
report.longrepr)
|
||||
self._closetestcase()
|
||||
self.append(Junit.skipped(str(report.longrepr),
|
||||
message="collection skipped"))
|
||||
self.skipped += 1
|
||||
|
||||
def append_error(self, report):
|
||||
self._opentestcase(report)
|
||||
self.appendlog('<error message="test setup failure">%s</error>',
|
||||
report.longrepr)
|
||||
self._closetestcase()
|
||||
self.append(Junit.error(str(report.longrepr),
|
||||
message="test setup failure"))
|
||||
self.errors += 1
|
||||
|
||||
def append_skipped(self, report):
|
||||
self._opentestcase(report)
|
||||
if "xfail" in report.keywords:
|
||||
self.appendlog(
|
||||
'<skipped message="expected test failure">%s</skipped>',
|
||||
report.keywords['xfail'])
|
||||
self.append(Junit.skipped(str(report.keywords['xfail']),
|
||||
message="expected test failure"))
|
||||
else:
|
||||
filename, lineno, skipreason = report.longrepr
|
||||
if skipreason.startswith("Skipped: "):
|
||||
skipreason = skipreason[9:]
|
||||
self.appendlog('<skipped type="pytest.skip" '
|
||||
'message="%s">%s</skipped>',
|
||||
skipreason, "%s:%s: %s" % report.longrepr,
|
||||
)
|
||||
self._closetestcase()
|
||||
self.append(
|
||||
Junit.skipped("%s:%s: %s" % report.longrepr,
|
||||
type="pytest.skip",
|
||||
message=skipreason
|
||||
))
|
||||
self.skipped += 1
|
||||
|
||||
def pytest_runtest_logreport(self, report):
|
||||
if report.passed:
|
||||
self.append_pass(report)
|
||||
if report.when == "call": # ignore setup/teardown
|
||||
self._opentestcase(report)
|
||||
self.append_pass(report)
|
||||
elif report.failed:
|
||||
self._opentestcase(report)
|
||||
if report.when != "call":
|
||||
self.append_error(report)
|
||||
else:
|
||||
self.append_failure(report)
|
||||
elif report.skipped:
|
||||
self._opentestcase(report)
|
||||
self.append_skipped(report)
|
||||
|
||||
def pytest_collectreport(self, report):
|
||||
if not report.passed:
|
||||
self._opentestcase(report)
|
||||
if report.failed:
|
||||
self.append_collect_failure(report)
|
||||
else:
|
||||
@@ -185,10 +188,11 @@ class LogXML(object):
|
||||
def pytest_internalerror(self, excrepr):
|
||||
self.errors += 1
|
||||
data = py.xml.escape(excrepr)
|
||||
self.test_logs.append(
|
||||
'\n<testcase classname="pytest" name="internal">'
|
||||
' <error message="internal error">'
|
||||
'%s</error></testcase>' % data)
|
||||
self.tests.append(
|
||||
Junit.testcase(
|
||||
Junit.error(data, message="internal error"),
|
||||
classname="pytest",
|
||||
name="internal"))
|
||||
|
||||
def pytest_sessionstart(self, session):
|
||||
self.suite_start_time = time.time()
|
||||
@@ -202,17 +206,17 @@ class LogXML(object):
|
||||
suite_stop_time = time.time()
|
||||
suite_time_delta = suite_stop_time - self.suite_start_time
|
||||
numtests = self.passed + self.failed
|
||||
|
||||
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
|
||||
logfile.write('<testsuite ')
|
||||
logfile.write('name="" ')
|
||||
logfile.write('errors="%i" ' % self.errors)
|
||||
logfile.write('failures="%i" ' % self.failed)
|
||||
logfile.write('skips="%i" ' % self.skipped)
|
||||
logfile.write('tests="%i" ' % numtests)
|
||||
logfile.write('time="%.3f"' % suite_time_delta)
|
||||
logfile.write(' >')
|
||||
logfile.writelines(self.test_logs)
|
||||
logfile.write('</testsuite>')
|
||||
logfile.write(Junit.testsuite(
|
||||
self.tests,
|
||||
name="",
|
||||
errors=self.errors,
|
||||
failures=self.failed,
|
||||
skips=self.skipped,
|
||||
tests=numtests,
|
||||
time="%.3f" % suite_time_delta,
|
||||
).unicode(indent=0))
|
||||
logfile.close()
|
||||
|
||||
def pytest_terminal_summary(self, terminalreporter):
|
||||
|
||||
103
_pytest/main.py
103
_pytest/main.py
@@ -10,6 +10,9 @@ EXIT_OK = 0
|
||||
EXIT_TESTSFAILED = 1
|
||||
EXIT_INTERRUPTED = 2
|
||||
EXIT_INTERNALERROR = 3
|
||||
EXIT_USAGEERROR = 4
|
||||
|
||||
name_re = py.std.re.compile("^[a-zA-Z_]\w*$")
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
|
||||
@@ -27,6 +30,9 @@ def pytest_addoption(parser):
|
||||
action="store", type="int", dest="maxfail", default=0,
|
||||
help="exit after first num failures or errors.")
|
||||
|
||||
group._addoption('--strict', action="store_true",
|
||||
help="run pytest in strict mode, warnings become errors.")
|
||||
|
||||
group = parser.getgroup("collect", "collection")
|
||||
group.addoption('--collectonly',
|
||||
action="store_true", dest="collectonly",
|
||||
@@ -48,7 +54,7 @@ def pytest_addoption(parser):
|
||||
def pytest_namespace():
|
||||
collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
|
||||
return dict(collect=collect)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
py.test.config = config # compatibiltiy
|
||||
if config.option.exitfirst:
|
||||
@@ -60,30 +66,34 @@ def wrap_session(config, doit):
|
||||
session.exitstatus = EXIT_OK
|
||||
initstate = 0
|
||||
try:
|
||||
config.pluginmanager.do_configure(config)
|
||||
initstate = 1
|
||||
config.hook.pytest_sessionstart(session=session)
|
||||
initstate = 2
|
||||
doit(config, session)
|
||||
except pytest.UsageError:
|
||||
raise
|
||||
except KeyboardInterrupt:
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
|
||||
session.exitstatus = EXIT_INTERRUPTED
|
||||
except:
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
config.pluginmanager.notify_exception(excinfo, config.option)
|
||||
session.exitstatus = EXIT_INTERNALERROR
|
||||
if excinfo.errisinstance(SystemExit):
|
||||
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
|
||||
if not session.exitstatus and session._testsfailed:
|
||||
session.exitstatus = EXIT_TESTSFAILED
|
||||
if initstate >= 2:
|
||||
config.hook.pytest_sessionfinish(session=session,
|
||||
exitstatus=session.exitstatus)
|
||||
if initstate >= 1:
|
||||
config.pluginmanager.do_unconfigure(config)
|
||||
try:
|
||||
config.pluginmanager.do_configure(config)
|
||||
initstate = 1
|
||||
config.hook.pytest_sessionstart(session=session)
|
||||
initstate = 2
|
||||
doit(config, session)
|
||||
except pytest.UsageError:
|
||||
msg = sys.exc_info()[1].args[0]
|
||||
sys.stderr.write("ERROR: %s\n" %(msg,))
|
||||
session.exitstatus = EXIT_USAGEERROR
|
||||
except KeyboardInterrupt:
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
|
||||
session.exitstatus = EXIT_INTERRUPTED
|
||||
except:
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
config.pluginmanager.notify_exception(excinfo, config.option)
|
||||
session.exitstatus = EXIT_INTERNALERROR
|
||||
if excinfo.errisinstance(SystemExit):
|
||||
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
|
||||
finally:
|
||||
if initstate >= 2:
|
||||
config.hook.pytest_sessionfinish(session=session,
|
||||
exitstatus=session.exitstatus or (session._testsfailed and 1))
|
||||
if not session.exitstatus and session._testsfailed:
|
||||
session.exitstatus = EXIT_TESTSFAILED
|
||||
if initstate >= 1:
|
||||
config.pluginmanager.do_unconfigure(config)
|
||||
return session.exitstatus
|
||||
|
||||
def pytest_cmdline_main(config):
|
||||
@@ -101,8 +111,12 @@ def pytest_collection(session):
|
||||
def pytest_runtestloop(session):
|
||||
if session.config.option.collectonly:
|
||||
return True
|
||||
for item in session.session.items:
|
||||
item.config.hook.pytest_runtest_protocol(item=item)
|
||||
for i, item in enumerate(session.items):
|
||||
try:
|
||||
nextitem = session.items[i+1]
|
||||
except IndexError:
|
||||
nextitem = None
|
||||
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
|
||||
if session.shouldstop:
|
||||
raise session.Interrupted(session.shouldstop)
|
||||
return True
|
||||
@@ -132,7 +146,7 @@ def compatproperty(name):
|
||||
return getattr(pytest, name)
|
||||
return property(fget, None, None,
|
||||
"deprecated attribute %r, use pytest.%s" % (name,name))
|
||||
|
||||
|
||||
class Node(object):
|
||||
""" base class for all Nodes in the collection tree.
|
||||
Collector subclasses have children, Items are terminal nodes."""
|
||||
@@ -143,13 +157,13 @@ class Node(object):
|
||||
|
||||
#: the parent collector node.
|
||||
self.parent = parent
|
||||
|
||||
|
||||
#: the test config object
|
||||
self.config = config or parent.config
|
||||
|
||||
#: the collection this node is part of
|
||||
self.session = session or parent.session
|
||||
|
||||
|
||||
#: filesystem path where this node was collected from
|
||||
self.fspath = getattr(parent, 'fspath', None)
|
||||
self.ihook = self.session.gethookproxy(self.fspath)
|
||||
@@ -224,13 +238,13 @@ class Node(object):
|
||||
def listchain(self):
|
||||
""" return list of all parent collectors up to self,
|
||||
starting from root of collection tree. """
|
||||
l = [self]
|
||||
while 1:
|
||||
x = l[0]
|
||||
if x.parent is not None: # and x.parent.parent is not None:
|
||||
l.insert(0, x.parent)
|
||||
else:
|
||||
return l
|
||||
chain = []
|
||||
item = self
|
||||
while item is not None:
|
||||
chain.append(item)
|
||||
item = item.parent
|
||||
chain.reverse()
|
||||
return chain
|
||||
|
||||
def listnames(self):
|
||||
return [x.name for x in self.listchain()]
|
||||
@@ -325,6 +339,8 @@ class Item(Node):
|
||||
""" a basic test invocation item. Note that for a single function
|
||||
there might be multiple test invocation items.
|
||||
"""
|
||||
nextitem = None
|
||||
|
||||
def reportinfo(self):
|
||||
return self.fspath, None, ""
|
||||
|
||||
@@ -399,6 +415,7 @@ class Session(FSCollector):
|
||||
self._notfound = []
|
||||
self._initialpaths = set()
|
||||
self._initialparts = []
|
||||
self.items = items = []
|
||||
for arg in args:
|
||||
parts = self._parsearg(arg)
|
||||
self._initialparts.append(parts)
|
||||
@@ -414,7 +431,6 @@ class Session(FSCollector):
|
||||
if not genitems:
|
||||
return rep.result
|
||||
else:
|
||||
self.items = items = []
|
||||
if rep.passed:
|
||||
for node in rep.result:
|
||||
self.items.extend(self.genitems(node))
|
||||
@@ -472,6 +488,13 @@ class Session(FSCollector):
|
||||
mod = None
|
||||
path = [os.path.abspath('.')] + sys.path
|
||||
for name in x.split('.'):
|
||||
# ignore anything that's not a proper name here
|
||||
# else something like --pyargs will mess up '.'
|
||||
# since imp.find_module will actually sometimes work for it
|
||||
# but it's supposed to be considered a filesystem path
|
||||
# not a package
|
||||
if name_re.match(name) is None:
|
||||
return x
|
||||
try:
|
||||
fd, mod, type_ = imp.find_module(name, path)
|
||||
except ImportError:
|
||||
@@ -479,7 +502,7 @@ class Session(FSCollector):
|
||||
else:
|
||||
if fd is not None:
|
||||
fd.close()
|
||||
|
||||
|
||||
if type_[2] != imp.PKG_DIRECTORY:
|
||||
path = [os.path.dirname(mod)]
|
||||
else:
|
||||
@@ -502,7 +525,7 @@ class Session(FSCollector):
|
||||
raise pytest.UsageError(msg + arg)
|
||||
parts[0] = path
|
||||
return parts
|
||||
|
||||
|
||||
def matchnodes(self, matching, names):
|
||||
self.trace("matchnodes", matching, names)
|
||||
self.trace.root.indent += 1
|
||||
|
||||
102
_pytest/mark.py
102
_pytest/mark.py
@@ -14,12 +14,37 @@ def pytest_addoption(parser):
|
||||
"Terminate expression with ':' to make the first match match "
|
||||
"all subsequent tests (usually file-order). ")
|
||||
|
||||
group._addoption("-m",
|
||||
action="store", dest="markexpr", default="", metavar="MARKEXPR",
|
||||
help="only run tests matching given mark expression. "
|
||||
"example: -m 'mark1 and not mark2'."
|
||||
)
|
||||
|
||||
group.addoption("--markers", action="store_true", help=
|
||||
"show markers (builtin, plugin and per-project ones).")
|
||||
|
||||
parser.addini("markers", "markers for test functions", 'linelist')
|
||||
|
||||
def pytest_cmdline_main(config):
|
||||
if config.option.markers:
|
||||
config.pluginmanager.do_configure(config)
|
||||
tw = py.io.TerminalWriter()
|
||||
for line in config.getini("markers"):
|
||||
name, rest = line.split(":", 1)
|
||||
tw.write("@pytest.mark.%s:" % name, bold=True)
|
||||
tw.line(rest)
|
||||
tw.line()
|
||||
config.pluginmanager.do_unconfigure(config)
|
||||
return 0
|
||||
pytest_cmdline_main.tryfirst = True
|
||||
|
||||
def pytest_collection_modifyitems(items, config):
|
||||
keywordexpr = config.option.keyword
|
||||
if not keywordexpr:
|
||||
matchexpr = config.option.markexpr
|
||||
if not keywordexpr and not matchexpr:
|
||||
return
|
||||
selectuntil = False
|
||||
if keywordexpr[-1] == ":":
|
||||
if keywordexpr[-1:] == ":":
|
||||
selectuntil = True
|
||||
keywordexpr = keywordexpr[:-1]
|
||||
|
||||
@@ -29,21 +54,38 @@ def pytest_collection_modifyitems(items, config):
|
||||
if keywordexpr and skipbykeyword(colitem, keywordexpr):
|
||||
deselected.append(colitem)
|
||||
else:
|
||||
remaining.append(colitem)
|
||||
if selectuntil:
|
||||
keywordexpr = None
|
||||
if matchexpr:
|
||||
if not matchmark(colitem, matchexpr):
|
||||
deselected.append(colitem)
|
||||
continue
|
||||
remaining.append(colitem)
|
||||
|
||||
if deselected:
|
||||
config.hook.pytest_deselected(items=deselected)
|
||||
items[:] = remaining
|
||||
|
||||
class BoolDict:
|
||||
def __init__(self, mydict):
|
||||
self._mydict = mydict
|
||||
def __getitem__(self, name):
|
||||
return name in self._mydict
|
||||
|
||||
def matchmark(colitem, matchexpr):
|
||||
return eval(matchexpr, {}, BoolDict(colitem.obj.__dict__))
|
||||
|
||||
def pytest_configure(config):
|
||||
if config.option.strict:
|
||||
pytest.mark._config = config
|
||||
|
||||
def skipbykeyword(colitem, keywordexpr):
|
||||
""" return True if they given keyword expression means to
|
||||
skip this collector/item.
|
||||
"""
|
||||
if not keywordexpr:
|
||||
return
|
||||
|
||||
|
||||
itemkeywords = getkeywords(colitem)
|
||||
for key in filter(None, keywordexpr.split()):
|
||||
eor = key[:1] == '-'
|
||||
@@ -77,15 +119,31 @@ class MarkGenerator:
|
||||
@py.test.mark.slowtest
|
||||
def test_function():
|
||||
pass
|
||||
|
||||
|
||||
will set a 'slowtest' :class:`MarkInfo` object
|
||||
on the ``test_function`` object. """
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name[0] == "_":
|
||||
raise AttributeError(name)
|
||||
if hasattr(self, '_config'):
|
||||
self._check(name)
|
||||
return MarkDecorator(name)
|
||||
|
||||
def _check(self, name):
|
||||
try:
|
||||
if name in self._markers:
|
||||
return
|
||||
except AttributeError:
|
||||
pass
|
||||
self._markers = l = set()
|
||||
for line in self._config.getini("markers"):
|
||||
beginning = line.split(":", 1)
|
||||
x = beginning[0].split("(", 1)[0]
|
||||
l.add(x)
|
||||
if name not in self._markers:
|
||||
raise AttributeError("%r not a registered marker" % (name,))
|
||||
|
||||
class MarkDecorator:
|
||||
""" A decorator for test functions and test classes. When applied
|
||||
it will create :class:`MarkInfo` objects which may be
|
||||
@@ -133,8 +191,7 @@ class MarkDecorator:
|
||||
holder = MarkInfo(self.markname, self.args, self.kwargs)
|
||||
setattr(func, self.markname, holder)
|
||||
else:
|
||||
holder.kwargs.update(self.kwargs)
|
||||
holder.args += self.args
|
||||
holder.add(self.args, self.kwargs)
|
||||
return func
|
||||
kw = self.kwargs.copy()
|
||||
kw.update(kwargs)
|
||||
@@ -150,27 +207,20 @@ class MarkInfo:
|
||||
self.args = args
|
||||
#: keyword argument dictionary, empty if nothing specified
|
||||
self.kwargs = kwargs
|
||||
self._arglist = [(args, kwargs.copy())]
|
||||
|
||||
def __repr__(self):
|
||||
return "<MarkInfo %r args=%r kwargs=%r>" % (
|
||||
self.name, self.args, self.kwargs)
|
||||
|
||||
def pytest_itemcollected(item):
|
||||
if not isinstance(item, pytest.Function):
|
||||
return
|
||||
try:
|
||||
func = item.obj.__func__
|
||||
except AttributeError:
|
||||
func = getattr(item.obj, 'im_func', item.obj)
|
||||
pyclasses = (pytest.Class, pytest.Module)
|
||||
for node in item.listchain():
|
||||
if isinstance(node, pyclasses):
|
||||
marker = getattr(node.obj, 'pytestmark', None)
|
||||
if marker is not None:
|
||||
if isinstance(marker, list):
|
||||
for mark in marker:
|
||||
mark(func)
|
||||
else:
|
||||
marker(func)
|
||||
node = node.parent
|
||||
item.keywords.update(py.builtin._getfuncdict(func))
|
||||
def add(self, args, kwargs):
|
||||
""" add a MarkInfo with the given args and kwargs. """
|
||||
self._arglist.append((args, kwargs))
|
||||
self.args += args
|
||||
self.kwargs.update(kwargs)
|
||||
|
||||
def __iter__(self):
|
||||
""" yield MarkInfo objects each relating to a marking-call. """
|
||||
for args, kwargs in self._arglist:
|
||||
yield MarkInfo(self.name, args, kwargs)
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ def pytest_funcarg__monkeypatch(request):
|
||||
monkeypatch.setenv(name, value, prepend=False)
|
||||
monkeypatch.delenv(name, value, raising=True)
|
||||
monkeypatch.syspath_prepend(path)
|
||||
monkeypatch.chdir(path)
|
||||
|
||||
All modifications will be undone after the requesting
|
||||
test function has finished. The ``raising``
|
||||
@@ -30,6 +31,7 @@ class monkeypatch:
|
||||
def __init__(self):
|
||||
self._setattr = []
|
||||
self._setitem = []
|
||||
self._cwd = None
|
||||
|
||||
def setattr(self, obj, name, value, raising=True):
|
||||
""" set attribute ``name`` on ``obj`` to ``value``, by default
|
||||
@@ -83,6 +85,17 @@ class monkeypatch:
|
||||
self._savesyspath = sys.path[:]
|
||||
sys.path.insert(0, str(path))
|
||||
|
||||
def chdir(self, path):
|
||||
""" change the current working directory to the specified path
|
||||
path can be a string or a py.path.local object
|
||||
"""
|
||||
if self._cwd is None:
|
||||
self._cwd = os.getcwd()
|
||||
if hasattr(path, "chdir"):
|
||||
path.chdir()
|
||||
else:
|
||||
os.chdir(path)
|
||||
|
||||
def undo(self):
|
||||
""" undo previous changes. This call consumes the
|
||||
undo stack. Calling it a second time has no effect unless
|
||||
@@ -95,9 +108,17 @@ class monkeypatch:
|
||||
self._setattr[:] = []
|
||||
for dictionary, name, value in self._setitem:
|
||||
if value is notset:
|
||||
del dictionary[name]
|
||||
try:
|
||||
del dictionary[name]
|
||||
except KeyError:
|
||||
pass # was already deleted, so we have the desired state
|
||||
else:
|
||||
dictionary[name] = value
|
||||
self._setitem[:] = []
|
||||
if hasattr(self, '_savesyspath'):
|
||||
sys.path[:] = self._savesyspath
|
||||
del self._savesyspath
|
||||
|
||||
if self._cwd is not None:
|
||||
os.chdir(self._cwd)
|
||||
self._cwd = None
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
import py, sys
|
||||
|
||||
class url:
|
||||
base = "http://paste.pocoo.org"
|
||||
base = "http://bpaste.net"
|
||||
xmlrpc = base + "/xmlrpc/"
|
||||
show = base + "/show/"
|
||||
|
||||
@@ -11,7 +11,7 @@ def pytest_addoption(parser):
|
||||
group._addoption('--pastebin', metavar="mode",
|
||||
action='store', dest="pastebin", default=None,
|
||||
type="choice", choices=['failed', 'all'],
|
||||
help="send failed|all info to Pocoo pastebin service.")
|
||||
help="send failed|all info to bpaste.net pastebin service.")
|
||||
|
||||
def pytest_configure(__multicall__, config):
|
||||
import tempfile
|
||||
@@ -38,7 +38,11 @@ def pytest_unconfigure(config):
|
||||
del tr._tw.__dict__['write']
|
||||
|
||||
def getproxy():
|
||||
return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
|
||||
if sys.version_info < (3, 0):
|
||||
from xmlrpclib import ServerProxy
|
||||
else:
|
||||
from xmlrpc.client import ServerProxy
|
||||
return ServerProxy(url.xmlrpc).pastes
|
||||
|
||||
def pytest_terminal_summary(terminalreporter):
|
||||
if terminalreporter.config.option.pastebin != "failed":
|
||||
|
||||
@@ -70,7 +70,13 @@ class PdbInvoke:
|
||||
tw.sep(">", "traceback")
|
||||
rep.toterminal(tw)
|
||||
tw.sep(">", "entering PDB")
|
||||
post_mortem(call.excinfo._excinfo[2])
|
||||
# A doctest.UnexpectedException is not useful for post_mortem.
|
||||
# Use the underlying exception instead:
|
||||
if isinstance(call.excinfo.value, py.std.doctest.UnexpectedException):
|
||||
tb = call.excinfo.value.exc_info[2]
|
||||
else:
|
||||
tb = call.excinfo._excinfo[2]
|
||||
post_mortem(tb)
|
||||
rep._pdbshown = True
|
||||
return rep
|
||||
|
||||
|
||||
@@ -314,16 +314,6 @@ class TmpTestdir:
|
||||
result.extend(session.genitems(colitem))
|
||||
return result
|
||||
|
||||
def inline_genitems(self, *args):
|
||||
#config = self.parseconfig(*args)
|
||||
config = self.parseconfigure(*args)
|
||||
rec = self.getreportrecorder(config)
|
||||
session = Session(config)
|
||||
config.hook.pytest_sessionstart(session=session)
|
||||
session.perform_collect()
|
||||
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
|
||||
return session.items, rec
|
||||
|
||||
def runitem(self, source):
|
||||
# used from runner functional tests
|
||||
item = self.getitem(source)
|
||||
@@ -344,64 +334,57 @@ class TmpTestdir:
|
||||
l = list(args) + [p]
|
||||
reprec = self.inline_run(*l)
|
||||
reports = reprec.getreports("pytest_runtest_logreport")
|
||||
assert len(reports) == 1, reports
|
||||
return reports[0]
|
||||
assert len(reports) == 3, reports # setup/call/teardown
|
||||
return reports[1]
|
||||
|
||||
def inline_genitems(self, *args):
|
||||
return self.inprocess_run(list(args) + ['--collectonly'])
|
||||
|
||||
def inline_run(self, *args):
|
||||
args = ("-s", ) + args # otherwise FD leakage
|
||||
config = self.parseconfig(*args)
|
||||
reprec = self.getreportrecorder(config)
|
||||
#config.pluginmanager.do_configure(config)
|
||||
config.hook.pytest_cmdline_main(config=config)
|
||||
#config.pluginmanager.do_unconfigure(config)
|
||||
return reprec
|
||||
items, rec = self.inprocess_run(args)
|
||||
return rec
|
||||
|
||||
def config_preparse(self):
|
||||
config = self.Config()
|
||||
for plugin in self.plugins:
|
||||
if isinstance(plugin, str):
|
||||
config.pluginmanager.import_plugin(plugin)
|
||||
else:
|
||||
if isinstance(plugin, dict):
|
||||
plugin = PseudoPlugin(plugin)
|
||||
if not config.pluginmanager.isregistered(plugin):
|
||||
config.pluginmanager.register(plugin)
|
||||
return config
|
||||
def inprocess_run(self, args, plugins=None):
|
||||
rec = []
|
||||
items = []
|
||||
class Collect:
|
||||
def pytest_configure(x, config):
|
||||
rec.append(self.getreportrecorder(config))
|
||||
def pytest_itemcollected(self, item):
|
||||
items.append(item)
|
||||
if not plugins:
|
||||
plugins = []
|
||||
plugins.append(Collect())
|
||||
ret = self.pytestmain(list(args), plugins=[Collect()])
|
||||
reprec = rec[0]
|
||||
reprec.ret = ret
|
||||
assert len(rec) == 1
|
||||
return items, reprec
|
||||
|
||||
def parseconfig(self, *args):
|
||||
if not args:
|
||||
args = (self.tmpdir,)
|
||||
config = self.config_preparse()
|
||||
args = list(args)
|
||||
args = [str(x) for x in args]
|
||||
for x in args:
|
||||
if str(x).startswith('--basetemp'):
|
||||
break
|
||||
else:
|
||||
args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
|
||||
config.parse(args)
|
||||
import _pytest.core
|
||||
config = _pytest.core._prepareconfig(args, self.plugins)
|
||||
# the in-process pytest invocation needs to avoid leaking FDs
|
||||
# so we register a "reset_capturings" callmon the capturing manager
|
||||
# and make sure it gets called
|
||||
config._cleanup.append(
|
||||
config.pluginmanager.getplugin("capturemanager").reset_capturings)
|
||||
import _pytest.config
|
||||
self.request.addfinalizer(
|
||||
lambda: _pytest.config.pytest_unconfigure(config))
|
||||
return config
|
||||
|
||||
def reparseconfig(self, args=None):
|
||||
""" this is used from tests that want to re-invoke parse(). """
|
||||
if not args:
|
||||
args = [self.tmpdir]
|
||||
oldconfig = getattr(py.test, 'config', None)
|
||||
try:
|
||||
c = py.test.config = self.Config()
|
||||
c.basetemp = py.path.local.make_numbered_dir(prefix="reparse",
|
||||
keep=0, rootdir=self.tmpdir, lock_timeout=None)
|
||||
c.parse(args)
|
||||
c.pluginmanager.do_configure(c)
|
||||
self.request.addfinalizer(lambda: c.pluginmanager.do_unconfigure(c))
|
||||
return c
|
||||
finally:
|
||||
py.test.config = oldconfig
|
||||
|
||||
def parseconfigure(self, *args):
|
||||
config = self.parseconfig(*args)
|
||||
config.pluginmanager.do_configure(config)
|
||||
self.request.addfinalizer(lambda:
|
||||
config.pluginmanager.do_unconfigure(config))
|
||||
config.pluginmanager.do_unconfigure(config))
|
||||
return config
|
||||
|
||||
def getitem(self, source, funcname="test_func"):
|
||||
@@ -421,7 +404,6 @@ class TmpTestdir:
|
||||
self.makepyfile(__init__ = "#")
|
||||
self.config = config = self.parseconfigure(path, *configargs)
|
||||
node = self.getnode(config, path)
|
||||
#config.pluginmanager.do_unconfigure(config)
|
||||
return node
|
||||
|
||||
def collect_by_name(self, modcol, name):
|
||||
@@ -438,9 +420,16 @@ class TmpTestdir:
|
||||
return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
|
||||
|
||||
def pytestmain(self, *args, **kwargs):
|
||||
ret = pytest.main(*args, **kwargs)
|
||||
if ret == 2:
|
||||
raise KeyboardInterrupt()
|
||||
class ResetCapturing:
|
||||
@pytest.mark.trylast
|
||||
def pytest_unconfigure(self, config):
|
||||
capman = config.pluginmanager.getplugin("capturemanager")
|
||||
capman.reset_capturings()
|
||||
plugins = kwargs.setdefault("plugins", [])
|
||||
rc = ResetCapturing()
|
||||
plugins.append(rc)
|
||||
return pytest.main(*args, **kwargs)
|
||||
|
||||
def run(self, *cmdargs):
|
||||
return self._run(*cmdargs)
|
||||
|
||||
@@ -529,6 +518,8 @@ class TmpTestdir:
|
||||
pexpect = py.test.importorskip("pexpect", "2.4")
|
||||
if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine():
|
||||
pytest.skip("pypy-64 bit not supported")
|
||||
if sys.platform == "darwin":
|
||||
pytest.xfail("pexpect does not work reliably on darwin?!")
|
||||
logfile = self.tmpdir.join("spawn.out")
|
||||
child = pexpect.spawn(cmd, logfile=logfile.open("w"))
|
||||
child.timeout = expect_timeout
|
||||
@@ -541,10 +532,6 @@ def getdecoded(out):
|
||||
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
|
||||
py.io.saferepr(out),)
|
||||
|
||||
class PseudoPlugin:
|
||||
def __init__(self, vars):
|
||||
self.__dict__.update(vars)
|
||||
|
||||
class ReportRecorder(object):
|
||||
def __init__(self, hook):
|
||||
self.hook = hook
|
||||
@@ -566,10 +553,17 @@ class ReportRecorder(object):
|
||||
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
|
||||
return [x.report for x in self.getcalls(names)]
|
||||
|
||||
def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None):
|
||||
def matchreport(self, inamepart="",
|
||||
names="pytest_runtest_logreport pytest_collectreport", when=None):
|
||||
""" return a testreport whose dotted import path matches """
|
||||
l = []
|
||||
for rep in self.getreports(names=names):
|
||||
try:
|
||||
if not when and rep.when != "call" and rep.passed:
|
||||
# setup/teardown passing reports - let's ignore those
|
||||
continue
|
||||
except AttributeError:
|
||||
pass
|
||||
if when and getattr(rep, 'when', None) != when:
|
||||
continue
|
||||
if not inamepart or inamepart in rep.nodeid.split("::"):
|
||||
|
||||
@@ -4,6 +4,7 @@ import inspect
|
||||
import sys
|
||||
import pytest
|
||||
from py._code.code import TerminalRepr
|
||||
from _pytest.monkeypatch import monkeypatch
|
||||
|
||||
import _pytest
|
||||
cutdir = py.path.local(_pytest.__file__).dirpath()
|
||||
@@ -26,6 +27,24 @@ def pytest_cmdline_main(config):
|
||||
showfuncargs(config)
|
||||
return 0
|
||||
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
try:
|
||||
param = metafunc.function.parametrize
|
||||
except AttributeError:
|
||||
return
|
||||
for p in param:
|
||||
metafunc.parametrize(*p.args, **p.kwargs)
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line("markers",
|
||||
"parametrize(argnames, argvalues): call a test function multiple "
|
||||
"times passing in multiple different argument value sets. Example: "
|
||||
"@parametrize('arg1', [1,2]) would lead to two calls of the decorated "
|
||||
"test function, one with arg1=1 and another with arg1=2."
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.trylast
|
||||
def pytest_namespace():
|
||||
raises.Exception = pytest.fail.Exception
|
||||
@@ -138,6 +157,7 @@ class PyobjMixin(object):
|
||||
obj = obj.place_as
|
||||
|
||||
self._fslineno = py.code.getfslineno(obj)
|
||||
assert isinstance(self._fslineno[1], int), obj
|
||||
return self._fslineno
|
||||
|
||||
def reportinfo(self):
|
||||
@@ -155,6 +175,7 @@ class PyobjMixin(object):
|
||||
else:
|
||||
fspath, lineno = self._getfslineno()
|
||||
modpath = self.getmodpath()
|
||||
assert isinstance(lineno, int)
|
||||
return fspath, lineno, modpath
|
||||
|
||||
class PyCollectorMixin(PyobjMixin, pytest.Collector):
|
||||
@@ -200,6 +221,7 @@ class PyCollectorMixin(PyobjMixin, pytest.Collector):
|
||||
module = self.getparent(Module).obj
|
||||
clscol = self.getparent(Class)
|
||||
cls = clscol and clscol.obj or None
|
||||
transfer_markers(funcobj, cls, module)
|
||||
metafunc = Metafunc(funcobj, config=self.config,
|
||||
cls=cls, module=module)
|
||||
gentesthook = self.config.hook.pytest_generate_tests
|
||||
@@ -219,6 +241,19 @@ class PyCollectorMixin(PyobjMixin, pytest.Collector):
|
||||
l.append(function)
|
||||
return l
|
||||
|
||||
def transfer_markers(funcobj, cls, mod):
|
||||
# XXX this should rather be code in the mark plugin or the mark
|
||||
# plugin should merge with the python plugin.
|
||||
for holder in (cls, mod):
|
||||
try:
|
||||
pytestmark = holder.pytestmark
|
||||
except AttributeError:
|
||||
continue
|
||||
if isinstance(pytestmark, list):
|
||||
for mark in pytestmark:
|
||||
mark(funcobj)
|
||||
else:
|
||||
pytestmark(funcobj)
|
||||
|
||||
class Module(pytest.File, PyCollectorMixin):
|
||||
def _getobj(self):
|
||||
@@ -239,7 +274,8 @@ class Module(pytest.File, PyCollectorMixin):
|
||||
" %s\n"
|
||||
"which is not the same as the test file we want to collect:\n"
|
||||
" %s\n"
|
||||
"HINT: use a unique basename for your test file modules"
|
||||
"HINT: remove __pycache__ / .pyc files and/or use a "
|
||||
"unique basename for your test file modules"
|
||||
% e.args
|
||||
)
|
||||
#print "imported test module", mod
|
||||
@@ -275,12 +311,14 @@ class Class(PyCollectorMixin, pytest.Collector):
|
||||
setup_class = getattr(self.obj, 'setup_class', None)
|
||||
if setup_class is not None:
|
||||
setup_class = getattr(setup_class, 'im_func', setup_class)
|
||||
setup_class = getattr(setup_class, '__func__', setup_class)
|
||||
setup_class(self.obj)
|
||||
|
||||
def teardown(self):
|
||||
teardown_class = getattr(self.obj, 'teardown_class', None)
|
||||
if teardown_class is not None:
|
||||
teardown_class = getattr(teardown_class, 'im_func', teardown_class)
|
||||
teardown_class = getattr(teardown_class, '__func__', teardown_class)
|
||||
teardown_class(self.obj)
|
||||
|
||||
class Instance(PyCollectorMixin, pytest.Collector):
|
||||
@@ -369,6 +407,7 @@ class FuncargLookupErrorRepr(TerminalRepr):
|
||||
tw.line()
|
||||
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
|
||||
|
||||
|
||||
class Generator(FunctionMixin, PyCollectorMixin, pytest.Collector):
|
||||
def collect(self):
|
||||
# test generators are seen as collectors but they also
|
||||
@@ -425,6 +464,7 @@ class Function(FunctionMixin, pytest.Item):
|
||||
"yielded functions (deprecated) cannot have funcargs")
|
||||
else:
|
||||
if callspec is not None:
|
||||
self.callspec = callspec
|
||||
self.funcargs = callspec.funcargs or {}
|
||||
self._genid = callspec.id
|
||||
if hasattr(callspec, "param"):
|
||||
@@ -501,15 +541,59 @@ def fillfuncargs(function):
|
||||
request._fillfuncargs()
|
||||
|
||||
_notexists = object()
|
||||
class CallSpec:
|
||||
def __init__(self, funcargs, id, param):
|
||||
self.funcargs = funcargs
|
||||
self.id = id
|
||||
|
||||
class CallSpec2(object):
|
||||
def __init__(self, metafunc):
|
||||
self.metafunc = metafunc
|
||||
self.funcargs = {}
|
||||
self._idlist = []
|
||||
self.params = {}
|
||||
self._globalid = _notexists
|
||||
self._globalid_args = set()
|
||||
self._globalparam = _notexists
|
||||
|
||||
def copy(self, metafunc):
|
||||
cs = CallSpec2(self.metafunc)
|
||||
cs.funcargs.update(self.funcargs)
|
||||
cs.params.update(self.params)
|
||||
cs._idlist = list(self._idlist)
|
||||
cs._globalid = self._globalid
|
||||
cs._globalid_args = self._globalid_args
|
||||
cs._globalparam = self._globalparam
|
||||
return cs
|
||||
|
||||
def _checkargnotcontained(self, arg):
|
||||
if arg in self.params or arg in self.funcargs:
|
||||
raise ValueError("duplicate %r" %(arg,))
|
||||
|
||||
def getparam(self, name):
|
||||
try:
|
||||
return self.params[name]
|
||||
except KeyError:
|
||||
if self._globalparam is _notexists:
|
||||
raise ValueError(name)
|
||||
return self._globalparam
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return "-".join(map(str, filter(None, self._idlist)))
|
||||
|
||||
def setmulti(self, valtype, argnames, valset, id):
|
||||
for arg,val in zip(argnames, valset):
|
||||
self._checkargnotcontained(arg)
|
||||
getattr(self, valtype)[arg] = val
|
||||
self._idlist.append(id)
|
||||
|
||||
def setall(self, funcargs, id, param):
|
||||
for x in funcargs:
|
||||
self._checkargnotcontained(x)
|
||||
self.funcargs.update(funcargs)
|
||||
if id is not _notexists:
|
||||
self._idlist.append(id)
|
||||
if param is not _notexists:
|
||||
self.param = param
|
||||
def __repr__(self):
|
||||
return "<CallSpec id=%r param=%r funcargs=%r>" %(
|
||||
self.id, getattr(self, 'param', '?'), self.funcargs)
|
||||
assert self._globalparam is _notexists
|
||||
self._globalparam = param
|
||||
|
||||
|
||||
class Metafunc:
|
||||
def __init__(self, function, config=None, cls=None, module=None):
|
||||
@@ -523,31 +607,71 @@ class Metafunc:
|
||||
self._calls = []
|
||||
self._ids = py.builtin.set()
|
||||
|
||||
def parametrize(self, argnames, argvalues, indirect=False, ids=None):
|
||||
""" Add new invocations to the underlying test function using the list
|
||||
of argvalues for the given argnames. Parametrization is performed
|
||||
during the collection phase. If you need to setup expensive resources
|
||||
you may pass indirect=True and implement a funcarg factory which can
|
||||
perform the expensive setup just before a test is actually run.
|
||||
|
||||
:arg argnames: an argument name or a list of argument names
|
||||
|
||||
:arg argvalues: a list of values for the argname or a list of tuples of
|
||||
values for the list of argument names.
|
||||
|
||||
:arg indirect: if True each argvalue corresponding to an argument will
|
||||
be passed as request.param to its respective funcarg factory so
|
||||
that it can perform more expensive setups during the setup phase of
|
||||
a test rather than at collection time.
|
||||
|
||||
:arg ids: list of string ids each corresponding to the argvalues so
|
||||
that they are part of the test id. If no ids are provided they will
|
||||
be generated automatically from the argvalues.
|
||||
"""
|
||||
if not isinstance(argnames, (tuple, list)):
|
||||
argnames = (argnames,)
|
||||
argvalues = [(val,) for val in argvalues]
|
||||
if not indirect:
|
||||
#XXX should we also check for the opposite case?
|
||||
for arg in argnames:
|
||||
if arg not in self.funcargnames:
|
||||
raise ValueError("%r has no argument %r" %(self.function, arg))
|
||||
valtype = indirect and "params" or "funcargs"
|
||||
if not ids:
|
||||
idmaker = IDMaker()
|
||||
ids = list(map(idmaker, argvalues))
|
||||
newcalls = []
|
||||
for callspec in self._calls or [CallSpec2(self)]:
|
||||
for i, valset in enumerate(argvalues):
|
||||
assert len(valset) == len(argnames)
|
||||
newcallspec = callspec.copy(self)
|
||||
newcallspec.setmulti(valtype, argnames, valset, ids[i])
|
||||
newcalls.append(newcallspec)
|
||||
self._calls = newcalls
|
||||
|
||||
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
|
||||
""" add a new call to the underlying test function during the
|
||||
collection phase of a test run. Note that request.addcall() is
|
||||
called during the test collection phase prior and independently
|
||||
to actual test execution. Therefore you should perform setup
|
||||
of resources in a funcarg factory which can be instrumented
|
||||
with the ``param``.
|
||||
""" (deprecated, use parametrize) Add a new call to the underlying
|
||||
test function during the collection phase of a test run. Note that
|
||||
request.addcall() is called during the test collection phase prior and
|
||||
independently to actual test execution. You should only use addcall()
|
||||
if you need to specify multiple arguments of a test function.
|
||||
|
||||
:arg funcargs: argument keyword dictionary used when invoking
|
||||
the test function.
|
||||
|
||||
:arg id: used for reporting and identification purposes. If you
|
||||
don't supply an `id` the length of the currently
|
||||
list of calls to the test function will be used.
|
||||
don't supply an `id` an automatic unique id will be generated.
|
||||
|
||||
:arg param: will be exposed to a later funcarg factory invocation
|
||||
through the ``request.param`` attribute. It allows to
|
||||
defer test fixture setup activities to when an actual
|
||||
test is run.
|
||||
:arg param: a parameter which will be exposed to a later funcarg factory
|
||||
invocation through the ``request.param`` attribute.
|
||||
"""
|
||||
assert funcargs is None or isinstance(funcargs, dict)
|
||||
if funcargs is not None:
|
||||
for name in funcargs:
|
||||
if name not in self.funcargnames:
|
||||
pytest.fail("funcarg %r not used in this function." % name)
|
||||
else:
|
||||
funcargs = {}
|
||||
if id is None:
|
||||
raise ValueError("id=None not allowed")
|
||||
if id is _notexists:
|
||||
@@ -556,11 +680,26 @@ class Metafunc:
|
||||
if id in self._ids:
|
||||
raise ValueError("duplicate id %r" % id)
|
||||
self._ids.add(id)
|
||||
self._calls.append(CallSpec(funcargs, id, param))
|
||||
|
||||
cs = CallSpec2(self)
|
||||
cs.setall(funcargs, id, param)
|
||||
self._calls.append(cs)
|
||||
|
||||
class IDMaker:
|
||||
def __init__(self):
|
||||
self.counter = 0
|
||||
def __call__(self, valset):
|
||||
l = []
|
||||
for val in valset:
|
||||
if not isinstance(val, (int, str)):
|
||||
val = "."+str(self.counter)
|
||||
self.counter += 1
|
||||
l.append(str(val))
|
||||
return "-".join(l)
|
||||
|
||||
class FuncargRequest:
|
||||
""" A request for function arguments from a test function.
|
||||
|
||||
|
||||
Note that there is an optional ``param`` attribute in case
|
||||
there was an invocation to metafunc.addcall(param=...).
|
||||
If no such call was done in a ``pytest_generate_tests``
|
||||
@@ -632,7 +771,7 @@ class FuncargRequest:
|
||||
|
||||
|
||||
def applymarker(self, marker):
|
||||
""" apply a marker to a single test function invocation.
|
||||
""" Apply a marker to a single test function invocation.
|
||||
This method is useful if you don't want to have a keyword/marker
|
||||
on all function invocations.
|
||||
|
||||
@@ -644,7 +783,7 @@ class FuncargRequest:
|
||||
self._pyfuncitem.keywords[marker.markname] = marker
|
||||
|
||||
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
|
||||
""" return a testing resource managed by ``setup`` &
|
||||
""" Return a testing resource managed by ``setup`` &
|
||||
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
|
||||
``teardown`` function will be called so that subsequent calls to
|
||||
``setup`` would recreate the resource.
|
||||
@@ -693,11 +832,18 @@ class FuncargRequest:
|
||||
self._raiselookupfailed(argname)
|
||||
funcargfactory = self._name2factory[argname].pop()
|
||||
oldarg = self._currentarg
|
||||
self._currentarg = argname
|
||||
mp = monkeypatch()
|
||||
mp.setattr(self, '_currentarg', argname)
|
||||
try:
|
||||
param = self._pyfuncitem.callspec.getparam(argname)
|
||||
except (AttributeError, ValueError):
|
||||
pass
|
||||
else:
|
||||
mp.setattr(self, 'param', param, raising=False)
|
||||
try:
|
||||
self._funcargs[argname] = res = funcargfactory(request=self)
|
||||
finally:
|
||||
self._currentarg = oldarg
|
||||
mp.undo()
|
||||
return res
|
||||
|
||||
def _getscopeitem(self, scope):
|
||||
@@ -812,8 +958,7 @@ def raises(ExpectedException, *args, **kwargs):
|
||||
>>> raises(ZeroDivisionError, f, x=0)
|
||||
<ExceptionInfo ...>
|
||||
|
||||
A third possibility is to use a string which which will
|
||||
be executed::
|
||||
A third possibility is to use a string to be executed::
|
||||
|
||||
>>> raises(ZeroDivisionError, "f(0)")
|
||||
<ExceptionInfo ...>
|
||||
|
||||
@@ -63,6 +63,8 @@ class ResultLog(object):
|
||||
self.write_log_entry(testpath, lettercode, longrepr)
|
||||
|
||||
def pytest_runtest_logreport(self, report):
|
||||
if report.when != "call" and report.passed:
|
||||
return
|
||||
res = self.config.hook.pytest_report_teststatus(report=report)
|
||||
code = res[1]
|
||||
if code == 'x':
|
||||
@@ -89,5 +91,8 @@ class ResultLog(object):
|
||||
self.log_outcome(report, code, longrepr)
|
||||
|
||||
def pytest_internalerror(self, excrepr):
|
||||
path = excrepr.reprcrash.path
|
||||
reprcrash = getattr(excrepr, 'reprcrash', None)
|
||||
path = getattr(reprcrash, "path", None)
|
||||
if path is None:
|
||||
path = "cwd:%s" % py.path.local()
|
||||
self.write_log_entry(path, '!', str(excrepr))
|
||||
|
||||
@@ -14,33 +14,60 @@ def pytest_namespace():
|
||||
#
|
||||
# pytest plugin hooks
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting", "reporting", after="general")
|
||||
group.addoption('--durations',
|
||||
action="store", type="int", default=None, metavar="N",
|
||||
help="show N slowest setup/test durations (N=0 for all)."),
|
||||
|
||||
def pytest_terminal_summary(terminalreporter):
|
||||
durations = terminalreporter.config.option.durations
|
||||
if durations is None:
|
||||
return
|
||||
tr = terminalreporter
|
||||
dlist = []
|
||||
for replist in tr.stats.values():
|
||||
for rep in replist:
|
||||
if hasattr(rep, 'duration'):
|
||||
dlist.append(rep)
|
||||
if not dlist:
|
||||
return
|
||||
dlist.sort(key=lambda x: x.duration)
|
||||
dlist.reverse()
|
||||
if not durations:
|
||||
tr.write_sep("=", "slowest test durations")
|
||||
else:
|
||||
tr.write_sep("=", "slowest %s test durations" % durations)
|
||||
dlist = dlist[:durations]
|
||||
|
||||
for rep in dlist:
|
||||
nodeid = rep.nodeid.replace("::()::", "::")
|
||||
tr.write_line("%02.2fs %-8s %s" %
|
||||
(rep.duration, rep.when, nodeid))
|
||||
|
||||
def pytest_sessionstart(session):
|
||||
session._setupstate = SetupState()
|
||||
|
||||
def pytest_sessionfinish(session, exitstatus):
|
||||
hook = session.config.hook
|
||||
rep = hook.pytest__teardown_final(session=session)
|
||||
if rep:
|
||||
hook.pytest__teardown_final_logerror(session=session, report=rep)
|
||||
session.exitstatus = 1
|
||||
def pytest_sessionfinish(session):
|
||||
session._setupstate.teardown_all()
|
||||
|
||||
class NodeInfo:
|
||||
def __init__(self, location):
|
||||
self.location = location
|
||||
|
||||
def pytest_runtest_protocol(item):
|
||||
def pytest_runtest_protocol(item, nextitem):
|
||||
item.ihook.pytest_runtest_logstart(
|
||||
nodeid=item.nodeid, location=item.location,
|
||||
)
|
||||
runtestprotocol(item)
|
||||
runtestprotocol(item, nextitem=nextitem)
|
||||
return True
|
||||
|
||||
def runtestprotocol(item, log=True):
|
||||
def runtestprotocol(item, log=True, nextitem=None):
|
||||
rep = call_and_report(item, "setup", log)
|
||||
reports = [rep]
|
||||
if rep.passed:
|
||||
reports.append(call_and_report(item, "call", log))
|
||||
reports.append(call_and_report(item, "teardown", log))
|
||||
reports.append(call_and_report(item, "teardown", log,
|
||||
nextitem=nextitem))
|
||||
return reports
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
@@ -49,16 +76,8 @@ def pytest_runtest_setup(item):
|
||||
def pytest_runtest_call(item):
|
||||
item.runtest()
|
||||
|
||||
def pytest_runtest_teardown(item):
|
||||
item.session._setupstate.teardown_exact(item)
|
||||
|
||||
def pytest__teardown_final(session):
|
||||
call = CallInfo(session._setupstate.teardown_all, when="teardown")
|
||||
if call.excinfo:
|
||||
ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir)
|
||||
call.excinfo.traceback = ntraceback.filter()
|
||||
longrepr = call.excinfo.getrepr(funcargs=True)
|
||||
return TeardownErrorReport(longrepr)
|
||||
def pytest_runtest_teardown(item, nextitem):
|
||||
item.session._setupstate.teardown_exact(item, nextitem)
|
||||
|
||||
def pytest_report_teststatus(report):
|
||||
if report.when in ("setup", "teardown"):
|
||||
@@ -74,18 +93,18 @@ def pytest_report_teststatus(report):
|
||||
#
|
||||
# Implementation
|
||||
|
||||
def call_and_report(item, when, log=True):
|
||||
call = call_runtest_hook(item, when)
|
||||
def call_and_report(item, when, log=True, **kwds):
|
||||
call = call_runtest_hook(item, when, **kwds)
|
||||
hook = item.ihook
|
||||
report = hook.pytest_runtest_makereport(item=item, call=call)
|
||||
if log and (when == "call" or not report.passed):
|
||||
if log:
|
||||
hook.pytest_runtest_logreport(report=report)
|
||||
return report
|
||||
|
||||
def call_runtest_hook(item, when):
|
||||
def call_runtest_hook(item, when, **kwds):
|
||||
hookname = "pytest_runtest_" + when
|
||||
ihook = getattr(item.ihook, hookname)
|
||||
return CallInfo(lambda: ihook(item=item), when=when)
|
||||
return CallInfo(lambda: ihook(item=item, **kwds), when=when)
|
||||
|
||||
class CallInfo:
|
||||
""" Result/Exception info a function invocation. """
|
||||
@@ -124,6 +143,10 @@ def getslaveinfoline(node):
|
||||
return s
|
||||
|
||||
class BaseReport(object):
|
||||
|
||||
def __init__(self, **kw):
|
||||
self.__dict__.update(kw)
|
||||
|
||||
def toterminal(self, out):
|
||||
longrepr = self.longrepr
|
||||
if hasattr(self, 'node'):
|
||||
@@ -173,7 +196,7 @@ class TestReport(BaseReport):
|
||||
they fail).
|
||||
"""
|
||||
def __init__(self, nodeid, location,
|
||||
keywords, outcome, longrepr, when, sections=(), duration=0):
|
||||
keywords, outcome, longrepr, when, sections=(), duration=0, **extra):
|
||||
#: normalized collection node id
|
||||
self.nodeid = nodeid
|
||||
|
||||
@@ -185,13 +208,13 @@ class TestReport(BaseReport):
|
||||
#: a name -> value dictionary containing all keywords and
|
||||
#: markers associated with a test invocation.
|
||||
self.keywords = keywords
|
||||
|
||||
|
||||
#: test outcome, always one of "passed", "failed", "skipped".
|
||||
self.outcome = outcome
|
||||
|
||||
#: None or a failure representation.
|
||||
self.longrepr = longrepr
|
||||
|
||||
|
||||
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
|
||||
self.when = when
|
||||
|
||||
@@ -202,6 +225,8 @@ class TestReport(BaseReport):
|
||||
#: time it took to run just the test
|
||||
self.duration = duration
|
||||
|
||||
self.__dict__.update(extra)
|
||||
|
||||
def __repr__(self):
|
||||
return "<TestReport %r when=%r outcome=%r>" % (
|
||||
self.nodeid, self.when, self.outcome)
|
||||
@@ -209,9 +234,10 @@ class TestReport(BaseReport):
|
||||
class TeardownErrorReport(BaseReport):
|
||||
outcome = "failed"
|
||||
when = "teardown"
|
||||
def __init__(self, longrepr):
|
||||
def __init__(self, longrepr, **extra):
|
||||
self.longrepr = longrepr
|
||||
self.sections = []
|
||||
self.__dict__.update(extra)
|
||||
|
||||
def pytest_make_collect_report(collector):
|
||||
call = CallInfo(collector._memocollect, "memocollect")
|
||||
@@ -233,12 +259,13 @@ def pytest_make_collect_report(collector):
|
||||
getattr(call, 'result', None))
|
||||
|
||||
class CollectReport(BaseReport):
|
||||
def __init__(self, nodeid, outcome, longrepr, result, sections=()):
|
||||
def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
|
||||
self.nodeid = nodeid
|
||||
self.outcome = outcome
|
||||
self.longrepr = longrepr
|
||||
self.result = result or []
|
||||
self.sections = list(sections)
|
||||
self.__dict__.update(extra)
|
||||
|
||||
@property
|
||||
def location(self):
|
||||
@@ -292,20 +319,22 @@ class SetupState(object):
|
||||
self._teardown_with_finalization(None)
|
||||
assert not self._finalizers
|
||||
|
||||
def teardown_exact(self, item):
|
||||
if self.stack and item == self.stack[-1]:
|
||||
def teardown_exact(self, item, nextitem):
|
||||
needed_collectors = nextitem and nextitem.listchain() or []
|
||||
self._teardown_towards(needed_collectors)
|
||||
|
||||
def _teardown_towards(self, needed_collectors):
|
||||
while self.stack:
|
||||
if self.stack == needed_collectors[:len(self.stack)]:
|
||||
break
|
||||
self._pop_and_teardown()
|
||||
else:
|
||||
self._callfinalizers(item)
|
||||
|
||||
def prepare(self, colitem):
|
||||
""" setup objects along the collector chain to the test-method
|
||||
and teardown previously setup objects."""
|
||||
needed_collectors = colitem.listchain()
|
||||
while self.stack:
|
||||
if self.stack == needed_collectors[:len(self.stack)]:
|
||||
break
|
||||
self._pop_and_teardown()
|
||||
self._teardown_towards(needed_collectors)
|
||||
|
||||
# check if the last collection node has raised an error
|
||||
for col in self.stack:
|
||||
if hasattr(col, '_prepare_exc'):
|
||||
@@ -387,9 +416,10 @@ def importorskip(modname, minversion=None):
|
||||
__tracebackhide__ = True
|
||||
compile(modname, '', 'eval') # to catch syntaxerrors
|
||||
try:
|
||||
mod = __import__(modname, None, None, ['__doc__'])
|
||||
__import__(modname)
|
||||
except ImportError:
|
||||
py.test.skip("could not import %r" %(modname,))
|
||||
mod = sys.modules[modname]
|
||||
if minversion is None:
|
||||
return mod
|
||||
verattr = getattr(mod, '__version__', None)
|
||||
|
||||
@@ -9,6 +9,21 @@ def pytest_addoption(parser):
|
||||
action="store_true", dest="runxfail", default=False,
|
||||
help="run tests even if they are marked xfail")
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line("markers",
|
||||
"skipif(*conditions): skip the given test function if evaluation "
|
||||
"of all conditions has a True value. Evaluation happens within the "
|
||||
"module global context. Example: skipif('sys.platform == \"win32\"') "
|
||||
"skips the test if we are on the win32 platform. "
|
||||
)
|
||||
config.addinivalue_line("markers",
|
||||
"xfail(*conditions, reason=None, run=True): mark the the test function "
|
||||
"as an expected failure. Optionally specify a reason and run=False "
|
||||
"if you don't even want to execute the test function. Any positional "
|
||||
"condition strings will be evaluated (like with skipif) and if one is "
|
||||
"False the marker will not be applied."
|
||||
)
|
||||
|
||||
def pytest_namespace():
|
||||
return dict(xfail=xfail)
|
||||
|
||||
@@ -117,6 +132,14 @@ def check_xfail_no_run(item):
|
||||
def pytest_runtest_makereport(__multicall__, item, call):
|
||||
if not isinstance(item, pytest.Function):
|
||||
return
|
||||
# unitttest special case, see setting of _unexpectedsuccess
|
||||
if hasattr(item, '_unexpectedsuccess'):
|
||||
rep = __multicall__.execute()
|
||||
if rep.when == "call":
|
||||
# we need to translate into how py.test encodes xpass
|
||||
rep.keywords['xfail'] = "reason: " + repr(item._unexpectedsuccess)
|
||||
rep.outcome = "failed"
|
||||
return rep
|
||||
if not (call.excinfo and
|
||||
call.excinfo.errisinstance(py.test.xfail.Exception)):
|
||||
evalxfail = getattr(item, '_evalxfail', None)
|
||||
|
||||
@@ -43,7 +43,8 @@ def pytest_configure(config):
|
||||
pass
|
||||
else:
|
||||
stdout = os.fdopen(newfd, stdout.mode, 1)
|
||||
config._toclose = stdout
|
||||
config._cleanup.append(lambda: stdout.close())
|
||||
|
||||
reporter = TerminalReporter(config, stdout)
|
||||
config.pluginmanager.register(reporter, 'terminalreporter')
|
||||
if config.option.debug or config.option.traceconfig:
|
||||
@@ -52,11 +53,6 @@ def pytest_configure(config):
|
||||
reporter.write_line("[traceconfig] " + msg)
|
||||
config.trace.root.setprocessor("pytest:config", mywriter)
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
if hasattr(config, '_toclose'):
|
||||
#print "closing", config._toclose, config._toclose.fileno()
|
||||
config._toclose.close()
|
||||
|
||||
def getreportopt(config):
|
||||
reportopts = ""
|
||||
optvalue = config.option.report
|
||||
@@ -165,9 +161,6 @@ class TerminalReporter:
|
||||
def pytest_deselected(self, items):
|
||||
self.stats.setdefault('deselected', []).extend(items)
|
||||
|
||||
def pytest__teardown_final_logerror(self, report):
|
||||
self.stats.setdefault("error", []).append(report)
|
||||
|
||||
def pytest_runtest_logstart(self, nodeid, location):
|
||||
# ensure that the path is printed before the
|
||||
# 1st test of a module starts running
|
||||
@@ -289,10 +282,18 @@ class TerminalReporter:
|
||||
# we take care to leave out Instances aka ()
|
||||
# because later versions are going to get rid of them anyway
|
||||
if self.config.option.verbose < 0:
|
||||
for item in items:
|
||||
nodeid = item.nodeid
|
||||
nodeid = nodeid.replace("::()::", "::")
|
||||
self._tw.line(nodeid)
|
||||
if self.config.option.verbose < -1:
|
||||
counts = {}
|
||||
for item in items:
|
||||
name = item.nodeid.split('::', 1)[0]
|
||||
counts[name] = counts.get(name, 0) + 1
|
||||
for name, count in sorted(counts.items()):
|
||||
self._tw.line("%s: %d" % (name, count))
|
||||
else:
|
||||
for item in items:
|
||||
nodeid = item.nodeid
|
||||
nodeid = nodeid.replace("::()::", "::")
|
||||
self._tw.line(nodeid)
|
||||
return
|
||||
stack = []
|
||||
indent = ""
|
||||
@@ -430,9 +431,10 @@ class TerminalReporter:
|
||||
keys.append(key)
|
||||
parts = []
|
||||
for key in keys:
|
||||
val = self.stats.get(key, None)
|
||||
if val:
|
||||
parts.append("%d %s" %(len(val), key))
|
||||
if key: # setup/teardown reports have an empty key, ignore them
|
||||
val = self.stats.get(key, None)
|
||||
if val:
|
||||
parts.append("%d %s" %(len(val), key))
|
||||
line = ", ".join(parts)
|
||||
# XXX coloring
|
||||
msg = "%s in %.2f seconds" %(line, session_duration)
|
||||
@@ -443,8 +445,15 @@ class TerminalReporter:
|
||||
|
||||
def summary_deselected(self):
|
||||
if 'deselected' in self.stats:
|
||||
l = []
|
||||
k = self.config.option.keyword
|
||||
if k:
|
||||
l.append("-k%s" % k)
|
||||
m = self.config.option.markexpr
|
||||
if m:
|
||||
l.append("-m %r" % m)
|
||||
self.write_sep("=", "%d tests deselected by %r" %(
|
||||
len(self.stats['deselected']), self.config.option.keyword), bold=True)
|
||||
len(self.stats['deselected']), " ".join(l)), bold=True)
|
||||
|
||||
def repr_pythonversion(v=None):
|
||||
if v is None:
|
||||
|
||||
@@ -46,7 +46,7 @@ class TempdirHandler:
|
||||
|
||||
def finish(self):
|
||||
self.trace("finish")
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
mp = monkeypatch()
|
||||
t = TempdirHandler(config)
|
||||
@@ -64,5 +64,5 @@ def pytest_funcarg__tmpdir(request):
|
||||
name = request._pyfuncitem.name
|
||||
name = py.std.re.sub("[\W]", "_", name)
|
||||
x = request.config._tmpdirhandler.mktemp(name, numbered=True)
|
||||
return x.realpath()
|
||||
return x
|
||||
|
||||
|
||||
@@ -2,6 +2,9 @@
|
||||
import pytest, py
|
||||
import sys, pdb
|
||||
|
||||
# for transfering markers
|
||||
from _pytest.python import transfer_markers
|
||||
|
||||
def pytest_pycollect_makeitem(collector, name, obj):
|
||||
unittest = sys.modules.get('unittest')
|
||||
if unittest is None:
|
||||
@@ -19,7 +22,14 @@ def pytest_pycollect_makeitem(collector, name, obj):
|
||||
class UnitTestCase(pytest.Class):
|
||||
def collect(self):
|
||||
loader = py.std.unittest.TestLoader()
|
||||
module = self.getparent(pytest.Module).obj
|
||||
cls = self.obj
|
||||
for name in loader.getTestCaseNames(self.obj):
|
||||
x = getattr(self.obj, name)
|
||||
funcobj = getattr(x, 'im_func', x)
|
||||
transfer_markers(funcobj, cls, module)
|
||||
if hasattr(funcobj, 'todo'):
|
||||
pytest.mark.xfail(reason=str(funcobj.todo))(funcobj)
|
||||
yield TestCaseFunction(name, parent=self)
|
||||
|
||||
def setup(self):
|
||||
@@ -37,15 +47,13 @@ class UnitTestCase(pytest.Class):
|
||||
class TestCaseFunction(pytest.Function):
|
||||
_excinfo = None
|
||||
|
||||
def __init__(self, name, parent):
|
||||
super(TestCaseFunction, self).__init__(name, parent)
|
||||
if hasattr(self._obj, 'todo'):
|
||||
getattr(self._obj, 'im_func', self._obj).xfail = \
|
||||
pytest.mark.xfail(reason=str(self._obj.todo))
|
||||
|
||||
def setup(self):
|
||||
self._testcase = self.parent.obj(self.name)
|
||||
self._obj = getattr(self._testcase, self.name)
|
||||
if hasattr(self._testcase, 'skip'):
|
||||
pytest.skip(self._testcase.skip)
|
||||
if hasattr(self._obj, 'skip'):
|
||||
pytest.skip(self._obj.skip)
|
||||
if hasattr(self._testcase, 'setup_method'):
|
||||
self._testcase.setup_method(self._obj)
|
||||
|
||||
@@ -83,28 +91,37 @@ class TestCaseFunction(pytest.Function):
|
||||
self._addexcinfo(rawexcinfo)
|
||||
def addFailure(self, testcase, rawexcinfo):
|
||||
self._addexcinfo(rawexcinfo)
|
||||
|
||||
def addSkip(self, testcase, reason):
|
||||
try:
|
||||
pytest.skip(reason)
|
||||
except pytest.skip.Exception:
|
||||
self._addexcinfo(sys.exc_info())
|
||||
def addExpectedFailure(self, testcase, rawexcinfo, reason):
|
||||
|
||||
def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
|
||||
try:
|
||||
pytest.xfail(str(reason))
|
||||
except pytest.xfail.Exception:
|
||||
self._addexcinfo(sys.exc_info())
|
||||
def addUnexpectedSuccess(self, testcase, reason):
|
||||
pass
|
||||
|
||||
def addUnexpectedSuccess(self, testcase, reason=""):
|
||||
self._unexpectedsuccess = reason
|
||||
|
||||
def addSuccess(self, testcase):
|
||||
pass
|
||||
|
||||
def stopTest(self, testcase):
|
||||
pass
|
||||
|
||||
def runtest(self):
|
||||
self._testcase(result=self)
|
||||
|
||||
def _prunetraceback(self, excinfo):
|
||||
pytest.Function._prunetraceback(self, excinfo)
|
||||
excinfo.traceback = excinfo.traceback.filter(lambda x:not x.frame.f_globals.get('__unittest'))
|
||||
traceback = excinfo.traceback.filter(
|
||||
lambda x:not x.frame.f_globals.get('__unittest'))
|
||||
if traceback:
|
||||
excinfo.traceback = traceback
|
||||
|
||||
@pytest.mark.tryfirst
|
||||
def pytest_runtest_makereport(item, call):
|
||||
@@ -120,14 +137,19 @@ def pytest_runtest_protocol(item, __multicall__):
|
||||
ut = sys.modules['twisted.python.failure']
|
||||
Failure__init__ = ut.Failure.__init__.im_func
|
||||
check_testcase_implements_trial_reporter()
|
||||
def excstore(self, exc_value=None, exc_type=None, exc_tb=None):
|
||||
def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
|
||||
captureVars=None):
|
||||
if exc_value is None:
|
||||
self._rawexcinfo = sys.exc_info()
|
||||
else:
|
||||
if exc_type is None:
|
||||
exc_type = type(exc_value)
|
||||
self._rawexcinfo = (exc_type, exc_value, exc_tb)
|
||||
Failure__init__(self, exc_value, exc_type, exc_tb)
|
||||
try:
|
||||
Failure__init__(self, exc_value, exc_type, exc_tb,
|
||||
captureVars=captureVars)
|
||||
except TypeError:
|
||||
Failure__init__(self, exc_value, exc_type, exc_tb)
|
||||
ut.Failure.__init__ = excstore
|
||||
try:
|
||||
return __multicall__.execute()
|
||||
|
||||
@@ -46,7 +46,7 @@ except ImportError:
|
||||
args = [quote(arg) for arg in args]
|
||||
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
|
||||
|
||||
DEFAULT_VERSION = "0.6.19"
|
||||
DEFAULT_VERSION = "0.6.27"
|
||||
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
|
||||
SETUPTOOLS_FAKED_VERSION = "0.6c11"
|
||||
|
||||
@@ -63,7 +63,7 @@ Description: xxx
|
||||
""" % SETUPTOOLS_FAKED_VERSION
|
||||
|
||||
|
||||
def _install(tarball):
|
||||
def _install(tarball, install_args=()):
|
||||
# extracting the tarball
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
log.warn('Extracting in %s', tmpdir)
|
||||
@@ -81,7 +81,7 @@ def _install(tarball):
|
||||
|
||||
# installing
|
||||
log.warn('Installing Distribute')
|
||||
if not _python_cmd('setup.py', 'install'):
|
||||
if not _python_cmd('setup.py', 'install', *install_args):
|
||||
log.warn('Something went wrong during the installation.')
|
||||
log.warn('See the error message above.')
|
||||
finally:
|
||||
@@ -306,6 +306,9 @@ def _create_fake_setuptools_pkg_info(placeholder):
|
||||
log.warn('%s already exists', pkg_info)
|
||||
return
|
||||
|
||||
if not os.access(pkg_info, os.W_OK):
|
||||
log.warn("Don't have permissions to write %s, skipping", pkg_info)
|
||||
|
||||
log.warn('Creating %s', pkg_info)
|
||||
f = open(pkg_info, 'w')
|
||||
try:
|
||||
@@ -474,11 +477,20 @@ def _extractall(self, path=".", members=None):
|
||||
else:
|
||||
self._dbg(1, "tarfile: %s" % e)
|
||||
|
||||
def _build_install_args(argv):
|
||||
install_args = []
|
||||
user_install = '--user' in argv
|
||||
if user_install and sys.version_info < (2,6):
|
||||
log.warn("--user requires Python 2.6 or later")
|
||||
raise SystemExit(1)
|
||||
if user_install:
|
||||
install_args.append('--user')
|
||||
return install_args
|
||||
|
||||
def main(argv, version=DEFAULT_VERSION):
|
||||
"""Install or upgrade setuptools and EasyInstall"""
|
||||
tarball = download_setuptools()
|
||||
_install(tarball)
|
||||
_install(tarball, _build_install_args(argv))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -40,7 +40,7 @@ clean:
|
||||
-rm -rf $(BUILDDIR)/*
|
||||
|
||||
install: html
|
||||
@rsync -avz _build/html/ pytest.org:/www/pytest.org/latest
|
||||
rsync -avz _build/html/ pytest.org:/www/pytest.org/latest
|
||||
|
||||
installpdf: latexpdf
|
||||
@scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/latest
|
||||
|
||||
@@ -5,6 +5,9 @@ Release announcements
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
release-2.2.2
|
||||
release-2.2.1
|
||||
release-2.2.0
|
||||
release-2.1.3
|
||||
release-2.1.2
|
||||
release-2.1.1
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
py.test 2.0.2: bug fixes, improved xfail/skip expressions, speedups
|
||||
py.test 2.0.2: bug fixes, improved xfail/skip expressions, speed ups
|
||||
===========================================================================
|
||||
|
||||
Welcome to pytest-2.0.2, a maintenance and bug fix release of pytest,
|
||||
@@ -32,17 +32,17 @@ Changes between 2.0.1 and 2.0.2
|
||||
|
||||
Also you can now access module globals from xfail/skipif
|
||||
expressions so that this for example works now::
|
||||
|
||||
|
||||
import pytest
|
||||
import mymodule
|
||||
@pytest.mark.skipif("mymodule.__version__[0] == "1")
|
||||
def test_function():
|
||||
pass
|
||||
|
||||
This will not run the test function if the module's version string
|
||||
This will not run the test function if the module's version string
|
||||
does not start with a "1". Note that specifying a string instead
|
||||
of a boolean expressions allows py.test to report meaningful information
|
||||
when summarizing a test run as to what conditions lead to skipping
|
||||
of a boolean expressions allows py.test to report meaningful information
|
||||
when summarizing a test run as to what conditions lead to skipping
|
||||
(or xfail-ing) tests.
|
||||
|
||||
- fix issue28 - setup_method and pytest_generate_tests work together
|
||||
@@ -65,7 +65,7 @@ Changes between 2.0.1 and 2.0.2
|
||||
- fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular
|
||||
thanks to Laura Creighton who also revieved parts of the documentation.
|
||||
|
||||
- fix slighly wrong output of verbose progress reporting for classes
|
||||
- fix slighly wrong output of verbose progress reporting for classes
|
||||
(thanks Amaury)
|
||||
|
||||
- more precise (avoiding of) deprecation warnings for node.Class|Function accesses
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
py.test 2.1.0: perfected assertions and bug fixes
|
||||
===========================================================================
|
||||
|
||||
Welcome to the relase of pytest-2.1, a mature testing tool for Python,
|
||||
Welcome to the release of pytest-2.1, a mature testing tool for Python,
|
||||
supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See
|
||||
the improved extensive docs (now also as PDF!) with tested examples here:
|
||||
|
||||
@@ -15,7 +15,7 @@ assert statements in test modules upon import, using a PEP302 hook.
|
||||
See http://pytest.org/assert.html#advanced-assertion-introspection for
|
||||
detailed information. The work has been partly sponsored by my company,
|
||||
merlinux GmbH.
|
||||
|
||||
|
||||
For further details on bug fixes and smaller enhancements see below.
|
||||
|
||||
If you want to install or upgrade pytest, just type one of::
|
||||
@@ -39,10 +39,9 @@ Changes between 2.0.3 and 2.1.0
|
||||
unexpected exceptions
|
||||
- fix issue47: timing output in junitxml for test cases is now correct
|
||||
- fix issue48: typo in MarkInfo repr leading to exception
|
||||
- fix issue49: avoid confusing error when initizaliation partially fails
|
||||
- fix issue49: avoid confusing error when initialization partially fails
|
||||
- fix issue44: env/username expansion for junitxml file path
|
||||
- show releaselevel information in test runs for pypy
|
||||
- reworked doc pages for better navigation and PDF generation
|
||||
- report KeyboardInterrupt even if interrupted during session startup
|
||||
- fix issue 35 - provide PDF doc version and download link from index page
|
||||
|
||||
|
||||
95
doc/announce/release-2.2.0.txt
Normal file
95
doc/announce/release-2.2.0.txt
Normal file
@@ -0,0 +1,95 @@
|
||||
py.test 2.2.0: test marking++, parametrization++ and duration profiling
|
||||
===========================================================================
|
||||
|
||||
pytest-2.2.0 is a test-suite compatible release of the popular
|
||||
py.test testing tool. Plugins might need upgrades. It comes
|
||||
with these improvements:
|
||||
|
||||
* easier and more powerful parametrization of tests:
|
||||
|
||||
- new @pytest.mark.parametrize decorator to run tests with different arguments
|
||||
- new metafunc.parametrize() API for parametrizing arguments independently
|
||||
- see examples at http://pytest.org/latest/example/parametrize.html
|
||||
- NOTE that parametrize() related APIs are still a bit experimental
|
||||
and might change in future releases.
|
||||
|
||||
* improved handling of test markers and refined marking mechanism:
|
||||
|
||||
- "-m markexpr" option for selecting tests according to their mark
|
||||
- a new "markers" ini-variable for registering test markers for your project
|
||||
- the new "--strict" bails out with an error if using unregistered markers.
|
||||
- see examples at http://pytest.org/latest/example/markers.html
|
||||
|
||||
* duration profiling: new "--duration=N" option showing the N slowest test
|
||||
execution or setup/teardown calls. This is most useful if you want to
|
||||
find out where your slowest test code is.
|
||||
|
||||
* also 2.2.0 performs more eager calling of teardown/finalizers functions
|
||||
resulting in better and more accurate reporting when they fail
|
||||
|
||||
Besides there is the usual set of bug fixes along with a cleanup of
|
||||
pytest's own test suite allowing it to run on a wider range of environments.
|
||||
|
||||
For general information, see extensive docs with examples here:
|
||||
|
||||
http://pytest.org/
|
||||
|
||||
If you want to install or upgrade pytest you might just type::
|
||||
|
||||
pip install -U pytest # or
|
||||
easy_install -U pytest
|
||||
|
||||
Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, Alfredo Deza and all who gave feedback or sent bug reports.
|
||||
|
||||
best,
|
||||
holger krekel
|
||||
|
||||
|
||||
notes on incompatibility
|
||||
------------------------------
|
||||
|
||||
While test suites should work unchanged you might need to upgrade plugins:
|
||||
|
||||
* You need a new version of the pytest-xdist plugin (1.7) for distributing
|
||||
test runs.
|
||||
|
||||
* Other plugins might need an upgrade if they implement
|
||||
the ``pytest_runtest_logreport`` hook which now is called unconditionally
|
||||
for the setup/teardown fixture phases of a test. You may choose to
|
||||
ignore setup/teardown failures by inserting "if rep.when != 'call': return"
|
||||
or something similar. Note that most code probably "just" works because
|
||||
the hook was already called for failing setup/teardown phases of a test
|
||||
so a plugin should have been ready to grok such reports already.
|
||||
|
||||
|
||||
Changes between 2.1.3 and 2.2.0
|
||||
----------------------------------------
|
||||
|
||||
- fix issue90: introduce eager tearing down of test items so that
|
||||
teardown function are called earlier.
|
||||
- add an all-powerful metafunc.parametrize function which allows to
|
||||
parametrize test function arguments in multiple steps and therefore
|
||||
from independent plugins and places.
|
||||
- add a @pytest.mark.parametrize helper which allows to easily
|
||||
call a test function with different argument values.
|
||||
- Add examples to the "parametrize" example page, including a quick port
|
||||
of Test scenarios and the new parametrize function and decorator.
|
||||
- introduce registration for "pytest.mark.*" helpers via ini-files
|
||||
or through plugin hooks. Also introduce a "--strict" option which
|
||||
will treat unregistered markers as errors
|
||||
allowing to avoid typos and maintain a well described set of markers
|
||||
for your test suite. See examples at http://pytest.org/latest/mark.html
|
||||
and its links.
|
||||
- issue50: introduce "-m marker" option to select tests based on markers
|
||||
(this is a stricter and more predictable version of "-k" in that "-m"
|
||||
only matches complete markers and has more obvious rules for and/or
|
||||
semantics.
|
||||
- new feature to help optimizing the speed of your tests:
|
||||
--durations=N option for displaying N slowest test calls
|
||||
and setup/teardown methods.
|
||||
- fix issue87: --pastebin now works with python3
|
||||
- fix issue89: --pdb with unexpected exceptions in doctest work more sensibly
|
||||
- fix and cleanup pytest's own test suite to not leak FDs
|
||||
- fix issue83: link to generated funcarg list
|
||||
- fix issue74: pyarg module names are now checked against imp.find_module false positives
|
||||
- fix compatibility with twisted/trial-11.1.0 use cases
|
||||
41
doc/announce/release-2.2.1.txt
Normal file
41
doc/announce/release-2.2.1.txt
Normal file
@@ -0,0 +1,41 @@
|
||||
pytest-2.2.1: bug fixes, perfect teardowns
|
||||
===========================================================================
|
||||
|
||||
|
||||
pytest-2.2.1 is a minor backward-compatible release of the the py.test
|
||||
testing tool. It contains bug fixes and little improvements, including
|
||||
documentation fixes. If you are using the distributed testing
|
||||
pluginmake sure to upgrade it to pytest-xdist-1.8.
|
||||
|
||||
For general information see here:
|
||||
|
||||
http://pytest.org/
|
||||
|
||||
To install or upgrade pytest:
|
||||
|
||||
pip install -U pytest # or
|
||||
easy_install -U pytest
|
||||
|
||||
Special thanks for helping on this release to Ronny Pfannschmidt, Jurko
|
||||
Gospodnetic and Ralf Schmitt.
|
||||
|
||||
best,
|
||||
holger krekel
|
||||
|
||||
|
||||
Changes between 2.2.0 and 2.2.1
|
||||
----------------------------------------
|
||||
|
||||
- fix issue99 (in pytest and py) internallerrors with resultlog now
|
||||
produce better output - fixed by normalizing pytest_internalerror
|
||||
input arguments.
|
||||
- fix issue97 / traceback issues (in pytest and py) improve traceback output
|
||||
in conjunction with jinja2 and cython which hack tracebacks
|
||||
- fix issue93 (in pytest and pytest-xdist) avoid "delayed teardowns":
|
||||
the final test in a test node will now run its teardown directly
|
||||
instead of waiting for the end of the session. Thanks Dave Hunt for
|
||||
the good reporting and feedback. The pytest_runtest_protocol as well
|
||||
as the pytest_runtest_teardown hooks now have "nextitem" available
|
||||
which will be None indicating the end of the test run.
|
||||
- fix collection crash due to unknown-source collected items, thanks
|
||||
to Ralf Schmitt (fixed by depending on a more recent pylib)
|
||||
43
doc/announce/release-2.2.2.txt
Normal file
43
doc/announce/release-2.2.2.txt
Normal file
@@ -0,0 +1,43 @@
|
||||
pytest-2.2.2: bug fixes
|
||||
===========================================================================
|
||||
|
||||
pytest-2.2.2 (updated to 2.2.3 to fix packaging issues) is a minor
|
||||
backward-compatible release of the versatile py.test testing tool. It
|
||||
contains bug fixes and a few refinements particularly to reporting with
|
||||
"--collectonly", see below for betails.
|
||||
|
||||
For general information see here:
|
||||
|
||||
http://pytest.org/
|
||||
|
||||
To install or upgrade pytest:
|
||||
|
||||
pip install -U pytest # or
|
||||
easy_install -U pytest
|
||||
|
||||
Special thanks for helping on this release to Ronny Pfannschmidt
|
||||
and Ralf Schmitt and the contributors of issues.
|
||||
|
||||
best,
|
||||
holger krekel
|
||||
|
||||
|
||||
Changes between 2.2.1 and 2.2.2
|
||||
----------------------------------------
|
||||
|
||||
- fix issue101: wrong args to unittest.TestCase test function now
|
||||
produce better output
|
||||
- fix issue102: report more useful errors and hints for when a
|
||||
test directory was renamed and some pyc/__pycache__ remain
|
||||
- fix issue106: allow parametrize to be applied multiple times
|
||||
e.g. from module, class and at function level.
|
||||
- fix issue107: actually perform session scope finalization
|
||||
- don't check in parametrize if indirect parameters are funcarg names
|
||||
- add chdir method to monkeypatch funcarg
|
||||
- fix crash resulting from calling monkeypatch undo a second time
|
||||
- fix issue115: make --collectonly robust against early failure
|
||||
(missing files/directories)
|
||||
- "-qq --collectonly" now shows only files and the number of tests in them
|
||||
- "-q --collectonly" now shows test ids
|
||||
- allow adding of attributes to test reports such that it also works
|
||||
with distributed testing (no upgrade of pytest-xdist needed)
|
||||
39
doc/announce/release-2.2.4.txt
Normal file
39
doc/announce/release-2.2.4.txt
Normal file
@@ -0,0 +1,39 @@
|
||||
pytest-2.2.4: bug fixes, better junitxml/unittest/python3 compat
|
||||
===========================================================================
|
||||
|
||||
pytest-2.2.4 is a minor backward-compatible release of the versatile
|
||||
py.test testing tool. It contains bug fixes and a few refinements
|
||||
to junitxml reporting, better unittest- and python3 compatibility.
|
||||
|
||||
For general information see here:
|
||||
|
||||
http://pytest.org/
|
||||
|
||||
To install or upgrade pytest:
|
||||
|
||||
pip install -U pytest # or
|
||||
easy_install -U pytest
|
||||
|
||||
Special thanks for helping on this release to Ronny Pfannschmidt
|
||||
and Benjamin Peterson and the contributors of issues.
|
||||
|
||||
best,
|
||||
holger krekel
|
||||
|
||||
Changes between 2.2.3 and 2.2.4
|
||||
-----------------------------------
|
||||
|
||||
- fix error message for rewritten assertions involving the % operator
|
||||
- fix issue 126: correctly match all invalid xml characters for junitxml
|
||||
binary escape
|
||||
- fix issue with unittest: now @unittest.expectedFailure markers should
|
||||
be processed correctly (you can also use @pytest.mark markers)
|
||||
- document integration with the extended distribute/setuptools test commands
|
||||
- fix issue 140: propperly get the real functions
|
||||
of bound classmethods for setup/teardown_class
|
||||
- fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net
|
||||
- fix issue #143: call unconfigure/sessionfinish always when
|
||||
configure/sessionstart where called
|
||||
- fix issue #144: better mangle test ids to junitxml classnames
|
||||
- upgrade distribute_setup.py to 0.6.27
|
||||
|
||||
@@ -23,7 +23,7 @@ you will see the return value of the function call::
|
||||
|
||||
$ py.test test_assert1.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_assert1.py F
|
||||
@@ -37,7 +37,7 @@ you will see the return value of the function call::
|
||||
E + where 3 = f()
|
||||
|
||||
test_assert1.py:5: AssertionError
|
||||
========================= 1 failed in 0.03 seconds =========================
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
|
||||
py.test has support for showing the values of the most common subexpressions
|
||||
including calls, attributes, comparisons, and binary and unary
|
||||
@@ -73,7 +73,7 @@ and if you need to have access to the actual exception info you may use::
|
||||
|
||||
# do checks related to excinfo.type, excinfo.value, excinfo.traceback
|
||||
|
||||
If you want to write test code that works on Python2.4 as well,
|
||||
If you want to write test code that works on Python 2.4 as well,
|
||||
you may also use two other ways to test for an expected exception::
|
||||
|
||||
pytest.raises(ExpectedException, func, *args, **kwargs)
|
||||
@@ -105,7 +105,7 @@ if you run this module::
|
||||
|
||||
$ py.test test_assert2.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_assert2.py F
|
||||
@@ -124,7 +124,7 @@ if you run this module::
|
||||
E '5'
|
||||
|
||||
test_assert2.py:5: AssertionError
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
========================= 1 failed in 0.03 seconds =========================
|
||||
|
||||
Special comparisons are done for a number of cases:
|
||||
|
||||
|
||||
@@ -17,6 +17,9 @@ to get an overview on the globally available helpers.
|
||||
.. automodule:: pytest
|
||||
:members:
|
||||
|
||||
|
||||
.. _builtinfuncargs:
|
||||
|
||||
Builtin function arguments
|
||||
-----------------------------------------------------
|
||||
|
||||
@@ -25,7 +28,7 @@ You can ask for available builtin or project-custom
|
||||
|
||||
$ py.test --funcargs
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collected 0 items
|
||||
pytestconfig
|
||||
the pytest config object with access to command line opts.
|
||||
@@ -57,6 +60,7 @@ You can ask for available builtin or project-custom
|
||||
monkeypatch.setenv(name, value, prepend=False)
|
||||
monkeypatch.delenv(name, value, raising=True)
|
||||
monkeypatch.syspath_prepend(path)
|
||||
monkeypatch.chdir(path)
|
||||
|
||||
All modifications will be undone after the requesting
|
||||
test function has finished. The ``raising``
|
||||
@@ -75,4 +79,4 @@ You can ask for available builtin or project-custom
|
||||
cov
|
||||
A pytest funcarg that provides access to the underlying coverage object.
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
@@ -64,7 +64,7 @@ of the failing function and hide the other one::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_module.py .F
|
||||
@@ -78,7 +78,7 @@ of the failing function and hide the other one::
|
||||
|
||||
test_module.py:9: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
setting up <function test_func2 at 0x10130ccf8>
|
||||
setting up <function test_func2 at 0x1013230c8>
|
||||
==================== 1 failed, 1 passed in 0.03 seconds ====================
|
||||
|
||||
Accessing captured output from a test function
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
|
||||
.. _toc:
|
||||
|
||||
Full pytest documenation
|
||||
========================
|
||||
Full pytest documentation
|
||||
===========================
|
||||
|
||||
`Download latest version as PDF <http://media.readthedocs.org/pdf/pytest/latest/pytest.pdf>`_
|
||||
`Download latest version as PDF <pytest.pdf>`_
|
||||
|
||||
.. `Download latest version as EPUB <http://media.readthedocs.org/epub/pytest/latest/pytest.epub>`_
|
||||
|
||||
|
||||
@@ -64,13 +64,13 @@ Builtin configuration file options
|
||||
|
||||
.. confval:: minversion
|
||||
|
||||
specifies a minimal pytest version required for running tests.
|
||||
Specifies a minimal pytest version required for running tests.
|
||||
|
||||
minversion = 2.1 # will fail if we run with pytest-2.0
|
||||
|
||||
.. confval:: addopts
|
||||
|
||||
add the specified ``OPTS`` to the set of command line arguments as if they
|
||||
Add the specified ``OPTS`` to the set of command line arguments as if they
|
||||
had been specified by the user. Example: if you have this ini file content::
|
||||
|
||||
[pytest]
|
||||
@@ -94,7 +94,7 @@ Builtin configuration file options
|
||||
[seq] matches any character in seq
|
||||
[!seq] matches any char not in seq
|
||||
|
||||
Default patterns are ``.* _* CVS {args}``. Setting a ``norecurse``
|
||||
Default patterns are ``.* _* CVS {args}``. Setting a ``norecursedir``
|
||||
replaces the default. Here is an example of how to avoid
|
||||
certain directories::
|
||||
|
||||
@@ -121,4 +121,3 @@ Builtin configuration file options
|
||||
and methods are considered as test modules.
|
||||
|
||||
See :ref:`change naming conventions` for examples.
|
||||
|
||||
|
||||
@@ -44,10 +44,10 @@ then you can just invoke ``py.test`` without command line options::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
mymodule.py .
|
||||
|
||||
========================= 1 passed in 0.06 seconds =========================
|
||||
========================= 1 passed in 0.51 seconds =========================
|
||||
[?1034h
|
||||
@@ -9,7 +9,7 @@ need more examples or have questions. Also take a look at the :ref:`comprehensiv
|
||||
|
||||
.. note::
|
||||
|
||||
see :doc:`../getting-started` for basic introductionary examples
|
||||
see :doc:`../getting-started` for basic introductory examples
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
@@ -18,5 +18,6 @@ need more examples or have questions. Also take a look at the :ref:`comprehensiv
|
||||
simple.txt
|
||||
mysetup.txt
|
||||
parametrize.txt
|
||||
markers.txt
|
||||
pythoncollection.txt
|
||||
nonpython.txt
|
||||
|
||||
297
doc/example/markers.txt
Normal file
297
doc/example/markers.txt
Normal file
@@ -0,0 +1,297 @@
|
||||
|
||||
.. _`mark examples`:
|
||||
|
||||
Working with custom markers
|
||||
=================================================
|
||||
|
||||
Here are some example using the :ref:`mark` mechanism.
|
||||
|
||||
Marking test functions and selecting them for a run
|
||||
----------------------------------------------------
|
||||
|
||||
You can "mark" a test function with custom metadata like this::
|
||||
|
||||
# content of test_server.py
|
||||
|
||||
import pytest
|
||||
@pytest.mark.webtest
|
||||
def test_send_http():
|
||||
pass # perform some webtest test for your app
|
||||
def test_something_quick():
|
||||
pass
|
||||
|
||||
.. versionadded:: 2.2
|
||||
|
||||
You can then restrict a test run to only run tests marked with ``webtest``::
|
||||
|
||||
$ py.test -v -m webtest
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_server.py:3: test_send_http PASSED
|
||||
|
||||
=================== 1 tests deselected by "-m 'webtest'" ===================
|
||||
================== 1 passed, 1 deselected in 0.01 seconds ==================
|
||||
|
||||
Or the inverse, running all tests except the webtest ones::
|
||||
|
||||
$ py.test -v -m "not webtest"
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_server.py:6: test_something_quick PASSED
|
||||
|
||||
================= 1 tests deselected by "-m 'not webtest'" =================
|
||||
================== 1 passed, 1 deselected in 0.02 seconds ==================
|
||||
|
||||
Registering markers
|
||||
-------------------------------------
|
||||
|
||||
.. versionadded:: 2.2
|
||||
|
||||
.. ini-syntax for custom markers:
|
||||
|
||||
Registering markers for your test suite is simple::
|
||||
|
||||
# content of pytest.ini
|
||||
[pytest]
|
||||
markers =
|
||||
webtest: mark a test as a webtest.
|
||||
|
||||
You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers::
|
||||
|
||||
$ py.test --markers
|
||||
@pytest.mark.webtest: mark a test as a webtest.
|
||||
|
||||
@pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform.
|
||||
|
||||
        @pytest.mark.xfail(*conditions, reason=None, run=True): mark the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.
|
||||
|
||||
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in multiple different argument value sets. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.
|
||||
|
||||
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
|
||||
|
||||
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
|
||||
|
||||
|
||||
For an example on how to add and work with markers from a plugin, see
|
||||
:ref:`adding a custom marker from a plugin`.
|
||||
|
||||
.. note::
|
||||
|
||||
    It is recommended to explicitly register markers so that:
|
||||
|
||||
* there is one place in your test suite defining your markers
|
||||
|
||||
* asking for existing markers via ``py.test --markers`` gives good output
|
||||
|
||||
* typos in function markers are treated as an error if you use
|
||||
the ``--strict`` option. Later versions of py.test are probably
|
||||
going to treat non-registered markers as an error.
|
||||
|
||||
.. _`scoped-marking`:
|
||||
|
||||
Marking whole classes or modules
|
||||
----------------------------------------------------
|
||||
|
||||
If you are programming with Python 2.6 or later you may use ``pytest.mark``
|
||||
decorators with classes to apply markers to all of its test methods::
|
||||
|
||||
# content of test_mark_classlevel.py
|
||||
import pytest
|
||||
@pytest.mark.webtest
|
||||
class TestClass:
|
||||
def test_startup(self):
|
||||
pass
|
||||
def test_startup_and_more(self):
|
||||
pass
|
||||
|
||||
This is equivalent to directly applying the decorator to the
|
||||
two test functions.
|
||||
|
||||
To remain backward-compatible with Python 2.4 you can also set a
|
||||
``pytestmark`` attribute on a TestClass like this::
|
||||
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
pytestmark = pytest.mark.webtest
|
||||
|
||||
or if you need to use multiple markers you can use a list::
|
||||
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]
|
||||
|
||||
You can also set a module level marker::
|
||||
|
||||
import pytest
|
||||
pytestmark = pytest.mark.webtest
|
||||
|
||||
in which case it will be applied to all functions and
|
||||
methods defined in the module.
|
||||
|
||||
|
||||
Using ``-k TEXT`` to select tests
|
||||
----------------------------------------------------
|
||||
|
||||
You can use the ``-k`` command line option to only run tests with names matching
|
||||
the given argument::
|
||||
|
||||
$ py.test -k send_http # running with the above defined examples
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_server.py .
|
||||
|
||||
=================== 3 tests deselected by '-ksend_http' ====================
|
||||
================== 1 passed, 3 deselected in 0.02 seconds ==================
|
||||
|
||||
And you can also run all tests except the ones that match the keyword::
|
||||
|
||||
$ py.test -k-send_http
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_mark_classlevel.py ..
|
||||
test_server.py .
|
||||
|
||||
=================== 1 tests deselected by '-k-send_http' ===================
|
||||
================== 3 passed, 1 deselected in 0.03 seconds ==================
|
||||
|
||||
Or to only select the class::
|
||||
|
||||
$ py.test -kTestClass
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_mark_classlevel.py ..
|
||||
|
||||
=================== 2 tests deselected by '-kTestClass' ====================
|
||||
================== 2 passed, 2 deselected in 0.03 seconds ==================
|
||||
|
||||
.. _`adding a custom marker from a plugin`:
|
||||
|
||||
Custom marker and command line option to control test runs
|
||||
----------------------------------------------------------
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
Plugins can provide custom markers and implement specific behaviour
|
||||
based on it. This is a self-contained example which adds a command
|
||||
line option and a parametrized test function marker to run tests
|
||||
specified via named environments::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
import pytest
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption("-E", dest="env", action="store", metavar="NAME",
|
||||
help="only run tests matching the environment NAME.")
|
||||
|
||||
def pytest_configure(config):
|
||||
# register an additional marker
|
||||
config.addinivalue_line("markers",
|
||||
"env(name): mark test to run only on named environment")
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
if not isinstance(item, item.Function):
|
||||
return
|
||||
if hasattr(item.obj, 'env'):
|
||||
envmarker = getattr(item.obj, 'env')
|
||||
envname = envmarker.args[0]
|
||||
if envname != item.config.option.env:
|
||||
pytest.skip("test requires env %r" % envname)
|
||||
|
||||
A test file using this local plugin::
|
||||
|
||||
# content of test_someenv.py
|
||||
|
||||
import pytest
|
||||
@pytest.mark.env("stage1")
|
||||
def test_basic_db_operation():
|
||||
pass
|
||||
|
||||
and an example invocation specifying a different environment than what
|
||||
the test needs::
|
||||
|
||||
$ py.test -E stage2
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_someenv.py s
|
||||
|
||||
======================== 1 skipped in 0.02 seconds =========================
|
||||
|
||||
and here is one that specifies exactly the environment needed::
|
||||
|
||||
$ py.test -E stage1
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_someenv.py .
|
||||
|
||||
========================= 1 passed in 0.02 seconds =========================
|
||||
|
||||
The ``--markers`` option always gives you a list of available markers::
|
||||
|
||||
$ py.test --markers
|
||||
@pytest.mark.env(name): mark test to run only on named environment
|
||||
|
||||
@pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform.
|
||||
|
||||
    @pytest.mark.xfail(*conditions, reason=None, run=True): mark the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.
|
||||
|
||||
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in multiple different argument value sets. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.
|
||||
|
||||
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
|
||||
|
||||
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
|
||||
|
||||
|
||||
Reading markers which were set from multiple places
|
||||
----------------------------------------------------
|
||||
|
||||
.. versionadded: 2.2.2
|
||||
|
||||
If you are heavily using markers in your test suite you may encounter the case where a marker is applied several times to a test function. From plugin
|
||||
code you can read over all such settings. Example::
|
||||
|
||||
# content of test_mark_three_times.py
|
||||
import pytest
|
||||
pytestmark = pytest.mark.glob("module", x=1)
|
||||
|
||||
@pytest.mark.glob("class", x=2)
|
||||
class TestClass:
|
||||
@pytest.mark.glob("function", x=3)
|
||||
def test_something(self):
|
||||
pass
|
||||
|
||||
Here we have the marker "glob" applied three times to the same
|
||||
test function. From a conftest file we can read it like this::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
g = getattr(item.obj, 'glob', None)
|
||||
if g is not None:
|
||||
for info in g:
|
||||
print ("glob args=%s kwargs=%s" %(info.args, info.kwargs))
|
||||
|
||||
Let's run this without capturing output and see what we get::
|
||||
|
||||
$ py.test -q -s
|
||||
collecting ... collected 2 items
|
||||
..
|
||||
2 passed in 0.02 seconds
|
||||
glob args=('function',) kwargs={'x': 3}
|
||||
glob args=('class',) kwargs={'x': 2}
|
||||
glob args=('module',) kwargs={'x': 1}
|
||||
@@ -2,20 +2,20 @@
|
||||
module containing a parametrized tests testing cross-python
|
||||
serialization via the pickle module.
|
||||
"""
|
||||
import py
|
||||
import py, pytest
|
||||
|
||||
pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8']
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
if 'python1' in metafunc.funcargnames:
|
||||
assert 'python2' in metafunc.funcargnames
|
||||
for obj in metafunc.function.multiarg.kwargs['obj']:
|
||||
for py1 in pythonlist:
|
||||
for py2 in pythonlist:
|
||||
metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj),
|
||||
param=(py1, py2, obj))
|
||||
# we parametrize all "python1" and "python2" arguments to iterate
|
||||
# over the python interpreters of our list above - the actual
|
||||
# setup and lookup of interpreters in the python1/python2 factories
|
||||
# respectively.
|
||||
for arg in metafunc.funcargnames:
|
||||
if arg in ("python1", "python2"):
|
||||
metafunc.parametrize(arg, pythonlist, indirect=True)
|
||||
|
||||
@py.test.mark.multiarg(obj=[42, {}, {1:3},])
|
||||
@pytest.mark.parametrize("obj", [42, {}, {1:3},])
|
||||
def test_basic_objects(python1, python2, obj):
|
||||
python1.dumps(obj)
|
||||
python2.load_and_is_true("obj == %s" % obj)
|
||||
@@ -23,14 +23,11 @@ def test_basic_objects(python1, python2, obj):
|
||||
def pytest_funcarg__python1(request):
|
||||
tmpdir = request.getfuncargvalue("tmpdir")
|
||||
picklefile = tmpdir.join("data.pickle")
|
||||
return Python(request.param[0], picklefile)
|
||||
return Python(request.param, picklefile)
|
||||
|
||||
def pytest_funcarg__python2(request):
|
||||
python1 = request.getfuncargvalue("python1")
|
||||
return Python(request.param[1], python1.picklefile)
|
||||
|
||||
def pytest_funcarg__obj(request):
|
||||
return request.param[2]
|
||||
return Python(request.param, python1.picklefile)
|
||||
|
||||
class Python:
|
||||
def __init__(self, version, picklefile):
|
||||
|
||||
@@ -38,7 +38,7 @@ method in a :ref:`local plugin <localplugin>` ::
|
||||
def myapp(self):
|
||||
return MyApp()
|
||||
|
||||
To run the example we stub out a a simple ``MyApp`` application object::
|
||||
To run the example we stub out a simple ``MyApp`` application object::
|
||||
|
||||
# content of myapp.py
|
||||
class MyApp:
|
||||
@@ -49,7 +49,7 @@ You can now run the test::
|
||||
|
||||
$ py.test test_sample.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_sample.py F
|
||||
@@ -57,7 +57,7 @@ You can now run the test::
|
||||
================================= FAILURES =================================
|
||||
_______________________________ test_answer ________________________________
|
||||
|
||||
mysetup = <conftest.MySetup instance at 0x1013145a8>
|
||||
mysetup = <conftest.MySetup instance at 0x101322fc8>
|
||||
|
||||
def test_answer(mysetup):
|
||||
app = mysetup.myapp()
|
||||
@@ -66,7 +66,7 @@ You can now run the test::
|
||||
E assert 54 == 42
|
||||
|
||||
test_sample.py:4: AssertionError
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
========================= 1 failed in 0.72 seconds =========================
|
||||
|
||||
This means that our ``mysetup`` object was successfully instantiated
|
||||
and ``mysetup.app()`` returned an initialized ``MyApp`` instance.
|
||||
@@ -122,12 +122,12 @@ Running it yields::
|
||||
|
||||
$ py.test test_ssh.py -rs
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_ssh.py s
|
||||
========================= short test summary info ==========================
|
||||
SKIP [1] /Users/hpk/tmp/doc-exec-167/conftest.py:22: specify ssh host with --ssh
|
||||
SKIP [1] /Users/hpk/tmp/doc-exec-153/conftest.py:22: specify ssh host with --ssh
|
||||
|
||||
======================== 1 skipped in 0.02 seconds =========================
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ now execute the test specification::
|
||||
|
||||
nonpython $ py.test test_simple.yml
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_simple.yml .F
|
||||
@@ -37,7 +37,7 @@ now execute the test specification::
|
||||
usecase execution failed
|
||||
spec failed: 'some': 'other'
|
||||
no further details known at this point.
|
||||
==================== 1 failed, 1 passed in 0.09 seconds ====================
|
||||
==================== 1 failed, 1 passed in 0.48 seconds ====================
|
||||
|
||||
You get one dot for the passing ``sub1: sub1`` check and one failure.
|
||||
Obviously in the above ``conftest.py`` you'll want to implement a more
|
||||
@@ -51,12 +51,12 @@ your own domain specific testing language this way.
|
||||
representation string of your choice. It
|
||||
will be reported as a (red) string.
|
||||
|
||||
``reportinfo()`` is used for representing the test location and is also consulted for
|
||||
reporting in ``verbose`` mode::
|
||||
``reportinfo()`` is used for representing the test location and is also
|
||||
consulted when reporting in ``verbose`` mode::
|
||||
|
||||
nonpython $ py.test -v
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3 -- /Users/hpk/venv/0/bin/python
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_simple.yml:1: usecase: ok PASSED
|
||||
@@ -67,17 +67,17 @@ reporting in ``verbose`` mode::
|
||||
usecase execution failed
|
||||
spec failed: 'some': 'other'
|
||||
no further details known at this point.
|
||||
==================== 1 failed, 1 passed in 0.09 seconds ====================
|
||||
==================== 1 failed, 1 passed in 0.10 seconds ====================
|
||||
|
||||
While developing your custom test collection and execution it's also
|
||||
interesting to just look at the collection tree::
|
||||
|
||||
nonpython $ py.test --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
<YamlFile 'test_simple.yml'>
|
||||
<YamlItem 'ok'>
|
||||
<YamlItem 'hello'>
|
||||
|
||||
============================= in 0.08 seconds =============================
|
||||
============================= in 0.18 seconds =============================
|
||||
|
||||
@@ -4,18 +4,71 @@
|
||||
Parametrizing tests
|
||||
=================================================
|
||||
|
||||
py.test allows to easily implement your own custom
|
||||
parametrization scheme for tests. Here we provide
|
||||
some examples for inspiration and re-use.
|
||||
.. currentmodule:: _pytest.python
|
||||
|
||||
py.test allows you to easily parametrize test functions.
|
||||
In the following we provide some examples using
|
||||
the builtin mechanisms.
|
||||
|
||||
.. _parametrizemark:
|
||||
|
||||
Simple "decorator" parametrization of a test function
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
.. versionadded:: 2.2
|
||||
|
||||
The builtin ``pytest.mark.parametrize`` decorator directly enables
|
||||
parametrization of arguments for a test function. Here is an example
|
||||
of a test function that wants to compare that processing some input
|
||||
results in expected output::
|
||||
|
||||
# content of test_expectation.py
|
||||
import pytest
|
||||
@pytest.mark.parametrize(("input", "expected"), [
|
||||
("3+5", 8),
|
||||
("2+4", 6),
|
||||
("6*9", 42),
|
||||
])
|
||||
def test_eval(input, expected):
|
||||
assert eval(input) == expected
|
||||
|
||||
we parametrize two arguments of the test function so that the test
|
||||
function is called three times. Let's run it::
|
||||
|
||||
$ py.test -q
|
||||
collecting ... collected 3 items
|
||||
..F
|
||||
================================= FAILURES =================================
|
||||
____________________________ test_eval[6*9-42] _____________________________
|
||||
|
||||
input = '6*9', expected = 42
|
||||
|
||||
@pytest.mark.parametrize(("input", "expected"), [
|
||||
("3+5", 8),
|
||||
("2+4", 6),
|
||||
("6*9", 42),
|
||||
])
|
||||
def test_eval(input, expected):
|
||||
> assert eval(input) == expected
|
||||
E assert 54 == 42
|
||||
E + where 54 = eval('6*9')
|
||||
|
||||
test_expectation.py:8: AssertionError
|
||||
1 failed, 2 passed in 0.03 seconds
|
||||
|
||||
As expected only one pair of input/output values fails the simple test function.
|
||||
|
||||
Note that there are various ways how you can mark groups of functions,
|
||||
see :ref:`mark`.
|
||||
|
||||
Generating parameters combinations, depending on command line
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
Let's say we want to execute a test with different parameters
|
||||
and the parameter range shall be determined by a command
|
||||
line argument. Let's first write a simple computation test::
|
||||
Let's say we want to execute a test with different computation
|
||||
parameters and the parameter range shall be determined by a command
|
||||
line argument. Let's first write a simple (do-nothing) computation test::
|
||||
|
||||
# content of test_compute.py
|
||||
|
||||
@@ -36,15 +89,14 @@ Now we add a test configuration like this::
|
||||
end = 5
|
||||
else:
|
||||
end = 2
|
||||
for i in range(end):
|
||||
metafunc.addcall(funcargs={'param1': i})
|
||||
metafunc.parametrize("param1", range(end))
|
||||
|
||||
This means that we only run 2 tests if we do not pass ``--all``::
|
||||
|
||||
$ py.test -q test_compute.py
|
||||
collecting ... collected 2 items
|
||||
..
|
||||
2 passed in 0.02 seconds
|
||||
2 passed in 0.03 seconds
|
||||
|
||||
We run only two computations, so we see two dots.
|
||||
let's run the full monty::
|
||||
@@ -62,46 +114,102 @@ let's run the full monty::
|
||||
E assert 4 < 4
|
||||
|
||||
test_compute.py:3: AssertionError
|
||||
1 failed, 4 passed in 0.03 seconds
|
||||
1 failed, 4 passed in 0.05 seconds
|
||||
|
||||
As expected when running the full range of ``param1`` values
|
||||
we'll get an error on the last one.
|
||||
|
||||
Deferring the setup of parametrizing resources
|
||||
A quick port of "testscenarios"
|
||||
------------------------------------
|
||||
|
||||
.. _`test scenarios`: http://bazaar.launchpad.net/~lifeless/testscenarios/trunk/annotate/head%3A/doc/example.py
|
||||
|
||||
Here is a quick port to run tests configured with `test scenarios`_,
|
||||
an add-on from Robert Collins for the standard unittest framework. We
|
||||
only have to work a bit to construct the correct arguments for pytest's
|
||||
:py:func:`Metafunc.parametrize`::
|
||||
|
||||
# content of test_scenarios.py
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
idlist = []
|
||||
argvalues = []
|
||||
for scenario in metafunc.cls.scenarios:
|
||||
idlist.append(scenario[0])
|
||||
items = scenario[1].items()
|
||||
argnames = [x[0] for x in items]
|
||||
argvalues.append(([x[1] for x in items]))
|
||||
metafunc.parametrize(argnames, argvalues, ids=idlist)
|
||||
|
||||
scenario1 = ('basic', {'attribute': 'value'})
|
||||
scenario2 = ('advanced', {'attribute': 'value2'})
|
||||
|
||||
class TestSampleWithScenarios:
|
||||
scenarios = [scenario1, scenario2]
|
||||
|
||||
def test_demo(self, attribute):
|
||||
assert isinstance(attribute, str)
|
||||
|
||||
this is a fully self-contained example which you can run with::
|
||||
|
||||
$ py.test test_scenarios.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_scenarios.py ..
|
||||
|
||||
========================= 2 passed in 0.02 seconds =========================
|
||||
|
||||
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
|
||||
|
||||
|
||||
$ py.test --collectonly test_scenarios.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
<Module 'test_scenarios.py'>
|
||||
<Class 'TestSampleWithScenarios'>
|
||||
<Instance '()'>
|
||||
<Function 'test_demo[basic]'>
|
||||
<Function 'test_demo[advanced]'>
|
||||
|
||||
============================= in 0.05 seconds =============================
|
||||
|
||||
Deferring the setup of parametrized resources
|
||||
---------------------------------------------------
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
The parametrization of test functions happens at collection
|
||||
time. It is often a good idea to setup possibly expensive
|
||||
resources only when the actual test is run. Here is a simple
|
||||
example how you can achieve that::
|
||||
time. It is a good idea to setup expensive resources like DB
|
||||
connections or subprocess only when the actual test is run.
|
||||
Here is a simple example how you can achieve that, first
|
||||
the actual test requiring a ``db`` object::
|
||||
|
||||
# content of test_backends.py
|
||||
|
||||
|
||||
import pytest
|
||||
def test_db_initialized(db):
|
||||
# a dummy test
|
||||
if db.__class__.__name__ == "DB2":
|
||||
pytest.fail("deliberately failing for demo purposes")
|
||||
|
||||
Now we add a test configuration that takes care to generate
|
||||
two invocations of the ``test_db_initialized`` function and
|
||||
furthermore a factory that creates a database object when
|
||||
each test is actually run::
|
||||
We can now add a test configuration that generates two invocations of
|
||||
the ``test_db_initialized`` function and also implements a factory that
|
||||
creates a database object for the actual test invocations::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
if 'db' in metafunc.funcargnames:
|
||||
metafunc.addcall(param="d1")
|
||||
metafunc.addcall(param="d2")
|
||||
metafunc.parametrize("db", ['d1', 'd2'], indirect=True)
|
||||
|
||||
class DB1:
|
||||
"one database object"
|
||||
class DB2:
|
||||
"alternative database object"
|
||||
|
||||
|
||||
def pytest_funcarg__db(request):
|
||||
if request.param == "d1":
|
||||
return DB1()
|
||||
@@ -114,11 +222,11 @@ Let's first see how it looks like at collection time::
|
||||
|
||||
$ py.test test_backends.py --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
<Module 'test_backends.py'>
|
||||
<Function 'test_db_initialized[0]'>
|
||||
<Function 'test_db_initialized[1]'>
|
||||
<Function 'test_db_initialized[d1]'>
|
||||
<Function 'test_db_initialized[d2]'>
|
||||
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
@@ -128,9 +236,9 @@ And then when we run the test::
|
||||
collecting ... collected 2 items
|
||||
.F
|
||||
================================= FAILURES =================================
|
||||
__________________________ test_db_initialized[1] __________________________
|
||||
_________________________ test_db_initialized[d2] __________________________
|
||||
|
||||
db = <conftest.DB2 instance at 0x101316b90>
|
||||
db = <conftest.DB2 instance at 0x101323710>
|
||||
|
||||
def test_db_initialized(db):
|
||||
# a dummy test
|
||||
@@ -139,34 +247,37 @@ And then when we run the test::
|
||||
E Failed: deliberately failing for demo purposes
|
||||
|
||||
test_backends.py:6: Failed
|
||||
1 failed, 1 passed in 0.02 seconds
|
||||
1 failed, 1 passed in 0.03 seconds
|
||||
|
||||
Now you see that one invocation of the test passes and another fails,
|
||||
as is to be expected.
|
||||
The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``pytest_funcarg__db`` factory has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase.
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
Parametrizing test methods through per-class configuration
|
||||
--------------------------------------------------------------
|
||||
|
||||
.. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py
|
||||
|
||||
|
||||
Here is an example ``pytest_generate_function`` function implementing a
|
||||
parametrization scheme similar to Michael Foords `unittest
|
||||
parameterizer`_ in a lot less code::
|
||||
parametrization scheme similar to Michael Foord's `unittest
|
||||
parameterizer`_ but in a lot less code::
|
||||
|
||||
# content of ./test_parametrize.py
|
||||
import pytest
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
# called once per each test function
|
||||
for funcargs in metafunc.cls.params[metafunc.function.__name__]:
|
||||
# schedule a new test function run with applied **funcargs
|
||||
metafunc.addcall(funcargs=funcargs)
|
||||
funcarglist = metafunc.cls.params[metafunc.function.__name__]
|
||||
argnames = list(funcarglist[0])
|
||||
metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
|
||||
for funcargs in funcarglist])
|
||||
|
||||
class TestClass:
|
||||
# a map specifying multiple argument sets for a test method
|
||||
params = {
|
||||
'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
|
||||
'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)],
|
||||
'test_zerodivision': [dict(a=1, b=0), ],
|
||||
}
|
||||
|
||||
def test_equals(self, a, b):
|
||||
@@ -175,120 +286,44 @@ parameterizer`_ in a lot less code::
|
||||
def test_zerodivision(self, a, b):
|
||||
pytest.raises(ZeroDivisionError, "a/b")
|
||||
|
||||
Running it means we are two tests for each test functions, using
|
||||
the respective settings::
|
||||
Our test generator looks up a class-level definition which specifies which
|
||||
argument sets to use for each test function. Let's run it::
|
||||
|
||||
$ py.test -q
|
||||
collecting ... collected 6 items
|
||||
.FF..F
|
||||
collecting ... collected 3 items
|
||||
F..
|
||||
================================= FAILURES =================================
|
||||
__________________________ test_db_initialized[1] __________________________
|
||||
________________________ TestClass.test_equals[1-2] ________________________
|
||||
|
||||
db = <conftest.DB2 instance at 0x10131c488>
|
||||
|
||||
def test_db_initialized(db):
|
||||
# a dummy test
|
||||
if db.__class__.__name__ == "DB2":
|
||||
> pytest.fail("deliberately failing for demo purposes")
|
||||
E Failed: deliberately failing for demo purposes
|
||||
|
||||
test_backends.py:6: Failed
|
||||
_________________________ TestClass.test_equals[0] _________________________
|
||||
|
||||
self = <test_parametrize.TestClass instance at 0x101322170>, a = 1, b = 2
|
||||
self = <test_parametrize.TestClass instance at 0x101326368>, a = 1, b = 2
|
||||
|
||||
def test_equals(self, a, b):
|
||||
> assert a == b
|
||||
E assert 1 == 2
|
||||
|
||||
test_parametrize.py:17: AssertionError
|
||||
______________________ TestClass.test_zerodivision[1] ______________________
|
||||
|
||||
self = <test_parametrize.TestClass instance at 0x1013228c0>, a = 3, b = 2
|
||||
|
||||
def test_zerodivision(self, a, b):
|
||||
> pytest.raises(ZeroDivisionError, "a/b")
|
||||
E Failed: DID NOT RAISE
|
||||
|
||||
test_parametrize.py:20: Failed
|
||||
3 failed, 3 passed in 0.05 seconds
|
||||
test_parametrize.py:18: AssertionError
|
||||
1 failed, 2 passed in 0.03 seconds
|
||||
|
||||
Parametrizing test methods through a decorator
|
||||
--------------------------------------------------------------
|
||||
|
||||
Modifying the previous example we can also allow decorators
|
||||
for parametrizing test methods::
|
||||
|
||||
# content of test_parametrize2.py
|
||||
|
||||
import pytest
|
||||
|
||||
# test support code
|
||||
def params(funcarglist):
|
||||
def wrapper(function):
|
||||
function.funcarglist = funcarglist
|
||||
return function
|
||||
return wrapper
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
for funcargs in getattr(metafunc.function, 'funcarglist', ()):
|
||||
metafunc.addcall(funcargs=funcargs)
|
||||
|
||||
# actual test code
|
||||
class TestClass:
|
||||
@params([dict(a=1, b=2), dict(a=3, b=3), ])
|
||||
def test_equals(self, a, b):
|
||||
assert a == b
|
||||
|
||||
@params([dict(a=1, b=0), dict(a=3, b=2)])
|
||||
def test_zerodivision(self, a, b):
|
||||
pytest.raises(ZeroDivisionError, "a/b")
|
||||
|
||||
Running it gives similar results as before::
|
||||
|
||||
$ py.test -q test_parametrize2.py
|
||||
collecting ... collected 4 items
|
||||
F..F
|
||||
================================= FAILURES =================================
|
||||
_________________________ TestClass.test_equals[0] _________________________
|
||||
|
||||
self = <test_parametrize2.TestClass instance at 0x10130ac20>, a = 1, b = 2
|
||||
|
||||
@params([dict(a=1, b=2), dict(a=3, b=3), ])
|
||||
def test_equals(self, a, b):
|
||||
> assert a == b
|
||||
E assert 1 == 2
|
||||
|
||||
test_parametrize2.py:19: AssertionError
|
||||
______________________ TestClass.test_zerodivision[1] ______________________
|
||||
|
||||
self = <test_parametrize2.TestClass instance at 0x10131c878>, a = 3, b = 2
|
||||
|
||||
@params([dict(a=1, b=0), dict(a=3, b=2)])
|
||||
def test_zerodivision(self, a, b):
|
||||
> pytest.raises(ZeroDivisionError, "a/b")
|
||||
E Failed: DID NOT RAISE
|
||||
|
||||
test_parametrize2.py:23: Failed
|
||||
2 failed, 2 passed in 0.04 seconds
|
||||
|
||||
Checking serialization between Python interpreters
|
||||
Indirect parametrization with multiple resources
|
||||
--------------------------------------------------------------
|
||||
|
||||
Here is a stripped down real-life example of using parametrized
|
||||
testing for testing serialization between different interpreters.
|
||||
testing for testing serialization, invoking different python interpreters.
|
||||
We define a ``test_basic_objects`` function which is to be run
|
||||
with different sets of arguments for its three arguments::
|
||||
with different sets of arguments for its three arguments:
|
||||
|
||||
* ``python1``: first python interpreter
|
||||
* ``python2``: second python interpreter
|
||||
* ``obj``: object to be dumped from first interpreter and loaded into second interpreter
|
||||
* ``python1``: first python interpreter, run to pickle-dump an object to a file
|
||||
* ``python2``: second interpreter, run to pickle-load an object from a file
|
||||
* ``obj``: object to be dumped/loaded
|
||||
|
||||
.. literalinclude:: multipython.py
|
||||
|
||||
Running it (with Python-2.4 through to Python2.7 installed)::
|
||||
Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize)::
|
||||
|
||||
. $ py.test -q multipython.py
|
||||
. $ py.test -rs -q multipython.py
|
||||
collecting ... collected 75 items
|
||||
ssssss...ss...ss...ssssssssssss...ss...ss...ssssssssssss...ss...ss...ssssss
|
||||
27 passed, 48 skipped in 3.04 seconds
|
||||
ssssssssssssssssss.........ssssss.........ssssss.........ssssssssssssssssss
|
||||
========================= short test summary info ==========================
|
||||
SKIP [24] /Users/hpk/p/pytest/doc/example/multipython.py:36: 'python2.8' not found
|
||||
SKIP [24] /Users/hpk/p/pytest/doc/example/multipython.py:36: 'python2.4' not found
|
||||
27 passed, 48 skipped in 7.76 seconds
|
||||
|
||||
@@ -43,7 +43,7 @@ then the test collection looks like this::
|
||||
|
||||
$ py.test --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
<Module 'check_myapp.py'>
|
||||
<Class 'CheckMyApp'>
|
||||
@@ -51,7 +51,7 @@ then the test collection looks like this::
|
||||
<Function 'check_simple'>
|
||||
<Function 'check_complex'>
|
||||
|
||||
============================= in 0.02 seconds =============================
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
Interpreting cmdline arguments as Python packages
|
||||
-----------------------------------------------------
|
||||
@@ -82,7 +82,7 @@ You can always peek at the collection tree without running tests like this::
|
||||
|
||||
. $ py.test --collectonly pythoncollection.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 3 items
|
||||
<Module 'pythoncollection.py'>
|
||||
<Function 'test_function'>
|
||||
|
||||
@@ -13,7 +13,7 @@ get on the terminal - we are working on that):
|
||||
|
||||
assertion $ py.test failure_demo.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 39 items
|
||||
|
||||
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
|
||||
@@ -30,7 +30,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:15: AssertionError
|
||||
_________________________ TestFailing.test_simple __________________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x10134bc50>
|
||||
self = <failure_demo.TestFailing object at 0x101490690>
|
||||
|
||||
def test_simple(self):
|
||||
def f():
|
||||
@@ -40,13 +40,13 @@ get on the terminal - we are working on that):
|
||||
|
||||
> assert f() == g()
|
||||
E assert 42 == 43
|
||||
E + where 42 = <function f at 0x101322320>()
|
||||
E + and 43 = <function g at 0x101322398>()
|
||||
E + where 42 = <function f at 0x101462b90>()
|
||||
E + and 43 = <function g at 0x101462c08>()
|
||||
|
||||
failure_demo.py:28: AssertionError
|
||||
____________________ TestFailing.test_simple_multiline _____________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x10134b150>
|
||||
self = <failure_demo.TestFailing object at 0x101490b10>
|
||||
|
||||
def test_simple_multiline(self):
|
||||
otherfunc_multi(
|
||||
@@ -66,19 +66,19 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:11: AssertionError
|
||||
___________________________ TestFailing.test_not ___________________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x10134b710>
|
||||
self = <failure_demo.TestFailing object at 0x101490210>
|
||||
|
||||
def test_not(self):
|
||||
def f():
|
||||
return 42
|
||||
> assert not f()
|
||||
E assert not 42
|
||||
E + where 42 = <function f at 0x101322398>()
|
||||
E + where 42 = <function f at 0x101462aa0>()
|
||||
|
||||
failure_demo.py:38: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_text _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10134be10>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101490a10>
|
||||
|
||||
def test_eq_text(self):
|
||||
> assert 'spam' == 'eggs'
|
||||
@@ -89,7 +89,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:42: AssertionError
|
||||
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101347110>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148d9d0>
|
||||
|
||||
def test_eq_similar_text(self):
|
||||
> assert 'foo 1 bar' == 'foo 2 bar'
|
||||
@@ -102,7 +102,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:45: AssertionError
|
||||
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343d50>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148d590>
|
||||
|
||||
def test_eq_multiline_text(self):
|
||||
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
|
||||
@@ -115,7 +115,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:48: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_long_text _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10134b210>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148dc90>
|
||||
|
||||
def test_eq_long_text(self):
|
||||
a = '1'*100 + 'a' + '2'*100
|
||||
@@ -132,7 +132,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:53: AssertionError
|
||||
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343d90>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148d910>
|
||||
|
||||
def test_eq_long_text_multiline(self):
|
||||
a = '1\n'*100 + 'a' + '2\n'*100
|
||||
@@ -156,7 +156,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:58: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_list _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343dd0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148b9d0>
|
||||
|
||||
def test_eq_list(self):
|
||||
> assert [0, 1, 2] == [0, 1, 3]
|
||||
@@ -166,7 +166,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:61: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_list_long _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343b90>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148b750>
|
||||
|
||||
def test_eq_list_long(self):
|
||||
a = [0]*100 + [1] + [3]*100
|
||||
@@ -178,7 +178,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:66: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_dict _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343210>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148bdd0>
|
||||
|
||||
def test_eq_dict(self):
|
||||
> assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
|
||||
@@ -191,7 +191,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:69: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_set __________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343990>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148b1d0>
|
||||
|
||||
def test_eq_set(self):
|
||||
> assert set([0, 10, 11, 12]) == set([0, 20, 21])
|
||||
@@ -207,7 +207,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:72: AssertionError
|
||||
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343590>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148bf10>
|
||||
|
||||
def test_eq_longer_list(self):
|
||||
> assert [1,2] == [1,2,3]
|
||||
@@ -217,7 +217,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:75: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_in_list _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343e50>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148b390>
|
||||
|
||||
def test_in_list(self):
|
||||
> assert 1 in [0, 2, 3, 4, 5]
|
||||
@@ -226,7 +226,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:78: AssertionError
|
||||
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10133bb10>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101483e50>
|
||||
|
||||
def test_not_in_text_multiline(self):
|
||||
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
|
||||
@@ -244,7 +244,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:82: AssertionError
|
||||
___________ TestSpecialisedExplanations.test_not_in_text_single ____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10133b990>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101483c10>
|
||||
|
||||
def test_not_in_text_single(self):
|
||||
text = 'single foo line'
|
||||
@@ -257,7 +257,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:86: AssertionError
|
||||
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10133bbd0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101483ed0>
|
||||
|
||||
def test_not_in_text_single_long(self):
|
||||
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
|
||||
@@ -270,7 +270,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:90: AssertionError
|
||||
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101343510>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101483310>
|
||||
|
||||
def test_not_in_text_single_long_term(self):
|
||||
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
|
||||
@@ -289,7 +289,7 @@ get on the terminal - we are working on that):
|
||||
i = Foo()
|
||||
> assert i.b == 2
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x10133b390>.b
|
||||
E + where 1 = <failure_demo.Foo object at 0x101483f50>.b
|
||||
|
||||
failure_demo.py:101: AssertionError
|
||||
_________________________ test_attribute_instance __________________________
|
||||
@@ -299,8 +299,8 @@ get on the terminal - we are working on that):
|
||||
b = 1
|
||||
> assert Foo().b == 2
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x10133b250>.b
|
||||
E + where <failure_demo.Foo object at 0x10133b250> = <class 'failure_demo.Foo'>()
|
||||
E + where 1 = <failure_demo.Foo object at 0x101483210>.b
|
||||
E + where <failure_demo.Foo object at 0x101483210> = <class 'failure_demo.Foo'>()
|
||||
|
||||
failure_demo.py:107: AssertionError
|
||||
__________________________ test_attribute_failure __________________________
|
||||
@@ -316,7 +316,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:116:
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
|
||||
self = <failure_demo.Foo object at 0x10133bd50>
|
||||
self = <failure_demo.Foo object at 0x101483450>
|
||||
|
||||
def _get_b(self):
|
||||
> raise Exception('Failed to get attrib')
|
||||
@@ -332,15 +332,15 @@ get on the terminal - we are working on that):
|
||||
b = 2
|
||||
> assert Foo().b == Bar().b
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x10133bb50>.b
|
||||
E + where <failure_demo.Foo object at 0x10133bb50> = <class 'failure_demo.Foo'>()
|
||||
E + and 2 = <failure_demo.Bar object at 0x10133b1d0>.b
|
||||
E + where <failure_demo.Bar object at 0x10133b1d0> = <class 'failure_demo.Bar'>()
|
||||
E + where 1 = <failure_demo.Foo object at 0x101483150>.b
|
||||
E + where <failure_demo.Foo object at 0x101483150> = <class 'failure_demo.Foo'>()
|
||||
E + and 2 = <failure_demo.Bar object at 0x101483350>.b
|
||||
E + where <failure_demo.Bar object at 0x101483350> = <class 'failure_demo.Bar'>()
|
||||
|
||||
failure_demo.py:124: AssertionError
|
||||
__________________________ TestRaises.test_raises __________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1013697e8>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a6758>
|
||||
|
||||
def test_raises(self):
|
||||
s = 'qwe'
|
||||
@@ -352,10 +352,10 @@ get on the terminal - we are working on that):
|
||||
> int(s)
|
||||
E ValueError: invalid literal for int() with base 10: 'qwe'
|
||||
|
||||
<0-codegen /Users/hpk/p/pytest/_pytest/python.py:833>:1: ValueError
|
||||
<0-codegen /Users/hpk/p/pytest/_pytest/python.py:976>:1: ValueError
|
||||
______________________ TestRaises.test_raises_doesnt _______________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x101372a70>
|
||||
self = <failure_demo.TestRaises instance at 0x1014b03f8>
|
||||
|
||||
def test_raises_doesnt(self):
|
||||
> raises(IOError, "int('3')")
|
||||
@@ -364,7 +364,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:136: Failed
|
||||
__________________________ TestRaises.test_raise ___________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x10136a908>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a8998>
|
||||
|
||||
def test_raise(self):
|
||||
> raise ValueError("demo error")
|
||||
@@ -373,7 +373,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:139: ValueError
|
||||
________________________ TestRaises.test_tupleerror ________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x10136c710>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a27a0>
|
||||
|
||||
def test_tupleerror(self):
|
||||
> a,b = [1]
|
||||
@@ -382,7 +382,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:142: ValueError
|
||||
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x101365488>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a5518>
|
||||
|
||||
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
|
||||
l = [1,2,3]
|
||||
@@ -395,7 +395,7 @@ get on the terminal - we are working on that):
|
||||
l is [1, 2, 3]
|
||||
________________________ TestRaises.test_some_error ________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x101367248>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a1320>
|
||||
|
||||
def test_some_error(self):
|
||||
> if namenotexi:
|
||||
@@ -423,7 +423,7 @@ get on the terminal - we are working on that):
|
||||
<2-codegen 'abc-123' /Users/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError
|
||||
____________________ TestMoreErrors.test_complex_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101380f38>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a6638>
|
||||
|
||||
def test_complex_error(self):
|
||||
def f():
|
||||
@@ -452,7 +452,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:5: AssertionError
|
||||
___________________ TestMoreErrors.test_z1_unpack_error ____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101367f80>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a42d8>
|
||||
|
||||
def test_z1_unpack_error(self):
|
||||
l = []
|
||||
@@ -462,7 +462,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:179: ValueError
|
||||
____________________ TestMoreErrors.test_z2_type_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101363dd0>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a0128>
|
||||
|
||||
def test_z2_type_error(self):
|
||||
l = 3
|
||||
@@ -472,19 +472,19 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:183: TypeError
|
||||
______________________ TestMoreErrors.test_startswith ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101364bd8>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a0ef0>
|
||||
|
||||
def test_startswith(self):
|
||||
s = "123"
|
||||
g = "456"
|
||||
> assert s.startswith(g)
|
||||
E assert <built-in method startswith of str object at 0x1013524e0>('456')
|
||||
E + where <built-in method startswith of str object at 0x1013524e0> = '123'.startswith
|
||||
E assert <built-in method startswith of str object at 0x1014951c0>('456')
|
||||
E + where <built-in method startswith of str object at 0x1014951c0> = '123'.startswith
|
||||
|
||||
failure_demo.py:188: AssertionError
|
||||
__________________ TestMoreErrors.test_startswith_nested ___________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101363fc8>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a4170>
|
||||
|
||||
def test_startswith_nested(self):
|
||||
def f():
|
||||
@@ -492,15 +492,15 @@ get on the terminal - we are working on that):
|
||||
def g():
|
||||
return "456"
|
||||
> assert f().startswith(g())
|
||||
E assert <built-in method startswith of str object at 0x1013524e0>('456')
|
||||
E + where <built-in method startswith of str object at 0x1013524e0> = '123'.startswith
|
||||
E + where '123' = <function f at 0x10132c500>()
|
||||
E + and '456' = <function g at 0x10132c8c0>()
|
||||
E assert <built-in method startswith of str object at 0x1014951c0>('456')
|
||||
E + where <built-in method startswith of str object at 0x1014951c0> = '123'.startswith
|
||||
E + where '123' = <function f at 0x1014aea28>()
|
||||
E + and '456' = <function g at 0x101477c80>()
|
||||
|
||||
failure_demo.py:195: AssertionError
|
||||
_____________________ TestMoreErrors.test_global_func ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1013696c8>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014b3ab8>
|
||||
|
||||
def test_global_func(self):
|
||||
> assert isinstance(globf(42), float)
|
||||
@@ -510,18 +510,18 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:198: AssertionError
|
||||
_______________________ TestMoreErrors.test_instance _______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1013671b8>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a2878>
|
||||
|
||||
def test_instance(self):
|
||||
self.x = 6*7
|
||||
> assert self.x != 42
|
||||
E assert 42 != 42
|
||||
E + where 42 = <failure_demo.TestMoreErrors instance at 0x1013671b8>.x
|
||||
E + where 42 = <failure_demo.TestMoreErrors instance at 0x1014a2878>.x
|
||||
|
||||
failure_demo.py:202: AssertionError
|
||||
_______________________ TestMoreErrors.test_compare ________________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101366560>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x10149da70>
|
||||
|
||||
def test_compare(self):
|
||||
> assert globf(10) < 5
|
||||
@@ -531,7 +531,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:205: AssertionError
|
||||
_____________________ TestMoreErrors.test_try_finally ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1013613b0>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101493908>
|
||||
|
||||
def test_try_finally(self):
|
||||
x = 1
|
||||
@@ -540,4 +540,4 @@ get on the terminal - we are working on that):
|
||||
E assert 1 == 0
|
||||
|
||||
failure_demo.py:210: AssertionError
|
||||
======================== 39 failed in 0.39 seconds =========================
|
||||
======================== 39 failed in 1.05 seconds =========================
|
||||
|
||||
@@ -53,7 +53,7 @@ Let's run this without supplying our new command line option::
|
||||
test_sample.py:6: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
first
|
||||
1 failed in 0.02 seconds
|
||||
1 failed in 0.50 seconds
|
||||
|
||||
And now with supplying a command line option::
|
||||
|
||||
@@ -109,13 +109,13 @@ directory with the above conftest.py::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
gw0 I
|
||||
gw0 [0]
|
||||
|
||||
scheduling tests via LoadScheduling
|
||||
|
||||
============================= in 0.48 seconds =============================
|
||||
============================= in 5.12 seconds =============================
|
||||
|
||||
.. _`excontrolskip`:
|
||||
|
||||
@@ -156,20 +156,20 @@ and when running it will see a skipped "slow" test::
|
||||
|
||||
$ py.test -rs # "-rs" means report details on the little 's'
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_module.py .s
|
||||
========================= short test summary info ==========================
|
||||
SKIP [1] /Users/hpk/tmp/doc-exec-172/conftest.py:9: need --runslow option to run
|
||||
SKIP [1] /Users/hpk/tmp/doc-exec-158/conftest.py:9: need --runslow option to run
|
||||
|
||||
=================== 1 passed, 1 skipped in 0.02 seconds ====================
|
||||
=================== 1 passed, 1 skipped in 0.09 seconds ====================
|
||||
|
||||
Or run it including the ``slow`` marked test::
|
||||
|
||||
$ py.test --runslow
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_module.py ..
|
||||
@@ -213,7 +213,7 @@ Let's run our little function::
|
||||
E Failed: not configured: 42
|
||||
|
||||
test_checkconfig.py:8: Failed
|
||||
1 failed in 0.02 seconds
|
||||
1 failed in 0.07 seconds
|
||||
|
||||
Detect if running from within a py.test run
|
||||
--------------------------------------------------------------
|
||||
@@ -240,9 +240,9 @@ and then check for the ``sys._called_from_test`` flag::
|
||||
# called from within a test run
|
||||
else:
|
||||
# called "normally"
|
||||
|
||||
|
||||
accordingly in your application. It's also a good idea
|
||||
to rather use your own application module rather than ``sys``
|
||||
to use your own application module rather than ``sys``
|
||||
for handling flag.
|
||||
|
||||
Adding info to test report header
|
||||
@@ -261,11 +261,11 @@ which will add the string to the test header accordingly::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
project deps: mylib-1.1
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
@@ -275,7 +275,7 @@ information on e.g. the value of ``config.option.verbose`` so that
|
||||
you present more information appropriately::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
|
||||
def pytest_report_header(config):
|
||||
if config.option.verbose > 0:
|
||||
return ["info1: did you know that ...", "did you?"]
|
||||
@@ -284,18 +284,56 @@ which will add info only when run with "--v"::
|
||||
|
||||
$ py.test -v
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3 -- /Users/hpk/venv/0/bin/python
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
info1: did you know that ...
|
||||
did you?
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
============================= in 0.03 seconds =============================
|
||||
|
||||
and nothing when run plainly::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
profiling test duration
|
||||
--------------------------
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
.. versionadded: 2.2
|
||||
|
||||
If you have a slow running large test suite you might want to find
|
||||
out which tests are the slowest. Let's make an artifical test suite::
|
||||
|
||||
# content of test_some_are_slow.py
|
||||
|
||||
import time
|
||||
|
||||
def test_funcfast():
|
||||
pass
|
||||
|
||||
def test_funcslow1():
|
||||
time.sleep(0.1)
|
||||
|
||||
def test_funcslow2():
|
||||
time.sleep(0.2)
|
||||
|
||||
Now we can profile which test functions execute the slowest::
|
||||
|
||||
$ py.test --durations=3
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 3 items
|
||||
|
||||
test_some_are_slow.py ...
|
||||
|
||||
========================= slowest 3 test durations =========================
|
||||
0.20s call test_some_are_slow.py::test_funcslow2
|
||||
0.10s call test_some_are_slow.py::test_funcslow1
|
||||
0.00s call test_some_are_slow.py::test_funcfast
|
||||
========================= 3 passed in 0.33 seconds =========================
|
||||
|
||||
12
doc/faq.txt
12
doc/faq.txt
@@ -18,7 +18,7 @@ utilities, all starting with ``py.<TAB>``, thus providing nice
|
||||
TAB-completion. If
|
||||
you install ``pip install pycmd`` you get these tools from a separate
|
||||
package. These days the command line tool could be called ``pytest``
|
||||
since many people have gotten used to the old name and there
|
||||
but since many people have gotten used to the old name and there
|
||||
is another tool named "pytest" we just decided to stick with
|
||||
``py.test``.
|
||||
|
||||
@@ -47,14 +47,14 @@ customizable testing frameworks for Python. However,
|
||||
``py.test`` still uses many metaprogramming techniques and
|
||||
reading its source is thus likely not something for Python beginners.
|
||||
|
||||
A second "magic" issue arguably the assert statement debugging feature. When
|
||||
A second "magic" issue is arguably the assert statement debugging feature. When
|
||||
loading test modules py.test rewrites the source code of assert statements. When
|
||||
a rewritten assert statement fails, its error message has more information than
|
||||
the original. py.test also has a second assert debugging technique. When an
|
||||
``assert`` statement that was missed by the rewriter fails, py.test
|
||||
re-interprets the expression to show intermediate values if a test fails. This
|
||||
second technique suffers from caveat that the rewriting does not: If your
|
||||
expression has side effects (better to avoid them anyway!) the intermediate
|
||||
second technique suffers from a caveat that the rewriting does not: If your
|
||||
expression has side effects (better to avoid them anyway!) the intermediate
|
||||
values may not be the same, confusing the reinterpreter and obfuscating the
|
||||
initial error (this is also explained at the command line if it happens).
|
||||
You can turn off all assertion debugging with ``py.test --assertmode=off``.
|
||||
@@ -91,8 +91,8 @@ Why the ``pytest_funcarg__*`` name for funcarg factories?
|
||||
|
||||
We like `Convention over Configuration`_ and didn't see much point
|
||||
in allowing a more flexible or abstract mechanism. Moreover,
|
||||
is is nice to be able to search for ``pytest_funcarg__MYARG`` in
|
||||
a source code and safely find all factory functions for
|
||||
it is nice to be able to search for ``pytest_funcarg__MYARG`` in
|
||||
source code and safely find all factory functions for
|
||||
the ``MYARG`` function argument.
|
||||
|
||||
.. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration
|
||||
|
||||
@@ -16,7 +16,7 @@ control their life cycle in relation to the test execution. It is
|
||||
also possible to run a test function multiple times with different objects.
|
||||
|
||||
The basic mechanism for injecting objects is also called the
|
||||
*funcarg mechanism* because objects are ultimatly injected
|
||||
*funcarg mechanism* because objects are ultimately injected
|
||||
by calling a test function with it as an argument. Unlike the
|
||||
classical xUnit approach *funcargs* relate more to `Dependency Injection`_
|
||||
because they help to de-couple test code from objects required for
|
||||
@@ -38,6 +38,7 @@ very useful if you want to test e.g. against different database backends
|
||||
or with multiple numerical arguments sets and want to reuse the same set
|
||||
of test functions.
|
||||
|
||||
py.test comes with :ref:`builtinfuncargs` and there are some refined usages in the examples section.
|
||||
|
||||
.. _funcarg:
|
||||
|
||||
@@ -61,7 +62,7 @@ Running the test looks like this::
|
||||
|
||||
$ py.test test_simplefactory.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_simplefactory.py F
|
||||
@@ -76,7 +77,7 @@ Running the test looks like this::
|
||||
E assert 42 == 17
|
||||
|
||||
test_simplefactory.py:5: AssertionError
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
========================= 1 failed in 0.03 seconds =========================
|
||||
|
||||
This means that indeed the test function was called with a ``myfuncarg``
|
||||
argument value of ``42`` and the assert fails. Here is how py.test
|
||||
@@ -118,9 +119,9 @@ think of as "resources").
|
||||
The funcarg **request** object
|
||||
=============================================
|
||||
|
||||
Each funcarg factory receives a **request** object which is tied to a
|
||||
specific test function call. A request object is passed to a funcarg
|
||||
factory and provides access to test configuration and context:
|
||||
Each funcarg factory receives a **request** object tied to a specific test
|
||||
function call. A request object is passed to a funcarg factory and provides
|
||||
access to test configuration and context:
|
||||
|
||||
.. autoclass:: _pytest.python.FuncargRequest()
|
||||
:members: function,cls,module,keywords,config
|
||||
@@ -157,17 +158,16 @@ hook to generate several calls to the same test function::
|
||||
# content of test_example.py
|
||||
def pytest_generate_tests(metafunc):
|
||||
if "numiter" in metafunc.funcargnames:
|
||||
for i in range(10):
|
||||
metafunc.addcall(funcargs=dict(numiter=i))
|
||||
metafunc.parametrize("numiter", range(10))
|
||||
|
||||
def test_func(numiter):
|
||||
assert numiter < 9
|
||||
|
||||
Running this::
|
||||
Running this will generate ten invocations of ``test_func`` passing in each of the items in the list of ``range(10)``::
|
||||
|
||||
$ py.test test_example.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 10 items
|
||||
|
||||
test_example.py .........F
|
||||
@@ -181,16 +181,16 @@ Running this::
|
||||
> assert numiter < 9
|
||||
E assert 9 < 9
|
||||
|
||||
test_example.py:7: AssertionError
|
||||
==================== 1 failed, 9 passed in 0.04 seconds ====================
|
||||
test_example.py:6: AssertionError
|
||||
==================== 1 failed, 9 passed in 0.07 seconds ====================
|
||||
|
||||
Note that the ``pytest_generate_tests(metafunc)`` hook is called during
|
||||
Obviously, only when ``numiter`` has the value of ``9`` does the test fail. Note that the ``pytest_generate_tests(metafunc)`` hook is called during
|
||||
the test collection phase which is separate from the actual test running.
|
||||
Let's just look at what is collected::
|
||||
|
||||
$ py.test --collectonly test_example.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 10 items
|
||||
<Module 'test_example.py'>
|
||||
<Function 'test_func[0]'>
|
||||
@@ -210,31 +210,13 @@ If you want to select only the run with the value ``7`` you could do::
|
||||
|
||||
$ py.test -v -k 7 test_example.py # or -k test_func[7]
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3 -- /Users/hpk/venv/0/bin/python
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
collecting ... collected 10 items
|
||||
|
||||
test_example.py:6: test_func[0] PASSED
|
||||
test_example.py:6: test_func[1] PASSED
|
||||
test_example.py:6: test_func[2] PASSED
|
||||
test_example.py:6: test_func[3] PASSED
|
||||
test_example.py:6: test_func[4] PASSED
|
||||
test_example.py:6: test_func[5] PASSED
|
||||
test_example.py:6: test_func[6] PASSED
|
||||
test_example.py:6: test_func[7] PASSED
|
||||
test_example.py:6: test_func[8] PASSED
|
||||
test_example.py:6: test_func[9] FAILED
|
||||
test_example.py:5: test_func[7] PASSED
|
||||
|
||||
================================= FAILURES =================================
|
||||
_______________________________ test_func[9] _______________________________
|
||||
|
||||
numiter = 9
|
||||
|
||||
def test_func(numiter):
|
||||
> assert numiter < 9
|
||||
E assert 9 < 9
|
||||
|
||||
test_example.py:7: AssertionError
|
||||
==================== 1 failed, 9 passed in 0.05 seconds ====================
|
||||
======================= 9 tests deselected by '-k7' ========================
|
||||
================== 1 passed, 9 deselected in 0.01 seconds ==================
|
||||
|
||||
You might want to look at :ref:`more parametrization examples <paramexamples>`.
|
||||
|
||||
@@ -258,4 +240,5 @@ in the class or module where a test function is defined:
|
||||
|
||||
``metafunc.config``: access to command line opts and general config
|
||||
|
||||
.. automethod:: Metafunc.addcall(funcargs=None, id=_notexists, param=_notexists)
|
||||
.. automethod:: Metafunc.parametrize
|
||||
.. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists)
|
||||
|
||||
@@ -22,10 +22,10 @@ Installation options::
|
||||
To check your installation has installed the correct version::
|
||||
|
||||
$ py.test --version
|
||||
This is py.test version 2.1.3, imported from /Users/hpk/p/pytest/pytest.pyc
|
||||
This is py.test version 2.2.2, imported from /Users/hpk/p/pytest/pytest.pyc
|
||||
setuptools registered plugins:
|
||||
pytest-xdist-1.8 at /Users/hpk/p/pytest-xdist/xdist/plugin.pyc
|
||||
pytest-cov-1.4 at /Users/hpk/venv/0/lib/python2.7/site-packages/pytest_cov.pyc
|
||||
pytest-xdist-1.6 at /Users/hpk/venv/0/lib/python2.7/site-packages/xdist/plugin.pyc
|
||||
|
||||
If you get an error checkout :ref:`installation issues`.
|
||||
|
||||
@@ -47,7 +47,7 @@ That's it. You can execute the test function now::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_sample.py F
|
||||
@@ -127,7 +127,7 @@ run the module by passing its filename::
|
||||
================================= FAILURES =================================
|
||||
____________________________ TestClass.test_two ____________________________
|
||||
|
||||
self = <test_class.TestClass instance at 0x1013167a0>
|
||||
self = <test_class.TestClass instance at 0x1013225a8>
|
||||
|
||||
def test_two(self):
|
||||
x = "hello"
|
||||
@@ -164,7 +164,7 @@ before performing the test function call. Let's just run it::
|
||||
================================= FAILURES =================================
|
||||
_____________________________ test_needsfiles ______________________________
|
||||
|
||||
tmpdir = local('/Users/hpk/tmp/pytest-93/test_needsfiles0')
|
||||
tmpdir = local('/Users/hpk/tmp/pytest-20/test_needsfiles0')
|
||||
|
||||
def test_needsfiles(tmpdir):
|
||||
print tmpdir
|
||||
@@ -173,8 +173,8 @@ before performing the test function call. Let's just run it::
|
||||
|
||||
test_tmpdir.py:3: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
/Users/hpk/tmp/pytest-93/test_needsfiles0
|
||||
1 failed in 0.04 seconds
|
||||
/Users/hpk/tmp/pytest-20/test_needsfiles0
|
||||
1 failed in 0.11 seconds
|
||||
|
||||
Before the test runs, a unique-per-test-invocation temporary directory
|
||||
was created. More info at :ref:`tmpdir handling`.
|
||||
|
||||
@@ -21,7 +21,7 @@ dependency configurations or Python interpreters is `tox`_.
|
||||
Use tox and Continuous Integration servers
|
||||
-------------------------------------------------
|
||||
|
||||
If you frequently relase code to the public you
|
||||
If you frequently release code to the public you
|
||||
may want to look into `tox`_, the virtualenv test automation
|
||||
tool and its `pytest support <http://codespeak.net/tox/example/pytest.html>`_.
|
||||
The basic idea is to generate a JUnitXML file through the ``--junitxml=PATH`` option and have a continuous integration server like Jenkins_ pick it up
|
||||
@@ -30,7 +30,7 @@ and generate reports.
|
||||
.. _standalone:
|
||||
.. _`genscript method`:
|
||||
|
||||
Create a py.test standalone Script
|
||||
Create a py.test standalone script
|
||||
-------------------------------------------
|
||||
|
||||
If you are a maintainer or application developer and want others
|
||||
@@ -94,6 +94,40 @@ options.
|
||||
.. _`test discovery`:
|
||||
.. _`Python test discovery`:
|
||||
|
||||
|
||||
Integration with setuptools/distribute test commands
|
||||
----------------------------------------------------
|
||||
|
||||
Distribute/Setuptools support test requirements,
|
||||
which means its really easy to extend its test command
|
||||
to support running a pytest from test requirements::
|
||||
|
||||
from setuptools.command.test import test as TestCommand
|
||||
|
||||
class PyTest(TestCommand):
|
||||
def finalize_options(self):
|
||||
TestCommand.finalize_options(self)
|
||||
self.test_args = []
|
||||
self.test_suite = True
|
||||
def run_tests(self):
|
||||
#import here, cause outside the eggs aren't loaded
|
||||
import pytest
|
||||
pytest.main(self.test_args)
|
||||
|
||||
setup(
|
||||
#...,
|
||||
tests_require=['pytest'],
|
||||
cmdclass = {'test': pytest},
|
||||
)
|
||||
|
||||
Now if you run::
|
||||
|
||||
python setup.py test
|
||||
|
||||
this will download py.test if needed and then run py.test
|
||||
as you would expect it to.
|
||||
|
||||
|
||||
Conventions for Python test discovery
|
||||
-------------------------------------------------
|
||||
|
||||
@@ -159,7 +193,7 @@ You can run your tests by pointing to it::
|
||||
|
||||
* find ``basedir`` -- this is the first "upward" (towards the root)
|
||||
directory not containing an ``__init__.py``. If both the ``a``
|
||||
and ``b`` directories contain an ``__init__.py`` the basedir will
|
||||
and ``b`` directories contain an ``__init__.py`` the basedir will
|
||||
be the parent dir of ``a``.
|
||||
|
||||
* perform ``sys.path.insert(0, basedir)`` to make the test module
|
||||
|
||||
@@ -25,14 +25,15 @@ Welcome to pytest!
|
||||
|
||||
- **supports functional testing and complex test setups**
|
||||
|
||||
- (new in 2.2) :ref:`durations`
|
||||
- (much improved in 2.2) :ref:`marking and test selection <mark>`
|
||||
- (improved in 2.2) :ref:`parametrized test functions <parametrized test functions>`
|
||||
- advanced :ref:`skip and xfail`
|
||||
- generic :ref:`marking and test selection <mark>`
|
||||
- unique :ref:`dependency injection through funcargs <funcargs>`
|
||||
- can :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
|
||||
- can :ref:`continuously re-run failing tests <looponfailing>`
|
||||
- many :ref:`builtin helpers <pytest helpers>`
|
||||
- flexible :ref:`Python test discovery`
|
||||
- unique :ref:`dependency injection through funcargs <funcargs>`
|
||||
- :ref:`parametrized test functions <parametrized test functions>`
|
||||
|
||||
- **integrates many common testing methods**
|
||||
|
||||
|
||||
119
doc/mark.txt
119
doc/mark.txt
@@ -6,117 +6,20 @@ Marking test functions with attributes
|
||||
|
||||
.. currentmodule:: _pytest.mark
|
||||
|
||||
By using the ``pytest.mark`` helper you can instantiate
|
||||
decorators that will set named metadata on test functions.
|
||||
By using the ``pytest.mark`` helper you can easily set
|
||||
metadata on your test functions. There are
|
||||
some builtin markers, for example:
|
||||
|
||||
Marking a single function
|
||||
----------------------------------------------------
|
||||
* :ref:`skipif <skipif>` - skip a test function if a certain condition is met
|
||||
* :ref:`xfail <xfail>` - produce an "expected failure" outcome if a certain
|
||||
condition is met
|
||||
* :ref:`parametrize <parametrizemark>` to perform multiple calls
|
||||
to the same test function.
|
||||
|
||||
You can "mark" a test function with metadata like this::
|
||||
It's easy to create custom markers or to apply markers
|
||||
to whole test classes or modules. See :ref:`mark examples` for examples
|
||||
which also serve as documentation.
|
||||
|
||||
import pytest
|
||||
@pytest.mark.webtest
|
||||
def test_send_http():
|
||||
...
|
||||
|
||||
This will set the function attribute ``webtest`` to a :py:class:`MarkInfo`
|
||||
instance. You can also specify parametrized metadata like this::
|
||||
|
||||
# content of test_mark.py
|
||||
|
||||
import pytest
|
||||
@pytest.mark.webtest(firefox=30)
|
||||
def test_receive():
|
||||
pass
|
||||
|
||||
@pytest.mark.webtest("functional", firefox=30)
|
||||
def test_run_and_look():
|
||||
pass
|
||||
|
||||
and access it from other places like this::
|
||||
|
||||
test_receive.webtest.kwargs['firefox'] == 30
|
||||
test_run_and_look.webtest.args[0] == "functional"
|
||||
|
||||
.. _`scoped-marking`:
|
||||
|
||||
Marking whole classes or modules
|
||||
----------------------------------------------------
|
||||
|
||||
If you are programming with Python2.6 you may use ``pytest.mark`` decorators
|
||||
with classes to apply markers to all of its test methods::
|
||||
|
||||
# content of test_mark_classlevel.py
|
||||
import pytest
|
||||
@pytest.mark.webtest
|
||||
class TestClass:
|
||||
def test_startup(self):
|
||||
pass
|
||||
def test_startup_and_more(self):
|
||||
pass
|
||||
|
||||
This is equivalent to directly applying the decorator to the
|
||||
two test functions.
|
||||
|
||||
To remain compatible with Python2.5 you can also set a
|
||||
``pytestmark`` attribute on a TestClass like this::
|
||||
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
pytestmark = pytest.mark.webtest
|
||||
|
||||
or if you need to use multiple markers you can use a list::
|
||||
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]
|
||||
|
||||
You can also set a module level marker::
|
||||
|
||||
import pytest
|
||||
pytestmark = pytest.mark.webtest
|
||||
|
||||
in which case it will be applied to all functions and
|
||||
methods defined in the module.
|
||||
|
||||
Using ``-k TEXT`` to select tests
|
||||
----------------------------------------------------
|
||||
|
||||
You can use the ``-k`` command line option to select tests::
|
||||
|
||||
$ py.test -k webtest # running with the above defined examples yields
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_mark.py ..
|
||||
test_mark_classlevel.py ..
|
||||
|
||||
========================= 4 passed in 0.03 seconds =========================
|
||||
|
||||
And you can also run all tests except the ones that match the keyword::
|
||||
|
||||
$ py.test -k-webtest
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
collecting ... collected 4 items
|
||||
|
||||
===================== 4 tests deselected by '-webtest' =====================
|
||||
======================= 4 deselected in 0.02 seconds =======================
|
||||
|
||||
Or to only select the class::
|
||||
|
||||
$ py.test -kTestClass
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_mark_classlevel.py ..
|
||||
|
||||
==================== 2 tests deselected by 'TestClass' =====================
|
||||
================== 2 passed, 2 deselected in 0.02 seconds ==================
|
||||
|
||||
API reference for mark related objects
|
||||
------------------------------------------------
|
||||
|
||||
@@ -14,14 +14,14 @@ and a discussion of its motivation.
|
||||
|
||||
.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
|
||||
|
||||
|
||||
Simple example: patching ``os.path.expanduser``
|
||||
Simple example: monkeypatching functions
|
||||
---------------------------------------------------
|
||||
|
||||
If, for instance, you want to pretend that ``os.expanduser`` returns a certain
|
||||
If you want to pretend that ``os.expanduser`` returns a certain
|
||||
directory, you can use the :py:meth:`monkeypatch.setattr` method to
|
||||
patch this function before calling into a function which uses it::
|
||||
|
||||
# content of test_module.py
|
||||
import os.path
|
||||
def getssh(): # pseudo application code
|
||||
return os.path.join(os.path.expanduser("~admin"), '.ssh')
|
||||
@@ -33,22 +33,15 @@ patch this function before calling into a function which uses it::
|
||||
x = getssh()
|
||||
assert x == '/abc/.ssh'
|
||||
|
||||
After the test function finishes the ``os.path.expanduser`` modification
|
||||
will be undone.
|
||||
|
||||
.. background check:
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
Here our test function monkeypatches ``os.path.expanduser`` and
|
||||
then calls into an function that calls it. After the test function
|
||||
finishes the ``os.path.expanduser`` modification will be undone.
|
||||
|
||||
Method reference of the monkeypatch function argument
|
||||
-----------------------------------------------------
|
||||
|
||||
.. autoclass:: monkeypatch
|
||||
:members: setattr, delattr, setitem, delitem, setenv, delenv, syspath_prepend, undo
|
||||
:members: setattr, delattr, setitem, delitem, setenv, delenv, syspath_prepend, chdir, undo
|
||||
|
||||
``monkeypatch.setattr/delattr/delitem/delenv()`` all
|
||||
by default raise an Exception if the target does not exist.
|
||||
|
||||
@@ -264,7 +264,7 @@ specification. However, a hook function may accept *fewer* parameters
|
||||
by simply not specifying them. If you mistype argument names or the
|
||||
hook name itself you get an error showing the available arguments.
|
||||
|
||||
Initialisation, command line and configuration hooks
|
||||
Initialization, command line and configuration hooks
|
||||
--------------------------------------------------------------------
|
||||
|
||||
.. currentmodule:: _pytest.hookspec
|
||||
@@ -327,7 +327,6 @@ test execution:
|
||||
|
||||
.. autofunction: pytest_runtest_logreport
|
||||
|
||||
|
||||
Reference of important objects involved in hooks
|
||||
===========================================================
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
Asserting deprecation and other warnings.
|
||||
Asserting deprecation and other warnings
|
||||
=====================================================
|
||||
|
||||
The recwarn function argument
|
||||
@@ -35,4 +35,3 @@ warning::
|
||||
|
||||
def test_global():
|
||||
pytest.deprecated_call(myfunction, 17)
|
||||
|
||||
|
||||
@@ -130,7 +130,7 @@ Running it with the report-on-xfail option gives this output::
|
||||
|
||||
example $ py.test -rx xfail_demo.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 6 items
|
||||
|
||||
xfail_demo.py xxxxxx
|
||||
@@ -147,7 +147,7 @@ Running it with the report-on-xfail option gives this output::
|
||||
XFAIL xfail_demo.py::test_hello6
|
||||
reason: reason
|
||||
|
||||
======================== 6 xfailed in 0.11 seconds =========================
|
||||
======================== 6 xfailed in 0.16 seconds =========================
|
||||
|
||||
.. _`evaluation of skipif/xfail conditions`:
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ Function arguments:
|
||||
|
||||
Test parametrization:
|
||||
|
||||
- `generating parametrized tests with funcargs`_
|
||||
- `generating parametrized tests with funcargs`_ (uses deprecated ``addcall()`` API.
|
||||
- `test generators and cached setup`_
|
||||
- `parametrizing tests, generalized`_ (blog post)
|
||||
- `putting test-hooks into local or global plugins`_ (blog post)
|
||||
|
||||
@@ -35,10 +35,11 @@ However easy_install does not provide an uninstall facility.
|
||||
|
||||
.. IMPORTANT::
|
||||
|
||||
Ensure that you manually delete the init_covmain.pth file in your site-packages directory.
|
||||
Ensure that you manually delete the init_covmain.pth file in your
|
||||
site-packages directory.
|
||||
|
||||
This file starts coverage collection of subprocesses if appropriate during site initialisation
|
||||
at python startup.
|
||||
This file starts coverage collection of subprocesses if appropriate during
|
||||
site initialization at python startup.
|
||||
|
||||
|
||||
Usage
|
||||
@@ -198,7 +199,7 @@ slave.
|
||||
|
||||
For subprocess measurement environment variables must make it from the main process to the
|
||||
subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must
|
||||
do normal site initialisation so that the environment variables can be detected and coverage
|
||||
do normal site initialization so that the environment variables can be detected and coverage
|
||||
started.
|
||||
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ Running this would result in a passed test except for the last
|
||||
|
||||
$ py.test test_tmpdir.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_tmpdir.py F
|
||||
@@ -36,7 +36,7 @@ Running this would result in a passed test except for the last
|
||||
================================= FAILURES =================================
|
||||
_____________________________ test_create_file _____________________________
|
||||
|
||||
tmpdir = local('/Users/hpk/tmp/pytest-94/test_create_file0')
|
||||
tmpdir = local('/Users/hpk/tmp/pytest-21/test_create_file0')
|
||||
|
||||
def test_create_file(tmpdir):
|
||||
p = tmpdir.mkdir("sub").join("hello.txt")
|
||||
@@ -47,7 +47,7 @@ Running this would result in a passed test except for the last
|
||||
E assert 0
|
||||
|
||||
test_tmpdir.py:7: AssertionError
|
||||
========================= 1 failed in 0.05 seconds =========================
|
||||
========================= 1 failed in 0.07 seconds =========================
|
||||
|
||||
.. _`base temporary directory`:
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ Running it yields::
|
||||
|
||||
$ py.test test_unittest.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.1.3
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_unittest.py F
|
||||
@@ -42,7 +42,7 @@ Running it yields::
|
||||
test_unittest.py:8: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
hello
|
||||
========================= 1 failed in 0.04 seconds =========================
|
||||
========================= 1 failed in 0.15 seconds =========================
|
||||
|
||||
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ Stopping after the first (or N) failures
|
||||
To stop the testing process after the first (N) failures::
|
||||
|
||||
py.test -x # stop after first failure
|
||||
py.test -maxfail=2 # stop after two failures
|
||||
py.test --maxfail=2 # stop after two failures
|
||||
|
||||
Specifying tests / selecting tests
|
||||
---------------------------------------------------
|
||||
@@ -70,7 +70,7 @@ Dropping to PDB (Python Debugger) on failures
|
||||
.. _PDB: http://docs.python.org/library/pdb.html
|
||||
|
||||
Python comes with a builtin Python debugger called PDB_. ``py.test``
|
||||
allows to drop into the PDB prompt via a command line option::
|
||||
allows one to drop into the PDB prompt via a command line option::
|
||||
|
||||
py.test --pdb
|
||||
|
||||
@@ -96,7 +96,19 @@ can use a helper::
|
||||
.. versionadded: 2.0.0
|
||||
|
||||
In previous versions you could only enter PDB tracing if
|
||||
you disable capturing on the command line via ``py.test -s``.
|
||||
you disabled capturing on the command line via ``py.test -s``.
|
||||
|
||||
.. _durations:
|
||||
|
||||
Profiling test execution duration
|
||||
-------------------------------------
|
||||
|
||||
.. versionadded: 2.2
|
||||
|
||||
To get a list of the slowest 10 test durations::
|
||||
|
||||
py.test --durations=10
|
||||
|
||||
|
||||
Creating JUnitXML format files
|
||||
----------------------------------------------------
|
||||
|
||||
@@ -24,13 +24,11 @@ module you can optionally implement the following fixture methods
|
||||
which will usually be called once for all the functions::
|
||||
|
||||
def setup_module(module):
|
||||
""" setup up any state specific to the execution
|
||||
of the given module.
|
||||
"""
|
||||
""" setup any state specific to the execution of the given module."""
|
||||
|
||||
def teardown_module(module):
|
||||
""" teardown any state that was previously setup
|
||||
with a setup_module method.
|
||||
""" teardown any state that was previously setup with a setup_module
|
||||
method.
|
||||
"""
|
||||
|
||||
Class level setup/teardown
|
||||
@@ -41,14 +39,14 @@ and after all test methods of the class are called::
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
""" setup up any state specific to the execution
|
||||
of the given class (which usually contains tests).
|
||||
""" setup any state specific to the execution of the given class (which
|
||||
usually contains tests).
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
""" teardown any state that was previously setup
|
||||
with a call to setup_class.
|
||||
""" teardown any state that was previously setup with a call to
|
||||
setup_class.
|
||||
"""
|
||||
|
||||
Method and function level setup/teardown
|
||||
@@ -57,31 +55,29 @@ Method and function level setup/teardown
|
||||
Similarly, the following methods are called around each method invocation::
|
||||
|
||||
def setup_method(self, method):
|
||||
""" setup up any state tied to the execution of the given
|
||||
method in a class. setup_method is invoked for every
|
||||
test method of a class.
|
||||
""" setup any state tied to the execution of the given method in a
|
||||
class. setup_method is invoked for every test method of a class.
|
||||
"""
|
||||
|
||||
def teardown_method(self, method):
|
||||
""" teardown any state that was previously setup
|
||||
with a setup_method call.
|
||||
""" teardown any state that was previously setup with a setup_method
|
||||
call.
|
||||
"""
|
||||
|
||||
If you would rather define test functions directly at module level
|
||||
you can also use the following functions to implement fixtures::
|
||||
|
||||
def setup_function(function):
|
||||
""" setup up any state tied to the execution of the given
|
||||
function. Invoked for every test function in the module.
|
||||
""" setup any state tied to the execution of the given function.
|
||||
Invoked for every test function in the module.
|
||||
"""
|
||||
|
||||
def teardown_function(function):
|
||||
""" teardown any state that was previously setup
|
||||
with a setup_function call.
|
||||
""" teardown any state that was previously setup with a setup_function
|
||||
call.
|
||||
"""
|
||||
|
||||
Note that it possible that setup/teardown pairs are invoked multiple
|
||||
times per testing process.
|
||||
|
||||
Note that it is possible for setup/teardown pairs to be invoked multiple times
|
||||
per testing process.
|
||||
|
||||
.. _`unittest.py module`: http://docs.python.org/library/unittest.html
|
||||
|
||||
|
||||
8
setup.py
8
setup.py
@@ -11,20 +11,20 @@ cross-project testing tool for Python.
|
||||
|
||||
Platforms: Linux, Win32, OSX
|
||||
|
||||
Interpreters: Python versions 2.4 through to 3.2, Jython 2.5.1 and PyPy-1.5
|
||||
Interpreters: Python versions 2.4 through to 3.2, Jython 2.5.1 and PyPy-1.6/1.7
|
||||
|
||||
Bugs and issues: http://bitbucket.org/hpk42/pytest/issues/
|
||||
|
||||
Web page: http://pytest.org
|
||||
|
||||
(c) Holger Krekel and others, 2004-2011
|
||||
(c) Holger Krekel and others, 2004-2012
|
||||
"""
|
||||
def main():
|
||||
setup(
|
||||
name='pytest',
|
||||
description='py.test: simple powerful testing with Python',
|
||||
long_description = long_description,
|
||||
version='2.1.3',
|
||||
version='2.2.4',
|
||||
url='http://pytest.org',
|
||||
license='MIT license',
|
||||
platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
|
||||
@@ -32,7 +32,7 @@ def main():
|
||||
author_email='holger at merlinux.eu',
|
||||
entry_points= make_entry_points(),
|
||||
# the following should be enabled for release
|
||||
install_requires=['py>=1.4.5'],
|
||||
install_requires=['py>=1.4.8'],
|
||||
classifiers=['Development Status :: 6 - Mature',
|
||||
'Intended Audience :: Developers',
|
||||
'License :: OSI Approved :: MIT License',
|
||||
|
||||
@@ -13,6 +13,12 @@ class TestGeneralUsage:
|
||||
'*ERROR: hello'
|
||||
])
|
||||
|
||||
def test_root_conftest_syntax_error(self, testdir):
|
||||
p = testdir.makepyfile(conftest="raise SyntaxError\n")
|
||||
result = testdir.runpytest()
|
||||
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
|
||||
assert result.ret != 0
|
||||
|
||||
def test_early_hook_error_issue38_1(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_sessionstart():
|
||||
@@ -51,6 +57,22 @@ class TestGeneralUsage:
|
||||
assert result.ret != 0
|
||||
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
|
||||
|
||||
def test_file_not_found_unconfigure_issue143(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_configure():
|
||||
print("---configure")
|
||||
def pytest_unconfigure():
|
||||
print("---unconfigure")
|
||||
""")
|
||||
result = testdir.runpytest("-s", "asd")
|
||||
assert result.ret == 4 # EXIT_USAGEERROR
|
||||
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
|
||||
s = result.stdout.fnmatch_lines([
|
||||
"*---configure",
|
||||
"*---unconfigure",
|
||||
])
|
||||
|
||||
|
||||
def test_config_preparse_plugin_option(self, testdir):
|
||||
testdir.makepyfile(pytest_xyz="""
|
||||
def pytest_addoption(parser):
|
||||
@@ -354,24 +376,24 @@ class TestInvocationVariants:
|
||||
def test_equivalence_pytest_pytest(self):
|
||||
assert pytest.main == py.test.cmdline.main
|
||||
|
||||
def test_invoke_with_string(self, capsys):
|
||||
retcode = pytest.main("-h")
|
||||
def test_invoke_with_string(self, testdir, capsys):
|
||||
retcode = testdir.pytestmain("-h")
|
||||
assert not retcode
|
||||
out, err = capsys.readouterr()
|
||||
assert "--help" in out
|
||||
pytest.raises(ValueError, lambda: pytest.main(retcode))
|
||||
pytest.raises(ValueError, lambda: pytest.main(0))
|
||||
|
||||
def test_invoke_with_path(self, testdir, capsys):
|
||||
retcode = testdir.pytestmain(testdir.tmpdir)
|
||||
assert not retcode
|
||||
out, err = capsys.readouterr()
|
||||
|
||||
def test_invoke_plugin_api(self, capsys):
|
||||
def test_invoke_plugin_api(self, testdir, capsys):
|
||||
class MyPlugin:
|
||||
def pytest_addoption(self, parser):
|
||||
parser.addoption("--myopt")
|
||||
|
||||
pytest.main(["-h"], plugins=[MyPlugin()])
|
||||
testdir.pytestmain(["-h"], plugins=[MyPlugin()])
|
||||
out, err = capsys.readouterr()
|
||||
assert "--myopt" in out
|
||||
|
||||
@@ -403,11 +425,21 @@ class TestInvocationVariants:
|
||||
result.stdout.fnmatch_lines([
|
||||
"*1 passed*"
|
||||
])
|
||||
|
||||
def join_pythonpath(what):
|
||||
cur = py.std.os.environ.get('PYTHONPATH')
|
||||
if cur:
|
||||
return str(what) + ':' + cur
|
||||
return what
|
||||
empty_package = testdir.mkpydir("empty_package")
|
||||
monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))
|
||||
result = testdir.runpytest("--pyargs", ".")
|
||||
assert result.ret == 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*2 passed*"
|
||||
])
|
||||
|
||||
monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir))
|
||||
path.join('test_hello.py').remove()
|
||||
result = testdir.runpytest("--pyargs", "tpkg.test_hello")
|
||||
assert result.ret != 0
|
||||
@@ -454,3 +486,92 @@ class TestInvocationVariants:
|
||||
"*1 failed*",
|
||||
])
|
||||
|
||||
class TestDurations:
|
||||
source = """
|
||||
import time
|
||||
frag = 0.02
|
||||
def test_2():
|
||||
time.sleep(frag*5)
|
||||
def test_1():
|
||||
time.sleep(frag)
|
||||
def test_3():
|
||||
time.sleep(frag*10)
|
||||
"""
|
||||
|
||||
def test_calls(self, testdir):
|
||||
testdir.makepyfile(self.source)
|
||||
result = testdir.runpytest("--durations=10")
|
||||
assert result.ret == 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*durations*",
|
||||
"*call*test_3*",
|
||||
"*call*test_2*",
|
||||
"*call*test_1*",
|
||||
])
|
||||
|
||||
def test_calls_show_2(self, testdir):
|
||||
testdir.makepyfile(self.source)
|
||||
result = testdir.runpytest("--durations=2")
|
||||
assert result.ret == 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*durations*",
|
||||
"*call*test_3*",
|
||||
"*call*test_2*",
|
||||
])
|
||||
assert "test_1" not in result.stdout.str()
|
||||
|
||||
def test_calls_showall(self, testdir):
|
||||
testdir.makepyfile(self.source)
|
||||
result = testdir.runpytest("--durations=0")
|
||||
assert result.ret == 0
|
||||
for x in "123":
|
||||
for y in 'call',: #'setup', 'call', 'teardown':
|
||||
l = []
|
||||
for line in result.stdout.lines:
|
||||
if ("test_%s" % x) in line and y in line:
|
||||
break
|
||||
else:
|
||||
raise AssertionError("not found %s %s" % (x,y))
|
||||
|
||||
def test_with_deselected(self, testdir):
|
||||
testdir.makepyfile(self.source)
|
||||
result = testdir.runpytest("--durations=2", "-k test_1")
|
||||
assert result.ret == 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*durations*",
|
||||
"*call*test_1*",
|
||||
])
|
||||
|
||||
def test_with_failing_collection(self, testdir):
|
||||
testdir.makepyfile(self.source)
|
||||
testdir.makepyfile(test_collecterror="""xyz""")
|
||||
result = testdir.runpytest("--durations=2", "-k test_1")
|
||||
assert result.ret != 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*durations*",
|
||||
"*call*test_1*",
|
||||
])
|
||||
|
||||
|
||||
class TestDurationWithFixture:
|
||||
source = """
|
||||
import time
|
||||
frag = 0.01
|
||||
def setup_function(func):
|
||||
time.sleep(frag * 3)
|
||||
def test_1():
|
||||
time.sleep(frag*2)
|
||||
def test_2():
|
||||
time.sleep(frag)
|
||||
"""
|
||||
def test_setup_function(self, testdir):
|
||||
testdir.makepyfile(self.source)
|
||||
result = testdir.runpytest("--durations=10")
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
"*durations*",
|
||||
"* setup *test_1*",
|
||||
"* call *test_1*",
|
||||
])
|
||||
|
||||
|
||||
@@ -12,13 +12,17 @@ def pytest_addoption(parser):
|
||||
help=("run FD checks if lsof is available"))
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line("markers",
|
||||
"multi(arg=[value1,value2, ...]): call the test function "
|
||||
"multiple times with arg=value1, then with arg=value2, ... "
|
||||
)
|
||||
if config.getvalue("lsof"):
|
||||
try:
|
||||
out = py.process.cmdexec("lsof -p %d" % pid)
|
||||
except py.process.cmdexec.Error:
|
||||
pass
|
||||
else:
|
||||
config._numfiles = getopenfiles(out)
|
||||
config._numfiles = len(getopenfiles(out))
|
||||
|
||||
#def pytest_report_header():
|
||||
# return "pid: %s" % os.getpid()
|
||||
@@ -26,23 +30,31 @@ def pytest_configure(config):
|
||||
def getopenfiles(out):
|
||||
def isopen(line):
|
||||
return ("REG" in line or "CHR" in line) and (
|
||||
"deleted" not in line and 'mem' not in line)
|
||||
return len([x for x in out.split("\n") if isopen(x)])
|
||||
"deleted" not in line and 'mem' not in line and "txt" not in line)
|
||||
return [x for x in out.split("\n") if isopen(x)]
|
||||
|
||||
def pytest_unconfigure(config, __multicall__):
|
||||
if not hasattr(config, '_numfiles'):
|
||||
return
|
||||
__multicall__.execute()
|
||||
def check_open_files(config):
|
||||
out2 = py.process.cmdexec("lsof -p %d" % pid)
|
||||
len2 = getopenfiles(out2)
|
||||
assert len2 < config._numfiles + 7, out2
|
||||
|
||||
lines2 = getopenfiles(out2)
|
||||
if len(lines2) > config._numfiles + 1:
|
||||
error = []
|
||||
error.append("***** %s FD leackage detected" %
|
||||
(len(lines2)-config._numfiles))
|
||||
error.extend(lines2)
|
||||
error.append(error[0])
|
||||
# update numfile so that the overall test run continuess
|
||||
config._numfiles = len(lines2)
|
||||
raise AssertionError("\n".join(error))
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
item._oldir = py.path.local()
|
||||
|
||||
def pytest_runtest_teardown(item):
|
||||
def pytest_runtest_teardown(item, __multicall__):
|
||||
item._oldir.chdir()
|
||||
if hasattr(item.config, '_numfiles'):
|
||||
x = __multicall__.execute()
|
||||
check_open_files(item.config)
|
||||
return x
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
multi = getattr(metafunc.function, 'multi', None)
|
||||
|
||||
@@ -28,17 +28,14 @@ def test_assert_with_explicit_message():
|
||||
assert e.msg == 'hello'
|
||||
|
||||
def test_assert_within_finally():
|
||||
class A:
|
||||
def f():
|
||||
pass
|
||||
excinfo = py.test.raises(TypeError, """
|
||||
excinfo = py.test.raises(ZeroDivisionError, """
|
||||
try:
|
||||
A().f()
|
||||
1/0
|
||||
finally:
|
||||
i = 42
|
||||
""")
|
||||
s = excinfo.exconly()
|
||||
assert s.find("takes no argument") != -1
|
||||
assert py.std.re.search("division.+by zero", s) is not None
|
||||
|
||||
#def g():
|
||||
# A.f()
|
||||
@@ -325,3 +322,18 @@ def test_assert_raises_in_nonzero_of_object_pytest_issue10():
|
||||
e = exvalue()
|
||||
s = str(e)
|
||||
assert "<MY42 object> < 0" in s
|
||||
|
||||
@py.test.mark.skipif("sys.version_info >= (2,6)")
|
||||
def test_oldinterpret_importation():
|
||||
# we had a cyclic import there
|
||||
# requires pytest on sys.path
|
||||
res = py.std.subprocess.call([
|
||||
py.std.sys.executable, '-c', str(py.code.Source("""
|
||||
try:
|
||||
from _pytest.assertion.newinterpret import interpret
|
||||
except ImportError:
|
||||
from _pytest.assertion.oldinterpret import interpret
|
||||
"""))
|
||||
])
|
||||
|
||||
assert res == 0
|
||||
|
||||
@@ -195,6 +195,10 @@ class TestAssertionRewrite:
|
||||
y = -1
|
||||
assert x + y
|
||||
assert getmsg(f) == "assert (1 + -1)"
|
||||
def f():
|
||||
x = range(10)
|
||||
assert not 5 % 4
|
||||
assert getmsg(f) == "assert not (5 % 4)"
|
||||
|
||||
def test_call(self):
|
||||
def g(a=42, *args, **kwargs):
|
||||
@@ -346,6 +350,7 @@ def test_no_bytecode():
|
||||
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
|
||||
assert testdir.runpytest().ret == 0
|
||||
|
||||
@pytest.mark.skipif('"__pypy__" in sys.modules')
|
||||
def test_pyc_vs_pyo(self, testdir, monkeypatch):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
|
||||
@@ -16,7 +16,6 @@ class TestCaptureManager:
|
||||
|
||||
def test_configure_per_fspath(self, testdir):
|
||||
config = testdir.parseconfig(testdir.tmpdir)
|
||||
assert config.getvalue("capture") is None
|
||||
capman = CaptureManager()
|
||||
hasfd = hasattr(os, 'dup')
|
||||
if hasfd:
|
||||
@@ -53,6 +52,7 @@ class TestCaptureManager:
|
||||
capman.resumecapture(method)
|
||||
out, err = capman.suspendcapture()
|
||||
assert not out and not err
|
||||
capman.reset_capturings()
|
||||
finally:
|
||||
capouter.reset()
|
||||
|
||||
@@ -60,20 +60,23 @@ class TestCaptureManager:
|
||||
def test_juggle_capturings(self, testdir):
|
||||
capouter = py.io.StdCaptureFD()
|
||||
try:
|
||||
config = testdir.parseconfig(testdir.tmpdir)
|
||||
#config = testdir.parseconfig(testdir.tmpdir)
|
||||
capman = CaptureManager()
|
||||
capman.resumecapture("fd")
|
||||
pytest.raises(ValueError, 'capman.resumecapture("fd")')
|
||||
pytest.raises(ValueError, 'capman.resumecapture("sys")')
|
||||
os.write(1, "hello\n".encode('ascii'))
|
||||
out, err = capman.suspendcapture()
|
||||
assert out == "hello\n"
|
||||
capman.resumecapture("sys")
|
||||
os.write(1, "hello\n".encode('ascii'))
|
||||
py.builtin.print_("world", file=sys.stderr)
|
||||
out, err = capman.suspendcapture()
|
||||
assert not out
|
||||
assert err == "world\n"
|
||||
try:
|
||||
capman.resumecapture("fd")
|
||||
pytest.raises(ValueError, 'capman.resumecapture("fd")')
|
||||
pytest.raises(ValueError, 'capman.resumecapture("sys")')
|
||||
os.write(1, "hello\n".encode('ascii'))
|
||||
out, err = capman.suspendcapture()
|
||||
assert out == "hello\n"
|
||||
capman.resumecapture("sys")
|
||||
os.write(1, "hello\n".encode('ascii'))
|
||||
py.builtin.print_("world", file=sys.stderr)
|
||||
out, err = capman.suspendcapture()
|
||||
assert not out
|
||||
assert err == "world\n"
|
||||
finally:
|
||||
capman.reset_capturings()
|
||||
finally:
|
||||
capouter.reset()
|
||||
|
||||
@@ -202,7 +205,7 @@ class TestPerTestCapturing:
|
||||
#"*1 fixture failure*"
|
||||
])
|
||||
|
||||
def test_teardown_final_capturing(self, testdir):
|
||||
def test_teardown_capturing_final(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
def teardown_module(mod):
|
||||
print ("teardown module")
|
||||
|
||||
@@ -313,7 +313,8 @@ class TestSession:
|
||||
def test_collect_topdir(self, testdir):
|
||||
p = testdir.makepyfile("def test_func(): pass")
|
||||
id = "::".join([p.basename, "test_func"])
|
||||
config = testdir.parseconfigure(id)
|
||||
# XXX migrate to inline_genitems? (see below)
|
||||
config = testdir.parseconfig(id)
|
||||
topdir = testdir.tmpdir
|
||||
rcol = Session(config)
|
||||
assert topdir == rcol.fspath
|
||||
@@ -328,15 +329,9 @@ class TestSession:
|
||||
def test_collect_protocol_single_function(self, testdir):
|
||||
p = testdir.makepyfile("def test_func(): pass")
|
||||
id = "::".join([p.basename, "test_func"])
|
||||
config = testdir.parseconfigure(id)
|
||||
topdir = testdir.tmpdir
|
||||
rcol = Session(config)
|
||||
assert topdir == rcol.fspath
|
||||
hookrec = testdir.getreportrecorder(config)
|
||||
rcol.perform_collect()
|
||||
items = rcol.items
|
||||
assert len(items) == 1
|
||||
item = items[0]
|
||||
items, hookrec = testdir.inline_genitems(id)
|
||||
item, = items
|
||||
assert item.name == "test_func"
|
||||
newid = item.nodeid
|
||||
assert newid == id
|
||||
@@ -363,10 +358,7 @@ class TestSession:
|
||||
p.basename + "::TestClass::()",
|
||||
normid,
|
||||
]:
|
||||
config = testdir.parseconfigure(id)
|
||||
rcol = Session(config=config)
|
||||
rcol.perform_collect()
|
||||
items = rcol.items
|
||||
items, hookrec = testdir.inline_genitems(id)
|
||||
assert len(items) == 1
|
||||
assert items[0].name == "test_method"
|
||||
newid = items[0].nodeid
|
||||
@@ -388,11 +380,7 @@ class TestSession:
|
||||
""" % p.basename)
|
||||
id = p.basename
|
||||
|
||||
config = testdir.parseconfigure(id)
|
||||
rcol = Session(config)
|
||||
hookrec = testdir.getreportrecorder(config)
|
||||
rcol.perform_collect()
|
||||
items = rcol.items
|
||||
items, hookrec = testdir.inline_genitems(id)
|
||||
py.std.pprint.pprint(hookrec.hookrecorder.calls)
|
||||
assert len(items) == 2
|
||||
hookrec.hookrecorder.contains([
|
||||
@@ -413,11 +401,8 @@ class TestSession:
|
||||
aaa = testdir.mkpydir("aaa")
|
||||
test_aaa = aaa.join("test_aaa.py")
|
||||
p.move(test_aaa)
|
||||
config = testdir.parseconfigure()
|
||||
rcol = Session(config)
|
||||
hookrec = testdir.getreportrecorder(config)
|
||||
rcol.perform_collect()
|
||||
items = rcol.items
|
||||
|
||||
items, hookrec = testdir.inline_genitems()
|
||||
assert len(items) == 1
|
||||
py.std.pprint.pprint(hookrec.hookrecorder.calls)
|
||||
hookrec.hookrecorder.contains([
|
||||
@@ -437,11 +422,8 @@ class TestSession:
|
||||
p.move(test_bbb)
|
||||
|
||||
id = "."
|
||||
config = testdir.parseconfigure(id)
|
||||
rcol = Session(config)
|
||||
hookrec = testdir.getreportrecorder(config)
|
||||
rcol.perform_collect()
|
||||
items = rcol.items
|
||||
|
||||
items, hookrec = testdir.inline_genitems(id)
|
||||
assert len(items) == 2
|
||||
py.std.pprint.pprint(hookrec.hookrecorder.calls)
|
||||
hookrec.hookrecorder.contains([
|
||||
@@ -455,19 +437,13 @@ class TestSession:
|
||||
|
||||
def test_serialization_byid(self, testdir):
|
||||
p = testdir.makepyfile("def test_func(): pass")
|
||||
config = testdir.parseconfigure()
|
||||
rcol = Session(config)
|
||||
rcol.perform_collect()
|
||||
items = rcol.items
|
||||
items, hookrec = testdir.inline_genitems()
|
||||
assert len(items) == 1
|
||||
item, = items
|
||||
rcol.config.pluginmanager.unregister(name="session")
|
||||
newcol = Session(config)
|
||||
item2, = newcol.perform_collect([item.nodeid], genitems=False)
|
||||
items2, hookrec = testdir.inline_genitems(item.nodeid)
|
||||
item2, = items2
|
||||
assert item2.name == item.name
|
||||
assert item2.fspath == item.fspath
|
||||
item2b, = newcol.perform_collect([item.nodeid], genitems=False)
|
||||
assert item2b == item2
|
||||
|
||||
def test_find_byid_without_instance_parents(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
@@ -476,10 +452,7 @@ class TestSession:
|
||||
pass
|
||||
""")
|
||||
arg = p.basename + ("::TestClass::test_method")
|
||||
config = testdir.parseconfigure(arg)
|
||||
rcol = Session(config)
|
||||
rcol.perform_collect()
|
||||
items = rcol.items
|
||||
items, hookrec = testdir.inline_genitems(arg)
|
||||
assert len(items) == 1
|
||||
item, = items
|
||||
assert item.nodeid.endswith("TestClass::()::test_method")
|
||||
@@ -487,7 +460,7 @@ class TestSession:
|
||||
class Test_getinitialnodes:
|
||||
def test_global_file(self, testdir, tmpdir):
|
||||
x = tmpdir.ensure("x.py")
|
||||
config = testdir.reparseconfig([x])
|
||||
config = testdir.parseconfigure(x)
|
||||
col = testdir.getnode(config, x)
|
||||
assert isinstance(col, pytest.Module)
|
||||
assert col.name == 'x.py'
|
||||
@@ -502,7 +475,7 @@ class Test_getinitialnodes:
|
||||
subdir = tmpdir.join("subdir")
|
||||
x = subdir.ensure("x.py")
|
||||
subdir.ensure("__init__.py")
|
||||
config = testdir.reparseconfig([x])
|
||||
config = testdir.parseconfigure(x)
|
||||
col = testdir.getnode(config, x)
|
||||
assert isinstance(col, pytest.Module)
|
||||
assert col.name == 'subdir/x.py'
|
||||
@@ -528,12 +501,6 @@ class Test_genitems:
|
||||
assert hash(i) != hash(j)
|
||||
assert i != j
|
||||
|
||||
def test_root_conftest_syntax_error(self, testdir):
|
||||
# do we want to unify behaviour with
|
||||
# test_subdir_conftest_error?
|
||||
p = testdir.makepyfile(conftest="raise SyntaxError\n")
|
||||
pytest.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
|
||||
|
||||
def test_example_items1(self, testdir):
|
||||
p = testdir.makepyfile('''
|
||||
def testone():
|
||||
@@ -597,6 +564,6 @@ def test_matchnodes_two_collections_same_file(testdir):
|
||||
res.stdout.fnmatch_lines([
|
||||
"*1 passed*",
|
||||
])
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import py, pytest
|
||||
|
||||
from _pytest.config import getcfg, Config
|
||||
from _pytest.config import getcfg
|
||||
|
||||
class TestParseIni:
|
||||
def test_getcfg_and_config(self, tmpdir):
|
||||
def test_getcfg_and_config(self, testdir, tmpdir):
|
||||
sub = tmpdir.mkdir("sub")
|
||||
sub.chdir()
|
||||
tmpdir.join("setup.cfg").write(py.code.Source("""
|
||||
@@ -12,25 +12,23 @@ class TestParseIni:
|
||||
"""))
|
||||
cfg = getcfg([sub], ["setup.cfg"])
|
||||
assert cfg['name'] == "value"
|
||||
config = Config()
|
||||
config._preparse([sub])
|
||||
config = testdir.parseconfigure(sub)
|
||||
assert config.inicfg['name'] == 'value'
|
||||
|
||||
def test_getcfg_empty_path(self, tmpdir):
|
||||
cfg = getcfg([''], ['setup.cfg']) #happens on py.test ""
|
||||
|
||||
def test_append_parse_args(self, tmpdir):
|
||||
def test_append_parse_args(self, testdir, tmpdir):
|
||||
tmpdir.join("setup.cfg").write(py.code.Source("""
|
||||
[pytest]
|
||||
addopts = --verbose
|
||||
"""))
|
||||
config = Config()
|
||||
config.parse([tmpdir])
|
||||
config = testdir.parseconfig(tmpdir)
|
||||
assert config.option.verbose
|
||||
config = Config()
|
||||
args = [tmpdir,]
|
||||
config._preparse(args, addopts=False)
|
||||
assert len(args) == 1
|
||||
#config = testdir.Config()
|
||||
#args = [tmpdir,]
|
||||
#config._preparse(args, addopts=False)
|
||||
#assert len(args) == 1
|
||||
|
||||
def test_tox_ini_wrong_version(self, testdir):
|
||||
p = testdir.makefile('.ini', tox="""
|
||||
@@ -49,8 +47,7 @@ class TestParseIni:
|
||||
[pytest]
|
||||
minversion = 1.0
|
||||
"""))
|
||||
config = Config()
|
||||
config.parse([testdir.tmpdir])
|
||||
config = testdir.parseconfig()
|
||||
assert config.getini("minversion") == "1.0"
|
||||
|
||||
def test_toxini_before_lower_pytestini(self, testdir):
|
||||
@@ -63,8 +60,7 @@ class TestParseIni:
|
||||
[pytest]
|
||||
minversion = 1.5
|
||||
"""))
|
||||
config = Config()
|
||||
config.parse([sub])
|
||||
config = testdir.parseconfigure(sub)
|
||||
assert config.getini("minversion") == "2.0"
|
||||
|
||||
@pytest.mark.xfail(reason="probably not needed")
|
||||
@@ -77,10 +73,10 @@ class TestParseIni:
|
||||
""")
|
||||
result = testdir.runpytest("--confcutdir=.")
|
||||
assert result.ret == 0
|
||||
|
||||
|
||||
class TestConfigCmdlineParsing:
|
||||
def test_parsing_again_fails(self, testdir):
|
||||
config = testdir.reparseconfig([testdir.tmpdir])
|
||||
config = testdir.parseconfig()
|
||||
pytest.raises(AssertionError, "config.parse([])")
|
||||
|
||||
|
||||
@@ -101,7 +97,7 @@ class TestConfigAPI:
|
||||
assert config.getvalue("x") == 1
|
||||
assert config.getvalue("x", o.join('sub')) == 2
|
||||
pytest.raises(KeyError, "config.getvalue('y')")
|
||||
config = testdir.reparseconfig([str(o.join('sub'))])
|
||||
config = testdir.parseconfigure(str(o.join('sub')))
|
||||
assert config.getvalue("x") == 2
|
||||
assert config.getvalue("y") == 3
|
||||
assert config.getvalue("x", o) == 1
|
||||
@@ -127,18 +123,18 @@ class TestConfigAPI:
|
||||
def test_config_overwrite(self, testdir):
|
||||
o = testdir.tmpdir
|
||||
o.ensure("conftest.py").write("x=1")
|
||||
config = testdir.reparseconfig([str(o)])
|
||||
config = testdir.parseconfig(str(o))
|
||||
assert config.getvalue('x') == 1
|
||||
config.option.x = 2
|
||||
assert config.getvalue('x') == 2
|
||||
config = testdir.reparseconfig([str(o)])
|
||||
config = testdir.parseconfig([str(o)])
|
||||
assert config.getvalue('x') == 1
|
||||
|
||||
def test_getconftest_pathlist(self, testdir, tmpdir):
|
||||
somepath = tmpdir.join("x", "y", "z")
|
||||
p = tmpdir.join("conftest.py")
|
||||
p.write("pathlist = ['.', %r]" % str(somepath))
|
||||
config = testdir.reparseconfig([p])
|
||||
config = testdir.parseconfigure(p)
|
||||
assert config._getconftest_pathlist('notexist') is None
|
||||
pl = config._getconftest_pathlist('pathlist')
|
||||
print(pl)
|
||||
@@ -212,6 +208,40 @@ class TestConfigAPI:
|
||||
l = config.getini("a2")
|
||||
assert l == []
|
||||
|
||||
def test_addinivalue_line_existing(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_addoption(parser):
|
||||
parser.addini("xy", "", type="linelist")
|
||||
""")
|
||||
p = testdir.makeini("""
|
||||
[pytest]
|
||||
xy= 123
|
||||
""")
|
||||
config = testdir.parseconfig()
|
||||
l = config.getini("xy")
|
||||
assert len(l) == 1
|
||||
assert l == ["123"]
|
||||
config.addinivalue_line("xy", "456")
|
||||
l = config.getini("xy")
|
||||
assert len(l) == 2
|
||||
assert l == ["123", "456"]
|
||||
|
||||
def test_addinivalue_line_new(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_addoption(parser):
|
||||
parser.addini("xy", "", type="linelist")
|
||||
""")
|
||||
config = testdir.parseconfig()
|
||||
assert not config.getini("xy")
|
||||
config.addinivalue_line("xy", "456")
|
||||
l = config.getini("xy")
|
||||
assert len(l) == 1
|
||||
assert l == ["456"]
|
||||
config.addinivalue_line("xy", "123")
|
||||
l = config.getini("xy")
|
||||
assert len(l) == 2
|
||||
assert l == ["456", "123"]
|
||||
|
||||
def test_options_on_small_file_do_not_blow_up(testdir):
|
||||
def runfiletest(opts):
|
||||
reprec = testdir.inline_run(*opts)
|
||||
|
||||
@@ -332,17 +332,6 @@ class TestPytestPluginInteractions:
|
||||
"*did not find*sys*"
|
||||
])
|
||||
|
||||
def test_do_option_conftestplugin(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption('--test123', action="store_true")
|
||||
""")
|
||||
config = testdir.Config()
|
||||
config._conftest.importconftest(p)
|
||||
print(config.pluginmanager.getplugins())
|
||||
config.parse([])
|
||||
assert not config.option.test123
|
||||
|
||||
def test_namespace_early_from_import(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
from pytest import Item
|
||||
@@ -370,9 +359,7 @@ class TestPytestPluginInteractions:
|
||||
])
|
||||
|
||||
def test_do_option_postinitialize(self, testdir):
|
||||
config = testdir.Config()
|
||||
config.parse([])
|
||||
config.pluginmanager.do_configure(config=config)
|
||||
config = testdir.parseconfigure()
|
||||
assert not hasattr(config.option, 'test123')
|
||||
p = testdir.makepyfile("""
|
||||
def pytest_addoption(parser):
|
||||
@@ -640,7 +627,7 @@ class TestTracer:
|
||||
log2("seen")
|
||||
tags, args = l2[0]
|
||||
assert args == ("seen",)
|
||||
|
||||
|
||||
|
||||
def test_setmyprocessor(self):
|
||||
from _pytest.core import TagTracer
|
||||
@@ -657,3 +644,10 @@ class TestTracer:
|
||||
assert "1" in tags
|
||||
assert "2" in tags
|
||||
assert args == (42,)
|
||||
|
||||
def test_default_markers(testdir):
|
||||
result = testdir.runpytest("--markers")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*tryfirst*first*",
|
||||
"*trylast*last*",
|
||||
])
|
||||
|
||||
@@ -279,6 +279,13 @@ class TestPython:
|
||||
if not sys.platform.startswith("java"):
|
||||
assert "hx" in fnode.toxml()
|
||||
|
||||
def test_mangle_testnames():
|
||||
from _pytest.junitxml import mangle_testnames
|
||||
names = ["a/pything.py", "Class", "()", "method"]
|
||||
newnames = mangle_testnames(names)
|
||||
assert newnames == ["a.pything", "Class", "method"]
|
||||
|
||||
|
||||
class TestNonPython:
|
||||
def test_summing_simple(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
@@ -340,7 +347,7 @@ def test_nullbyte_replace(testdir):
|
||||
assert '#x0' in text
|
||||
|
||||
|
||||
def test_invalid_xml_escape(testdir):
|
||||
def test_invalid_xml_escape():
|
||||
# Test some more invalid xml chars, the full range should be
|
||||
# tested really but let's just thest the edges of the ranges
|
||||
# intead.
|
||||
@@ -355,27 +362,23 @@ def test_invalid_xml_escape(testdir):
|
||||
except NameError:
|
||||
unichr = chr
|
||||
u = py.builtin._totext
|
||||
invalid = (0x1, 0xB, 0xC, 0xE, 0x19,)
|
||||
# 0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) #, 0x110000)
|
||||
invalid = (0x00, 0x1, 0xB, 0xC, 0xE, 0x19,
|
||||
27, # issue #126
|
||||
0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) #, 0x110000)
|
||||
valid = (0x9, 0xA, 0x20,) # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)
|
||||
all = invalid + valid
|
||||
prints = [u(" sys.stdout.write('''0x%X-->%s<--''')") % (i, unichr(i))
|
||||
for i in all]
|
||||
testdir.makepyfile(u("# -*- coding: UTF-8 -*-"),
|
||||
u("import sys"),
|
||||
u("def test_print_bytes():"),
|
||||
u("\n").join(prints),
|
||||
u(" assert False"))
|
||||
xmlf = testdir.tmpdir.join('junit.xml')
|
||||
result = testdir.runpytest('--junitxml=%s' % xmlf)
|
||||
text = xmlf.read()
|
||||
|
||||
from _pytest.junitxml import bin_xml_escape
|
||||
|
||||
|
||||
for i in invalid:
|
||||
got = bin_xml_escape(unichr(i))
|
||||
if i <= 0xFF:
|
||||
assert '#x%02X' % i in text
|
||||
expected = '#x%02X' % i
|
||||
else:
|
||||
assert '#x%04X' % i in text
|
||||
expected = '#x%04X' % i
|
||||
assert got == expected
|
||||
for i in valid:
|
||||
assert chr(i) in text
|
||||
assert chr(i) == bin_xml_escape(unichr(i))
|
||||
|
||||
def test_logxml_path_expansion():
|
||||
from _pytest.junitxml import LogXML
|
||||
|
||||
@@ -68,7 +68,78 @@ class TestMark:
|
||||
assert 'reason' not in g.some.kwargs
|
||||
assert g.some.kwargs['reason2'] == "456"
|
||||
|
||||
|
||||
def test_ini_markers(testdir):
|
||||
testdir.makeini("""
|
||||
[pytest]
|
||||
markers =
|
||||
a1: this is a webtest marker
|
||||
a2: this is a smoke marker
|
||||
""")
|
||||
testdir.makepyfile("""
|
||||
def test_markers(pytestconfig):
|
||||
markers = pytestconfig.getini("markers")
|
||||
print (markers)
|
||||
assert len(markers) >= 2
|
||||
assert markers[0].startswith("a1:")
|
||||
assert markers[1].startswith("a2:")
|
||||
""")
|
||||
rec = testdir.inline_run()
|
||||
rec.assertoutcome(passed=1)
|
||||
|
||||
def test_markers_option(testdir):
|
||||
testdir.makeini("""
|
||||
[pytest]
|
||||
markers =
|
||||
a1: this is a webtest marker
|
||||
a1some: another marker
|
||||
""")
|
||||
result = testdir.runpytest("--markers", )
|
||||
result.stdout.fnmatch_lines([
|
||||
"*a1*this is a webtest*",
|
||||
"*a1some*another marker",
|
||||
])
|
||||
|
||||
|
||||
def test_strict_prohibits_unregistered_markers(testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.mark.unregisteredmark
|
||||
def test_hello():
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest("--strict")
|
||||
assert result.ret != 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*unregisteredmark*not*registered*",
|
||||
])
|
||||
|
||||
@pytest.mark.multi(spec=[
|
||||
("xyz", ("test_one",)),
|
||||
("xyz and xyz2", ()),
|
||||
("xyz2", ("test_two",)),
|
||||
("xyz or xyz2", ("test_one", "test_two"),)
|
||||
])
|
||||
def test_mark_option(spec, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.mark.xyz
|
||||
def test_one():
|
||||
pass
|
||||
@pytest.mark.xyz2
|
||||
def test_two():
|
||||
pass
|
||||
""")
|
||||
opt, passed_result = spec
|
||||
rec = testdir.inline_run("-m", opt)
|
||||
passed, skipped, fail = rec.listoutcomes()
|
||||
passed = [x.nodeid.split("::")[-1] for x in passed]
|
||||
assert len(passed) == len(passed_result)
|
||||
assert list(passed) == list(passed_result)
|
||||
|
||||
|
||||
class TestFunctional:
|
||||
|
||||
def test_mark_per_function(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
import pytest
|
||||
@@ -77,7 +148,7 @@ class TestFunctional:
|
||||
assert hasattr(test_hello, 'hello')
|
||||
""")
|
||||
result = testdir.runpytest(p)
|
||||
result.stdout.fnmatch_lines(["*passed*"])
|
||||
result.stdout.fnmatch_lines(["*1 passed*"])
|
||||
|
||||
def test_mark_per_module(self, testdir):
|
||||
item = testdir.getitem("""
|
||||
@@ -157,18 +228,26 @@ class TestFunctional:
|
||||
keywords = item.keywords
|
||||
marker = keywords['hello']
|
||||
assert marker.args == ("pos0", "pos1")
|
||||
assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4}
|
||||
assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4}
|
||||
|
||||
def test_mark_other(self, testdir):
|
||||
pytest.raises(TypeError, '''
|
||||
testdir.getitem("""
|
||||
# test the new __iter__ interface
|
||||
l = list(marker)
|
||||
assert len(l) == 3
|
||||
assert l[0].args == ("pos0",)
|
||||
assert l[1].args == ()
|
||||
assert l[2].args == ("pos1", )
|
||||
|
||||
def test_mark_with_wrong_marker(self, testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
import pytest
|
||||
class pytestmark:
|
||||
pass
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
''')
|
||||
""")
|
||||
l = reprec.getfailedcollections()
|
||||
assert len(l) == 1
|
||||
assert "TypeError" in str(l[0].longrepr)
|
||||
|
||||
def test_mark_dynamically_in_funcarg(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
@@ -188,57 +267,22 @@ class TestFunctional:
|
||||
"keyword: *hello*"
|
||||
])
|
||||
|
||||
|
||||
class Test_genitems:
|
||||
def test_check_collect_hashes(self, testdir):
|
||||
def test_merging_markers_two_functions(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
def test_1():
|
||||
pass
|
||||
|
||||
def test_2():
|
||||
import pytest
|
||||
@pytest.mark.hello("pos1", z=4)
|
||||
@pytest.mark.hello("pos0", z=3)
|
||||
def test_func(self):
|
||||
pass
|
||||
""")
|
||||
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
|
||||
items, reprec = testdir.inline_genitems(p.dirpath())
|
||||
assert len(items) == 4
|
||||
for numi, i in enumerate(items):
|
||||
for numj, j in enumerate(items):
|
||||
if numj != numi:
|
||||
assert hash(i) != hash(j)
|
||||
assert i != j
|
||||
|
||||
def test_root_conftest_syntax_error(self, testdir):
|
||||
# do we want to unify behaviour with
|
||||
# test_subdir_conftest_error?
|
||||
p = testdir.makepyfile(conftest="raise SyntaxError\n")
|
||||
pytest.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
|
||||
|
||||
def test_example_items1(self, testdir):
|
||||
p = testdir.makepyfile('''
|
||||
def testone():
|
||||
pass
|
||||
|
||||
class TestX:
|
||||
def testmethod_one(self):
|
||||
pass
|
||||
|
||||
class TestY(TestX):
|
||||
pass
|
||||
''')
|
||||
items, reprec = testdir.inline_genitems(p)
|
||||
assert len(items) == 3
|
||||
assert items[0].name == 'testone'
|
||||
assert items[1].name == 'testmethod_one'
|
||||
assert items[2].name == 'testmethod_one'
|
||||
|
||||
# let's also test getmodpath here
|
||||
assert items[0].getmodpath() == "testone"
|
||||
assert items[1].getmodpath() == "TestX.testmethod_one"
|
||||
assert items[2].getmodpath() == "TestY.testmethod_one"
|
||||
|
||||
s = items[0].getmodpath(stopatmodule=False)
|
||||
assert s.endswith("test_example_items1.testone")
|
||||
print(s)
|
||||
items, rec = testdir.inline_genitems(p)
|
||||
item, = items
|
||||
keywords = item.keywords
|
||||
marker = keywords['hello']
|
||||
l = list(marker)
|
||||
assert len(l) == 2
|
||||
assert l[0].args == ("pos0",)
|
||||
assert l[1].args == ("pos1",)
|
||||
|
||||
|
||||
class TestKeywordSelection:
|
||||
|
||||
@@ -2,6 +2,17 @@ import os, sys
|
||||
import pytest
|
||||
from _pytest.monkeypatch import monkeypatch as MonkeyPatch
|
||||
|
||||
def pytest_funcarg__mp(request):
|
||||
cwd = os.getcwd()
|
||||
sys_path = list(sys.path)
|
||||
|
||||
def cleanup():
|
||||
sys.path[:] = sys_path
|
||||
os.chdir(cwd)
|
||||
|
||||
request.addfinalizer(cleanup)
|
||||
return MonkeyPatch()
|
||||
|
||||
def test_setattr():
|
||||
class A:
|
||||
x = 1
|
||||
@@ -59,6 +70,29 @@ def test_setitem():
|
||||
monkeypatch.undo()
|
||||
assert d['x'] == 5
|
||||
|
||||
def test_setitem_deleted_meanwhile():
|
||||
d = {}
|
||||
monkeypatch = MonkeyPatch()
|
||||
monkeypatch.setitem(d, 'x', 2)
|
||||
del d['x']
|
||||
monkeypatch.undo()
|
||||
assert not d
|
||||
|
||||
@pytest.mark.parametrize("before", [True, False])
|
||||
def test_setenv_deleted_meanwhile(before):
|
||||
key = "qwpeoip123"
|
||||
if before:
|
||||
os.environ[key] = "world"
|
||||
monkeypatch = MonkeyPatch()
|
||||
monkeypatch.setenv(key, 'hello')
|
||||
del os.environ[key]
|
||||
monkeypatch.undo()
|
||||
if before:
|
||||
assert os.environ[key] == "world"
|
||||
del os.environ[key]
|
||||
else:
|
||||
assert key not in os.environ
|
||||
|
||||
def test_delitem():
|
||||
d = {'x': 1}
|
||||
monkeypatch = MonkeyPatch()
|
||||
@@ -121,19 +155,41 @@ def test_monkeypatch_plugin(testdir):
|
||||
res = reprec.countoutcomes()
|
||||
assert tuple(res) == (1, 0, 0), res
|
||||
|
||||
def test_syspath_prepend():
|
||||
def test_syspath_prepend(mp):
|
||||
old = list(sys.path)
|
||||
try:
|
||||
monkeypatch = MonkeyPatch()
|
||||
monkeypatch.syspath_prepend('world')
|
||||
monkeypatch.syspath_prepend('hello')
|
||||
assert sys.path[0] == "hello"
|
||||
assert sys.path[1] == "world"
|
||||
monkeypatch.undo()
|
||||
assert sys.path == old
|
||||
monkeypatch.undo()
|
||||
assert sys.path == old
|
||||
finally:
|
||||
sys.path[:] = old
|
||||
mp.syspath_prepend('world')
|
||||
mp.syspath_prepend('hello')
|
||||
assert sys.path[0] == "hello"
|
||||
assert sys.path[1] == "world"
|
||||
mp.undo()
|
||||
assert sys.path == old
|
||||
mp.undo()
|
||||
assert sys.path == old
|
||||
|
||||
def test_syspath_prepend_double_undo(mp):
|
||||
mp.syspath_prepend('hello world')
|
||||
mp.undo()
|
||||
sys.path.append('more hello world')
|
||||
mp.undo()
|
||||
assert sys.path[-1] == 'more hello world'
|
||||
|
||||
def test_chdir_with_path_local(mp, tmpdir):
|
||||
mp.chdir(tmpdir)
|
||||
assert os.getcwd() == tmpdir.strpath
|
||||
|
||||
def test_chdir_with_str(mp, tmpdir):
|
||||
mp.chdir(tmpdir.strpath)
|
||||
assert os.getcwd() == tmpdir.strpath
|
||||
|
||||
def test_chdir_undo(mp, tmpdir):
|
||||
cwd = os.getcwd()
|
||||
mp.chdir(tmpdir)
|
||||
mp.undo()
|
||||
assert os.getcwd() == cwd
|
||||
|
||||
def test_chdir_double_undo(mp, tmpdir):
|
||||
mp.chdir(tmpdir.strpath)
|
||||
mp.undo()
|
||||
tmpdir.chdir()
|
||||
mp.undo()
|
||||
assert os.getcwd() == tmpdir.strpath
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import pytest
|
||||
|
||||
class TestPasting:
|
||||
def pytest_funcarg__pastebinlist(self, request):
|
||||
@@ -45,3 +46,14 @@ class TestPasting:
|
||||
for x in 'test_fail test_skip skipped'.split():
|
||||
assert s.find(x), (s, x)
|
||||
|
||||
|
||||
class TestRPCClient:
|
||||
def pytest_funcarg__pastebin(self, request):
|
||||
return request.config.pluginmanager.getplugin('pastebin')
|
||||
|
||||
def test_getproxy(self, pastebin):
|
||||
proxy = pastebin.getproxy()
|
||||
assert proxy is not None
|
||||
assert proxy.__class__.__module__.startswith('xmlrpc')
|
||||
|
||||
|
||||
|
||||
@@ -106,6 +106,26 @@ class TestPDB:
|
||||
if child.isalive():
|
||||
child.wait()
|
||||
|
||||
def test_pdb_interaction_doctest(self, testdir):
|
||||
p1 = testdir.makepyfile("""
|
||||
import pytest
|
||||
def function_1():
|
||||
'''
|
||||
>>> i = 0
|
||||
>>> assert i == 1
|
||||
'''
|
||||
""")
|
||||
child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
|
||||
child.expect("(Pdb)")
|
||||
child.sendline('i')
|
||||
child.expect("0")
|
||||
child.expect("(Pdb)")
|
||||
child.sendeof()
|
||||
rest = child.read()
|
||||
assert "1 failed" in rest
|
||||
if child.isalive():
|
||||
child.wait()
|
||||
|
||||
def test_pdb_interaction_capturing_twice(self, testdir):
|
||||
p1 = testdir.makepyfile("""
|
||||
import pytest
|
||||
|
||||
@@ -56,6 +56,24 @@ class TestClass:
|
||||
"*collected 0*",
|
||||
])
|
||||
|
||||
def test_setup_teardown_class_as_classmethod(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
class TestClassMethod:
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
pass
|
||||
def test_1(self):
|
||||
pass
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines([
|
||||
"*1 passed*",
|
||||
])
|
||||
|
||||
|
||||
class TestGenerator:
|
||||
def test_generative_functions(self, testdir):
|
||||
modcol = testdir.getmodulecol("""
|
||||
@@ -257,7 +275,7 @@ class TestFunction:
|
||||
assert hasattr(modcol.obj, 'test_func')
|
||||
|
||||
def test_function_equality(self, testdir, tmpdir):
|
||||
config = testdir.reparseconfig()
|
||||
config = testdir.parseconfigure()
|
||||
session = testdir.Session(config)
|
||||
f1 = pytest.Function(name="name", config=config,
|
||||
args=(1,), callobj=isinstance, session=session)
|
||||
@@ -279,7 +297,7 @@ class TestFunction:
|
||||
assert not f1 != f1_b
|
||||
|
||||
def test_function_equality_with_callspec(self, testdir, tmpdir):
|
||||
config = testdir.reparseconfig()
|
||||
config = testdir.parseconfigure()
|
||||
class callspec1:
|
||||
param = 1
|
||||
funcargs = {}
|
||||
@@ -477,7 +495,7 @@ class TestTracebackCutting:
|
||||
out = result.stdout.str()
|
||||
assert out.find("conftest.py:2: ValueError") != -1
|
||||
numentries = out.count("_ _ _ _") # separator for traceback entries
|
||||
assert numentries >3
|
||||
assert numentries > 3
|
||||
|
||||
def test_traceback_error_during_import(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
@@ -520,12 +538,6 @@ def test_getfuncargnames():
|
||||
if sys.version_info < (3,0):
|
||||
assert funcargs.getfuncargnames(A.f) == ['arg1']
|
||||
|
||||
def test_callspec_repr():
|
||||
cs = funcargs.CallSpec({}, 'hello', 1)
|
||||
repr(cs)
|
||||
cs = funcargs.CallSpec({}, 'hello', funcargs._notexists)
|
||||
repr(cs)
|
||||
|
||||
class TestFillFuncArgs:
|
||||
def test_fillfuncargs_exposed(self):
|
||||
# used by oejskit
|
||||
@@ -695,7 +707,7 @@ class TestRequest:
|
||||
teardownlist = item.getparent(pytest.Module).obj.teardownlist
|
||||
ss = item.session._setupstate
|
||||
assert not teardownlist
|
||||
ss.teardown_exact(item)
|
||||
ss.teardown_exact(item, None)
|
||||
print(ss.stack)
|
||||
assert teardownlist == [1]
|
||||
|
||||
@@ -783,7 +795,7 @@ class TestRequestCachedSetup:
|
||||
req2 = funcargs.FuncargRequest(item2)
|
||||
ret2 = req2.cached_setup(setup, scope="class")
|
||||
assert ret2 == "hello"
|
||||
|
||||
|
||||
req3 = funcargs.FuncargRequest(item3)
|
||||
ret3a = req3.cached_setup(setup, scope="class")
|
||||
ret3b = req3.cached_setup(setup, scope="class")
|
||||
@@ -886,6 +898,7 @@ class TestMetafunc:
|
||||
def function(): pass
|
||||
metafunc = funcargs.Metafunc(function)
|
||||
assert not metafunc.funcargnames
|
||||
repr(metafunc._calls)
|
||||
|
||||
def test_function_basic(self):
|
||||
def func(arg1, arg2="qwe"): pass
|
||||
@@ -925,9 +938,9 @@ class TestMetafunc:
|
||||
metafunc.addcall(param=obj)
|
||||
metafunc.addcall(param=1)
|
||||
assert len(metafunc._calls) == 3
|
||||
assert metafunc._calls[0].param == obj
|
||||
assert metafunc._calls[1].param == obj
|
||||
assert metafunc._calls[2].param == 1
|
||||
assert metafunc._calls[0].getparam("arg1") == obj
|
||||
assert metafunc._calls[1].getparam("arg1") == obj
|
||||
assert metafunc._calls[2].getparam("arg1") == 1
|
||||
|
||||
def test_addcall_funcargs(self):
|
||||
def func(x): pass
|
||||
@@ -941,7 +954,137 @@ class TestMetafunc:
|
||||
assert metafunc._calls[1].funcargs == {'x': 3}
|
||||
assert not hasattr(metafunc._calls[1], 'param')
|
||||
|
||||
class TestGenfuncFunctional:
|
||||
def test_parametrize_error(self):
|
||||
def func(x, y): pass
|
||||
metafunc = funcargs.Metafunc(func)
|
||||
metafunc.parametrize("x", [1,2])
|
||||
pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
|
||||
pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
|
||||
metafunc.parametrize("y", [1,2])
|
||||
pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
|
||||
pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
|
||||
|
||||
def test_parametrize_and_id(self):
|
||||
def func(x, y): pass
|
||||
metafunc = funcargs.Metafunc(func)
|
||||
|
||||
metafunc.parametrize("x", [1,2], ids=['basic', 'advanced'])
|
||||
metafunc.parametrize("y", ["abc", "def"])
|
||||
ids = [x.id for x in metafunc._calls]
|
||||
assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"]
|
||||
|
||||
def test_parametrize_with_userobjects(self):
|
||||
def func(x, y): pass
|
||||
metafunc = funcargs.Metafunc(func)
|
||||
class A:
|
||||
pass
|
||||
metafunc.parametrize("x", [A(), A()])
|
||||
metafunc.parametrize("y", list("ab"))
|
||||
assert metafunc._calls[0].id == ".0-a"
|
||||
assert metafunc._calls[1].id == ".0-b"
|
||||
assert metafunc._calls[2].id == ".1-a"
|
||||
assert metafunc._calls[3].id == ".1-b"
|
||||
|
||||
def test_addcall_and_parametrize(self):
|
||||
def func(x, y): pass
|
||||
metafunc = funcargs.Metafunc(func)
|
||||
metafunc.addcall({'x': 1})
|
||||
metafunc.parametrize('y', [2,3])
|
||||
assert len(metafunc._calls) == 2
|
||||
assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2}
|
||||
assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3}
|
||||
assert metafunc._calls[0].id == "0-2"
|
||||
assert metafunc._calls[1].id == "0-3"
|
||||
|
||||
def test_parametrize_indirect(self):
|
||||
def func(x, y): pass
|
||||
metafunc = funcargs.Metafunc(func)
|
||||
metafunc.parametrize('x', [1], indirect=True)
|
||||
metafunc.parametrize('y', [2,3], indirect=True)
|
||||
metafunc.parametrize('unnamed', [1], indirect=True)
|
||||
assert len(metafunc._calls) == 2
|
||||
assert metafunc._calls[0].funcargs == {}
|
||||
assert metafunc._calls[1].funcargs == {}
|
||||
assert metafunc._calls[0].params == dict(x=1,y=2, unnamed=1)
|
||||
assert metafunc._calls[1].params == dict(x=1,y=3, unnamed=1)
|
||||
|
||||
def test_addcalls_and_parametrize_indirect(self):
|
||||
def func(x, y): pass
|
||||
metafunc = funcargs.Metafunc(func)
|
||||
metafunc.addcall(param="123")
|
||||
metafunc.parametrize('x', [1], indirect=True)
|
||||
metafunc.parametrize('y', [2,3], indirect=True)
|
||||
assert len(metafunc._calls) == 2
|
||||
assert metafunc._calls[0].funcargs == {}
|
||||
assert metafunc._calls[1].funcargs == {}
|
||||
assert metafunc._calls[0].params == dict(x=1,y=2)
|
||||
assert metafunc._calls[1].params == dict(x=1,y=3)
|
||||
|
||||
def test_parametrize_functional(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_generate_tests(metafunc):
|
||||
metafunc.parametrize('x', [1,2], indirect=True)
|
||||
metafunc.parametrize('y', [2])
|
||||
def pytest_funcarg__x(request):
|
||||
return request.param * 10
|
||||
def pytest_funcarg__y(request):
|
||||
return request.param
|
||||
|
||||
def test_simple(x,y):
|
||||
assert x in (10,20)
|
||||
assert y == 2
|
||||
""")
|
||||
result = testdir.runpytest("-v")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*test_simple*1-2*",
|
||||
"*test_simple*2-2*",
|
||||
"*2 passed*",
|
||||
])
|
||||
|
||||
def test_parametrize_onearg(self):
|
||||
metafunc = funcargs.Metafunc(lambda x: None)
|
||||
metafunc.parametrize("x", [1,2])
|
||||
assert len(metafunc._calls) == 2
|
||||
assert metafunc._calls[0].funcargs == dict(x=1)
|
||||
assert metafunc._calls[0].id == "1"
|
||||
assert metafunc._calls[1].funcargs == dict(x=2)
|
||||
assert metafunc._calls[1].id == "2"
|
||||
|
||||
def test_parametrize_onearg_indirect(self):
|
||||
metafunc = funcargs.Metafunc(lambda x: None)
|
||||
metafunc.parametrize("x", [1,2], indirect=True)
|
||||
assert metafunc._calls[0].params == dict(x=1)
|
||||
assert metafunc._calls[0].id == "1"
|
||||
assert metafunc._calls[1].params == dict(x=2)
|
||||
assert metafunc._calls[1].id == "2"
|
||||
|
||||
def test_parametrize_twoargs(self):
|
||||
metafunc = funcargs.Metafunc(lambda x,y: None)
|
||||
metafunc.parametrize(("x", "y"), [(1,2), (3,4)])
|
||||
assert len(metafunc._calls) == 2
|
||||
assert metafunc._calls[0].funcargs == dict(x=1, y=2)
|
||||
assert metafunc._calls[0].id == "1-2"
|
||||
assert metafunc._calls[1].funcargs == dict(x=3, y=4)
|
||||
assert metafunc._calls[1].id == "3-4"
|
||||
|
||||
def test_parametrize_multiple_times(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
pytestmark = pytest.mark.parametrize("x", [1,2])
|
||||
def test_func(x):
|
||||
assert 0, x
|
||||
class TestClass:
|
||||
pytestmark = pytest.mark.parametrize("y", [3,4])
|
||||
def test_meth(self, x, y):
|
||||
assert 0, x
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
assert result.ret == 1
|
||||
result.stdout.fnmatch_lines([
|
||||
"*6 fail*",
|
||||
])
|
||||
|
||||
class TestMetafuncFunctional:
|
||||
def test_attributes(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
# assumes that generate/provide runs in the same process
|
||||
@@ -1109,6 +1252,46 @@ class TestGenfuncFunctional:
|
||||
"*1 pass*",
|
||||
])
|
||||
|
||||
def test_parametrize_functional2(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_generate_tests(metafunc):
|
||||
metafunc.parametrize("arg1", [1,2])
|
||||
metafunc.parametrize("arg2", [4,5])
|
||||
def test_hello(arg1, arg2):
|
||||
assert 0, (arg1, arg2)
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines([
|
||||
"*(1, 4)*",
|
||||
"*(1, 5)*",
|
||||
"*(2, 4)*",
|
||||
"*(2, 5)*",
|
||||
"*4 failed*",
|
||||
])
|
||||
|
||||
def test_parametrize_and_inner_getfuncargvalue(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
def pytest_generate_tests(metafunc):
|
||||
metafunc.parametrize("arg1", [1], indirect=True)
|
||||
metafunc.parametrize("arg2", [10], indirect=True)
|
||||
|
||||
def pytest_funcarg__arg1(request):
|
||||
x = request.getfuncargvalue("arg2")
|
||||
return x + request.param
|
||||
|
||||
def pytest_funcarg__arg2(request):
|
||||
return request.param
|
||||
|
||||
def test_func1(arg1, arg2):
|
||||
assert arg1 == 11
|
||||
""")
|
||||
result = testdir.runpytest("-v", p)
|
||||
result.stdout.fnmatch_lines([
|
||||
"*test_func1*1*PASS*",
|
||||
"*1 passed*"
|
||||
])
|
||||
|
||||
|
||||
def test_conftest_funcargs_only_available_in_subdir(testdir):
|
||||
sub1 = testdir.mkpydir("sub1")
|
||||
sub2 = testdir.mkpydir("sub2")
|
||||
@@ -1320,7 +1503,7 @@ def test_customized_python_discovery(testdir):
|
||||
"*CheckMyApp*",
|
||||
"*check_meth*",
|
||||
])
|
||||
|
||||
|
||||
result = testdir.runpytest()
|
||||
assert result.ret == 0
|
||||
result.stdout.fnmatch_lines([
|
||||
@@ -1354,7 +1537,7 @@ def test_customize_through_attributes(testdir):
|
||||
Function = MyFunction
|
||||
class MyClass(pytest.Class):
|
||||
Instance = MyInstance
|
||||
|
||||
|
||||
def pytest_pycollect_makeitem(collector, name, obj):
|
||||
if name.startswith("MyTestClass"):
|
||||
return MyClass(name, parent=collector)
|
||||
@@ -1370,3 +1553,38 @@ def test_customize_through_attributes(testdir):
|
||||
"*MyInstance*",
|
||||
"*MyFunction*test_hello*",
|
||||
])
|
||||
|
||||
|
||||
def test_unorderable_types(testdir):
|
||||
testdir.makepyfile("""
|
||||
class TestJoinEmpty:
|
||||
pass
|
||||
|
||||
def make_test():
|
||||
class Test:
|
||||
pass
|
||||
Test.__name__ = "TestFoo"
|
||||
return Test
|
||||
TestFoo = make_test()
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
assert "TypeError" not in result.stdout.str()
|
||||
assert result.ret == 0
|
||||
|
||||
def test_issue117_sessionscopeteardown(testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__app(request):
|
||||
app = request.cached_setup(
|
||||
scope='session',
|
||||
setup=lambda: 0,
|
||||
teardown=lambda x: 3/x)
|
||||
return app
|
||||
def test_func(app):
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
assert result.ret != 0
|
||||
result.stderr.fnmatch_lines([
|
||||
"*3/x*",
|
||||
"*ZeroDivisionError*",
|
||||
])
|
||||
|
||||
@@ -133,23 +133,26 @@ class TestWithFunctionIntegration:
|
||||
assert lines[14].startswith('X ')
|
||||
assert len(lines) == 15
|
||||
|
||||
def test_internal_exception(self):
|
||||
@pytest.mark.parametrize("style", ("native", "long", "short"))
|
||||
def test_internal_exception(self, style):
|
||||
# they are produced for example by a teardown failing
|
||||
# at the end of the run
|
||||
# at the end of the run or a failing hook invocation
|
||||
try:
|
||||
raise ValueError
|
||||
except ValueError:
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
reslog = ResultLog(None, py.io.TextIO())
|
||||
reslog.pytest_internalerror(excinfo.getrepr())
|
||||
reslog.pytest_internalerror(excinfo.getrepr(style=style))
|
||||
entry = reslog.logfile.getvalue()
|
||||
entry_lines = entry.splitlines()
|
||||
|
||||
assert entry_lines[0].startswith('! ')
|
||||
assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class
|
||||
if style != "native":
|
||||
assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class
|
||||
assert entry_lines[-1][0] == ' '
|
||||
assert 'ValueError' in entry
|
||||
|
||||
|
||||
def test_generic(testdir, LineMatcher):
|
||||
testdir.plugins.append("resultlog")
|
||||
testdir.makepyfile("""
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import pytest, py, sys
|
||||
import pytest, py, sys, os
|
||||
from _pytest import runner
|
||||
from py._code.code import ReprExceptionInfo
|
||||
|
||||
@@ -30,9 +30,9 @@ class TestSetupState:
|
||||
def test_teardown_exact_stack_empty(self, testdir):
|
||||
item = testdir.getitem("def test_func(): pass")
|
||||
ss = runner.SetupState()
|
||||
ss.teardown_exact(item)
|
||||
ss.teardown_exact(item)
|
||||
ss.teardown_exact(item)
|
||||
ss.teardown_exact(item, None)
|
||||
ss.teardown_exact(item, None)
|
||||
ss.teardown_exact(item, None)
|
||||
|
||||
def test_setup_fails_and_failure_is_cached(self, testdir):
|
||||
item = testdir.getitem("""
|
||||
@@ -160,6 +160,45 @@ class BaseFunctionalTests:
|
||||
#assert rep.failed.where.path.basename == "test_func.py"
|
||||
#assert rep.failed.failurerepr == "hello"
|
||||
|
||||
def test_teardown_final_returncode(self, testdir):
|
||||
rec = testdir.inline_runsource("""
|
||||
def test_func():
|
||||
pass
|
||||
def teardown_function(func):
|
||||
raise ValueError(42)
|
||||
""")
|
||||
assert rec.ret == 1
|
||||
|
||||
def test_exact_teardown_issue90(self, testdir):
|
||||
rec = testdir.inline_runsource("""
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
def test_method(self):
|
||||
pass
|
||||
def teardown_class(cls):
|
||||
raise Exception()
|
||||
|
||||
def test_func():
|
||||
pass
|
||||
def teardown_function(func):
|
||||
raise ValueError(42)
|
||||
""")
|
||||
reps = rec.getreports("pytest_runtest_logreport")
|
||||
print (reps)
|
||||
for i in range(2):
|
||||
assert reps[i].nodeid.endswith("test_method")
|
||||
assert reps[i].passed
|
||||
assert reps[2].when == "teardown"
|
||||
assert reps[2].failed
|
||||
assert len(reps) == 6
|
||||
for i in range(3,5):
|
||||
assert reps[i].nodeid.endswith("test_func")
|
||||
assert reps[i].passed
|
||||
assert reps[5].when == "teardown"
|
||||
assert reps[5].nodeid.endswith("test_func")
|
||||
assert reps[5].failed
|
||||
|
||||
def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
|
||||
testdir.makepyfile(conftest="""
|
||||
import pytest
|
||||
@@ -276,6 +315,21 @@ class TestSessionReports:
|
||||
assert not rep.passed
|
||||
assert rep.skipped
|
||||
|
||||
|
||||
reporttypes = [
|
||||
runner.BaseReport,
|
||||
runner.TestReport,
|
||||
runner.TeardownErrorReport,
|
||||
runner.CollectReport,
|
||||
]
|
||||
|
||||
@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
|
||||
def test_report_extra_parameters(reporttype):
|
||||
args = py.std.inspect.getargspec(reporttype.__init__)[0][1:]
|
||||
basekw = dict.fromkeys(args, [])
|
||||
report = reporttype(newthing=1, **basekw)
|
||||
assert report.newthing == 1
|
||||
|
||||
def test_callinfo():
|
||||
ci = runner.CallInfo(lambda: 0, '123')
|
||||
assert ci.when == "123"
|
||||
@@ -384,21 +438,18 @@ def test_importorskip():
|
||||
py.test.fail("spurious skip")
|
||||
|
||||
def test_importorskip_imports_last_module_part():
|
||||
import os
|
||||
ospath = py.test.importorskip("os.path")
|
||||
assert os.path == ospath
|
||||
|
||||
|
||||
def test_pytest_cmdline_main(testdir):
|
||||
p = testdir.makepyfile("""
|
||||
import sys
|
||||
sys.path.insert(0, %r)
|
||||
import py
|
||||
def test_hello():
|
||||
assert 1
|
||||
if __name__ == '__main__':
|
||||
py.test.cmdline.main([__file__])
|
||||
""" % (str(py._pydir.dirpath())))
|
||||
""")
|
||||
import subprocess
|
||||
popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
|
||||
s = popen.stdout.read()
|
||||
|
||||
@@ -549,3 +549,10 @@ def test_direct_gives_error(testdir):
|
||||
])
|
||||
|
||||
|
||||
def test_default_markers(testdir):
|
||||
result = testdir.runpytest("--markers")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*skipif(*conditions)*skip*",
|
||||
"*xfail(*conditions, reason=None, run=True)*expected failure*",
|
||||
])
|
||||
|
||||
|
||||
@@ -256,6 +256,31 @@ class TestCollectonly:
|
||||
*1 error*
|
||||
""").strip())
|
||||
|
||||
def test_collectonly_missing_path(self, testdir):
|
||||
"""this checks issue 115,
|
||||
failure in parseargs will cause session
|
||||
not to have the items attribute
|
||||
"""
|
||||
result = testdir.runpytest("--collectonly", "uhm_missing_path")
|
||||
assert result.ret == 4
|
||||
result.stderr.fnmatch_lines([
|
||||
'*ERROR: file not found*',
|
||||
])
|
||||
|
||||
def test_collectonly_quiet(self, testdir):
|
||||
testdir.makepyfile("def test_foo(): pass")
|
||||
result = testdir.runpytest("--collectonly", "-q")
|
||||
result.stdout.fnmatch_lines([
|
||||
'*test_foo*',
|
||||
])
|
||||
|
||||
def test_collectonly_more_quiet(self, testdir):
|
||||
testdir.makepyfile(test_fun="def test_foo(): pass")
|
||||
result = testdir.runpytest("--collectonly", "-qq")
|
||||
result.stdout.fnmatch_lines([
|
||||
'*test_fun.py: 1*',
|
||||
])
|
||||
|
||||
|
||||
def test_repr_python_version(monkeypatch):
|
||||
try:
|
||||
@@ -340,7 +365,7 @@ class TestTerminalFunctional:
|
||||
result = testdir.runpytest("-k", "test_two:", testpath)
|
||||
result.stdout.fnmatch_lines([
|
||||
"*test_deselected.py ..",
|
||||
"=* 1 test*deselected by 'test_two:'*=",
|
||||
"=* 1 test*deselected by*test_two:*=",
|
||||
])
|
||||
assert result.ret == 0
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import py, pytest
|
||||
import os
|
||||
|
||||
from _pytest.tmpdir import pytest_funcarg__tmpdir, TempdirHandler
|
||||
from _pytest.python import FuncargRequest
|
||||
@@ -54,17 +55,6 @@ class TestConfigTmpdir:
|
||||
assert b2.check()
|
||||
assert not h.check()
|
||||
|
||||
def test_reparse(self, testdir):
|
||||
config2 = testdir.reparseconfig([])
|
||||
config3 = testdir.reparseconfig([])
|
||||
assert config2.basetemp != config3.basetemp
|
||||
assert not config2.basetemp.relto(config3.basetemp)
|
||||
assert not config3.basetemp.relto(config2.basetemp)
|
||||
|
||||
def test_reparse_filename_too_long(self, testdir):
|
||||
config = testdir.reparseconfig(["--basetemp=%s" % ("123"*300)])
|
||||
|
||||
|
||||
def test_basetemp(testdir):
|
||||
mytemp = testdir.tmpdir.mkdir("mytemp")
|
||||
p = testdir.makepyfile("""
|
||||
@@ -75,3 +65,16 @@ def test_basetemp(testdir):
|
||||
result = testdir.runpytest(p, '--basetemp=%s' % mytemp)
|
||||
assert result.ret == 0
|
||||
assert mytemp.join('hello').check()
|
||||
|
||||
@pytest.mark.skipif("not hasattr(py.path.local, 'mksymlinkto')")
|
||||
def test_tmpdir_keeps_symlinks(testdir):
|
||||
realtemp = testdir.tmpdir.mkdir("myrealtemp")
|
||||
linktemp = testdir.tmpdir.join("symlinktemp")
|
||||
linktemp.mksymlinkto(realtemp)
|
||||
p = testdir.makepyfile("""
|
||||
def test_1(tmpdir):
|
||||
import os
|
||||
assert os.path.realpath(str(tmpdir)) != str(tmpdir)
|
||||
""")
|
||||
result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp)
|
||||
assert not result.ret
|
||||
|
||||
@@ -38,7 +38,7 @@ def test_setup(testdir):
|
||||
assert self.foo2 == 1
|
||||
def teardown_method(self, method):
|
||||
assert 0, "42"
|
||||
|
||||
|
||||
""")
|
||||
reprec = testdir.inline_run("-s", testpath)
|
||||
assert reprec.matchreport("test_both", when="call").passed
|
||||
@@ -232,6 +232,29 @@ def test_module_level_pytestmark(testdir):
|
||||
reprec.assertoutcome(skipped=1)
|
||||
|
||||
|
||||
def test_testcase_skip_property(testdir):
|
||||
testpath = testdir.makepyfile("""
|
||||
import unittest
|
||||
class MyTestCase(unittest.TestCase):
|
||||
skip = 'dont run'
|
||||
def test_func(self):
|
||||
pass
|
||||
""")
|
||||
reprec = testdir.inline_run(testpath, "-s")
|
||||
reprec.assertoutcome(skipped=1)
|
||||
|
||||
def test_testfunction_skip_property(testdir):
|
||||
testpath = testdir.makepyfile("""
|
||||
import unittest
|
||||
class MyTestCase(unittest.TestCase):
|
||||
def test_func(self):
|
||||
pass
|
||||
test_func.skip = 'dont run'
|
||||
""")
|
||||
reprec = testdir.inline_run(testpath, "-s")
|
||||
reprec.assertoutcome(skipped=1)
|
||||
|
||||
|
||||
class TestTrialUnittest:
|
||||
def setup_class(cls):
|
||||
pytest.importorskip("twisted.trial.unittest")
|
||||
@@ -408,3 +431,52 @@ def test_unittest_not_shown_in_traceback(testdir):
|
||||
""")
|
||||
res = testdir.runpytest()
|
||||
assert "failUnlessEqual" not in res.stdout.str()
|
||||
|
||||
def test_unorderable_types(testdir):
|
||||
testdir.makepyfile("""
|
||||
import unittest
|
||||
class TestJoinEmpty(unittest.TestCase):
|
||||
pass
|
||||
|
||||
def make_test():
|
||||
class Test(unittest.TestCase):
|
||||
pass
|
||||
Test.__name__ = "TestFoo"
|
||||
return Test
|
||||
TestFoo = make_test()
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
assert "TypeError" not in result.stdout.str()
|
||||
assert result.ret == 0
|
||||
|
||||
def test_unittest_typerror_traceback(testdir):
|
||||
testdir.makepyfile("""
|
||||
import unittest
|
||||
class TestJoinEmpty(unittest.TestCase):
|
||||
def test_hello(self, arg1):
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
assert "TypeError" in result.stdout.str()
|
||||
assert result.ret == 1
|
||||
|
||||
@pytest.mark.skipif("sys.version_info < (2,7)")
|
||||
def test_unittest_unexpected_failure(testdir):
|
||||
testdir.makepyfile("""
|
||||
import unittest
|
||||
class MyTestCase(unittest.TestCase):
|
||||
@unittest.expectedFailure
|
||||
def test_func1(self):
|
||||
assert 0
|
||||
@unittest.expectedFailure
|
||||
def test_func2(self):
|
||||
assert 1
|
||||
""")
|
||||
result = testdir.runpytest("-rxX")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*XFAIL*MyTestCase*test_func1*",
|
||||
"*XPASS*MyTestCase*test_func2*",
|
||||
"*1 xfailed*1 xpass*",
|
||||
])
|
||||
|
||||
|
||||
|
||||
7
tox.ini
7
tox.ini
@@ -8,7 +8,7 @@ indexserver=
|
||||
|
||||
[testenv]
|
||||
changedir=testing
|
||||
commands= py.test -rfsxX --junitxml={envlogdir}/junit-{envname}.xml []
|
||||
commands= py.test --lsof -rfsxX --junitxml={envlogdir}/junit-{envname}.xml []
|
||||
deps=
|
||||
:pypi:pexpect
|
||||
:pypi:nose
|
||||
@@ -33,7 +33,7 @@ deps=:pypi:twisted
|
||||
:pypi:pexpect
|
||||
py>=1.4.5.dev1
|
||||
commands=
|
||||
py.test -rsxf \
|
||||
py.test -rsxf testing/test_unittest.py \
|
||||
--junitxml={envlogdir}/junit-{envname}.xml {posargs:testing/test_unittest.py}
|
||||
[testenv:doctest]
|
||||
changedir=.
|
||||
@@ -51,8 +51,7 @@ commands=
|
||||
make html
|
||||
|
||||
[testenv:py31]
|
||||
deps=py>1.4.0
|
||||
:pypi:nose>=1.0
|
||||
deps=:pypi:nose>=1.0
|
||||
|
||||
[testenv:py31-xdist]
|
||||
deps=pytest-xdist
|
||||
|
||||
Reference in New Issue
Block a user