Compare commits
72 commits

69fc6987ad · 0790f7a75f · db8fbe7661 · 91c41cd6b3 · 1bf1cfd07a · 51d94a4a6e
e18abfd013 · 6c7ea8191f · 329dca42a7 · 0362aaba5a · 948dea8bb4 · 6155e9139d
6dd8405aed · c076f4e789 · d32a132b51 · 0e3779b14f · fe1c35f8d0 · b4588f1798
64c7c1be15 · 1c817aa7bd · d02eaa8881 · b92176024c · 1c746e0819 · 166aae4418
58933aac2a · 45aa4e5229 · e643e99586 · 9f6d6f630d · 812ba87f37 · 2b0887fa5f
ee8d2f9950 · 51d29cf4c6 · e378496b24 · 4d21274a29 · 705442cf4e · 87b4cb283f
83505b790d · 2ca6d9f039 · 87b8769680 · 78e7d7aed0 · 68b353be0d · a756dc8106
604e27658c · dfa273dc25 · 5263656df6 · d88fe07377 · 2e23057804 · 303f49a5ad
adbbd164ff · 93424b0f9c · fb7706d4c7 · 4131923c0f · 7b95af2400 · eb6481c663
c126cac98d · e3a8b1e062 · fa6d5bd15b · f2c8a837af · ccc1b21ebd · 85f2a78005
e21202b730 · dc0535f7d5 · f2791988f9 · 8e83af1c33 · 268c051eba · 03cb37b1eb
d5c3265763 · 13e0340350 · 5093d8b925 · 40187ec9bb · f5f8695587 · 27f5213718
.hgtags (3 additions)

@@ -45,3 +45,6 @@ e5e1746a197f0398356a43fbe2eebac9690f795d 2.1.0
 5864412c6f3c903384243bd315639d101d7ebc67 2.1.2
 12a05d59249f80276e25fd8b96e8e545b1332b7a 2.1.3
 1522710369337d96bf9568569d5f0ca9b38a74e0 2.2.0
+3da8cec6c5326ed27c144c9b6d7a64a648370005 2.2.1
+92b916483c1e65a80dc80e3f7816b39e84b36a4d 2.2.2
+3c11c5c9776f3c678719161e96cc0a08169c1cb8 2.2.3
CHANGELOG (42 additions)

@@ -1,3 +1,45 @@
+Changes between 2.2.3 and 2.2.4
+-----------------------------------
+
+- fix error message for rewritten assertions involving the % operator
+- fix issue 126: correctly match all invalid xml characters for junitxml
+  binary escape
+- fix issue with unittest: now @unittest.expectedFailure markers should
+  be processed correctly (you can also use @pytest.mark markers)
+- document integration with the extended distribute/setuptools test commands
+- fix issue 140: properly get the real functions
+  of bound classmethods for setup/teardown_class
+- fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net
+- fix issue #143: call unconfigure/sessionfinish always when
+  configure/sessionstart were called
+- fix issue #144: better mangle test ids to junitxml classnames
+- upgrade distribute_setup.py to 0.6.27
+
+Changes between 2.2.2 and 2.2.3
+----------------------------------------
+
+- fix uploaded package to only include necessary files
+
+Changes between 2.2.1 and 2.2.2
+----------------------------------------
+
+- fix issue101: wrong args to unittest.TestCase test function now
+  produce better output
+- fix issue102: report more useful errors and hints for when a
+  test directory was renamed and some pyc/__pycache__ remain
+- fix issue106: allow parametrize to be applied multiple times
+  e.g. from module, class and at function level.
+- fix issue107: actually perform session scope finalization
+- don't check in parametrize if indirect parameters are funcarg names
+- add chdir method to monkeypatch funcarg
+- fix crash resulting from calling monkeypatch undo a second time
+- fix issue115: make --collectonly robust against early failure
+  (missing files/directories)
+- "-qq --collectonly" now shows only files and the number of tests in them
+- "-q --collectonly" now shows test ids
+- allow adding of attributes to test reports such that it also works
+  with distributed testing (no upgrade of pytest-xdist needed)
+
 Changes between 2.2.0 and 2.2.1
 ----------------------------------------
ISSUES.txt (191 changes)

@@ -1,23 +1,124 @@
-refine parametrize API in 2.2 series
+improve / add to dependency/test resource injection
 -------------------------------------------------------------
-tags: critical feature 2.2
+tags: wish feature docs
 
-extend metafunc.parametrize to better support indirection
-by specifying a setupfunc(request, val) which will _substitute_
-the funcarg factory. Here is an example:
+write up better examples showing the connection between
+the two.
 
-    def setupdb(request, val):
+refine parametrize API
+-------------------------------------------------------------
+tags: critical feature
+
+extend metafunc.parametrize to directly support indirection, example:
+
+    def setupdb(request, config):
         # setup "resource" based on test request and the values passed
         # in to parametrize. setupfunc is called for each such value.
         # you may use request.addfinalizer() or request.cached_setup ...
-        return db
+        return dynamic_setup_database(val)
 
     @pytest.mark.parametrize("db", ["pg", "mysql"], setupfunc=setupdb)
     def test_heavy_functional_test(db):
         ...
 
-There would be no need to write funcarg factories for this example, only
-to explain the attributes and functionality of "request".
+There would be no need to write or explain funcarg factories and
+their special __ syntax.
+
+The examples and improvements should also show how to put the parametrize
+decorator to a class, to a module or even to a directory. For the directory
+part a conftest.py content like this::
+
+    pytestmark = [
+        @pytest.mark.parametrize_setup("db", ...),
+    ]
+
+probably makes sense in order to keep the declarative nature. This mirrors
+the marker-mechanism with respect to a test module but puts it to a directory
+scale.
+
+When doing larger scoped parametrization it probably becomes necessary
+to allow parametrization to be ignored if the according parameter is not
+used (currently any parametrized argument that is not present in a function
+will cause a ValueError). Example:
+
+    @pytest.mark.parametrize("db", ..., mustmatch=False)
+
+means to not raise an error but simply ignore the parametrization
+if the signature of a decorated function does not match. XXX is it
+not sufficient to always allow non-matches?
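For contrast with the proposed ``setupfunc``, the indirection that already exists in the 2.2 line goes through ``indirect=True`` plus a funcarg factory reading ``request.param``; a minimal sketch (the database helper is hypothetical)::

    import pytest

    def pytest_funcarg__db(request):
        # the value given to parametrize() arrives here as request.param
        return dynamic_setup_database(request.param)  # hypothetical helper

    @pytest.mark.parametrize("db", ["pg", "mysql"], indirect=True)
    def test_heavy_functional_test(db):
        ...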
+
+unify item/request classes, generalize items
+---------------------------------------------------------------
+tags: 2.4 wish
+
+in lieu of extended parametrization and the new way to specify resource
+factories in terms of the parametrize decorator, consider unification
+of the item and request class. This also is connected with allowing
+funcargs in setup functions. Example of new item API:
+
+    item.getresource("db")  # alias for request.getfuncargvalue
+    item.addfinalizer(...)
+    item.cached_setup(...)
+    item.applymarker(...)
+
+test classes/modules could then use this api via::
+
+    def pytest_runtest_setup(item):
+        use item API ...
+
+introduction of this new method needs to be _fully_ backward compatible -
+and the documentation needs to change along to mention this new way of
+doing things.
+
+impl note: probably Request._fillfuncargs would be called from the
+python plugin's own pytest_runtest_setup(item) and would call
+item.getresource(X) for all X in the funcargs of a function.
+
+XXX is it possible to even put the above item API to Nodes, i.e. also
+to Directory/module/file/class collectors? Problem is that current
+funcarg factories presume they are called with a per-function (even
+per-funcarg-per-function) scope. Could there be small tweaks to the new
+API that lift this restriction?
+
+consider::
+
+    def setup_class(cls, tmpdir):
+        # would get a per-class tmpdir because tmpdir parametrization
+        # would know that it is called with a class scope
+
+this looks very difficult because those setup functions are also used
+by nose etc. Rather consider introduction of a new setup hook:
+
+    def setup_test(self, item):
+        self.db = item.cached_setup(..., scope='class')
+        self.tmpdir = item.getresource("tmpdir")
+
+this should be compatible to unittest/nose and provide much of what
+"testresources" provide. XXX This would not allow full parametrization
+such that test functions could be run multiple times with different
+values. See "parametrized attributes" issue.
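Much of what ``setup_test()`` sketches is already reachable in the 2.2 request API; a rough equivalent with today's funcarg machinery (the connect/close helpers are hypothetical)::

    def pytest_funcarg__db(request):
        # class-scoped caching, the 2.2-era way: cached_setup() memoizes
        # per the requested scope and registers the teardown for us
        return request.cached_setup(
            setup=lambda: connect_database(),      # hypothetical helper
            teardown=lambda db: db.close(),
            scope="class",
        )

    class TestMe:
        def test_uses_db(self, db):
            assert db is not None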
+
+allow parametrized attributes on classes
+--------------------------------------------------
+
+tags: wish 2.4
+
+example:
+
+    @pytest.mark.parametrize_attr("db", setupfunc, [1, 2, 3], scope="class")
+    @pytest.mark.parametrize_attr("tmp", setupfunc, scope="...")
+    class TestMe:
+        def test_hello(self):
+            access self.db ...
+
+this would run the test_hello() function three times with three
+different values for self.db. This could also work with unittest/nose
+style tests, i.e. it leverages existing test suites without needing
+to rewrite them. Together with the previously mentioned setup_test()
+maybe the setupfunc could be omitted?
 
 checks / deprecations for next release
 ---------------------------------------------------------------
@@ -46,21 +147,9 @@ appropriately to avoid this issue. Moreover/Alternatively, we could
 record which implementations of a hook succeeded and only call their
 teardown.
 
-do early-teardown of test modules
------------------------------------------
-tags: feature 2.3
-
-currently teardowns are called when the next test is setup
-except for the function/method level where internally
-"teardown_exact" tears down immediately. Generalize
-this to perform the "necessary" teardown compared to
-the "next" test item during teardown - this should
-get rid of some irritations because otherwise e.g.
-prints of teardown-code appear in the setup of the next test.
-
 consider and document __init__ file usage in test directories
 ---------------------------------------------------------------
-tags: bug 2.3 core
+tags: bug core
 
 Currently, a test module is imported with its fully qualified
 package path, determined by checking __init__ files upwards.

@@ -75,7 +164,7 @@ certain scenarios makes sense.
 
 relax requirement to have tests/testing contain an __init__
 ----------------------------------------------------------------
-tags: feature 2.3
+tags: feature
 bb: http://bitbucket.org/hpk42/py-trunk/issue/64
 
 A local test run of a "tests" directory may work

@@ -86,7 +175,7 @@ i.e. port the nose-logic of unloading a test module.
 
 customize test function collection
 -------------------------------------------------------
-tags: feature 2.3
+tags: feature
 
 - introduce py.test.mark.nocollect for not considering a function for
   test collection at all. maybe also introduce a py.test.mark.test to

@@ -95,7 +184,7 @@ tags: feature 2.3
 
 introduce pytest.mark.importorskip
 -------------------------------------------------------
-tags: feature 2.3
+tags: feature
 
 in addition to the imperative pytest.importorskip also introduce
 a pytest.mark.importorskip so that the test count is more correct.

@@ -103,7 +192,7 @@ a pytest.mark.importorskip so that the test count is more correct.
 
 introduce py.test.mark.platform
 -------------------------------------------------------
-tags: feature 2.3
+tags: feature
 
 Introduce nice-to-spell platform-skipping, examples:

@@ -120,7 +209,7 @@ interpreter versions.
 
 pytest.mark.xfail signature change
 -------------------------------------------------------
-tags: feature 2.3
+tags: feature
 
 change to pytest.mark.xfail(reason, (optional)condition)
 to better implement the word meaning. It also signals

@@ -128,36 +217,28 @@ better that we always have some kind of an implementation
 reason that can be formulated.
 Compatibility? how to introduce a new name/keep compat?
 
-introduce py.test.mark registration
------------------------------------------
-tags: feature 2.3
-
-introduce a hook that allows to register a named mark decorator
-with documentation and add "py.test --marks" to get
-a list of available marks. Deprecate "dynamic" mark
-definitions.
-
 allow to non-intrusively apply skips/xfail/marks
 ---------------------------------------------------
-tags: feature 2.3
+tags: feature
 
 use case: mark a module or directory structures
 to be skipped on certain platforms (i.e. no import
 attempt will be made).
 
 consider introducing a hook/mechanism that allows to apply marks
-from conftests or plugins.
+from conftests or plugins. (See extended parametrization)
 
 
 explicit referencing of conftest.py files
 -----------------------------------------
-tags: feature 2.3
+tags: feature
 
 allow to name conftest.py files (in sub directories) that should
 be imported early, as to include command line options.
 
 improve central py.test ini file
 ----------------------------------
-tags: feature 2.3
+tags: feature
 
 introduce more declarative configuration options:
 - (to-be-collected test directories)

@@ -168,7 +249,7 @@ introduce more declarative configuration options:
 
 new documentation
 ----------------------------------
-tags: feature 2.3
+tags: feature
 
 - logo py.test
 - examples for unittest or functional testing

@@ -177,23 +258,15 @@ tags: feature 2.3
 
 have imported module mismatch honour relative paths
 --------------------------------------------------------
-tags: bug 2.3
+tags: bug
 
 With 1.1.1 py.test fails at least on windows if an import
 is relative and compared against an absolute conftest.py
 path. Normalize.
 
-call termination with small timeout
--------------------------------------------------
-tags: feature 2.3
-test: testing/pytest/dist/test_dsession.py - test_terminate_on_hanging_node
-
-Call gateway group termination with a small timeout if available.
-Should make dist-testing less likely to leave lost processes.
-
 consider globals: py.test.ensuretemp and config
 --------------------------------------------------------------
-tags: experimental-wish 2.3
+tags: experimental-wish
 
 consider deprecating py.test.ensuretemp and py.test.config
 to further reduce py.test globality. Also consider

@@ -202,7 +275,7 @@ a plugin rather than being there from the start.
 
 consider allowing funcargs for setup methods
 --------------------------------------------------------------
-tags: experimental-wish 2.3
+tags: experimental-wish
 
 Users have expressed the wish to have funcargs available to setup
 functions. Experiment with allowing funcargs there - it might

@@ -225,7 +298,7 @@ world.
 
 consider pytest_addsyspath hook
 -----------------------------------------
-tags: 2.3
+tags:
 
 py.test could call a new pytest_addsyspath() in order to systematically
 allow manipulation of sys.path and to inhibit it via --no-addsyspath

@@ -237,7 +310,7 @@ and pytest_configure.
 
 show plugin information in test header
 ----------------------------------------------------------------
-tags: feature 2.3
+tags: feature
 
 Now that external plugins are becoming more numerous
 it would be useful to have external plugins along with

@@ -245,7 +318,7 @@ their versions displayed as a header line.
 
 deprecate global py.test.config usage
 ----------------------------------------------------------------
-tags: feature 2.3
+tags: feature
 
 py.test.ensuretemp and py.test.config are probably the last
 objects containing global state. Often using them is not

@@ -255,7 +328,7 @@ as others.
 
 remove deprecated bits in collect.py
 -------------------------------------------------------------------
-tags: feature 2.3
+tags: feature
 
 In an effort to further simplify code, review and remove deprecated bits
 in collect.py. Probably good:

@@ -264,7 +337,7 @@ in collect.py. Probably good:
 
 implement fslayout decorator
 ---------------------------------
-tags: feature 2.3
+tags: feature
 
 Improve the way how tests can work with pre-made examples,
 keeping the layout close to the test function:

@@ -278,9 +351,7 @@ keeping the layout close to the test function:
             pass
     """)
 def test_run(pytester, fslayout):
-    p = fslayout.find("test_*.py")
+    p = fslayout.findone("test_*.py")
     result = pytester.runpytest(p)
     assert result.ret == 0
-    assert result.passed == 1
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.1'
+__version__ = '2.2.4'
@@ -50,7 +50,7 @@ def pytest_configure(config):
     hook = None
     if mode == "rewrite":
         hook = rewrite.AssertionRewritingHook()
-        sys.meta_path.append(hook)
+        sys.meta_path.insert(0, hook)
     warn_about_missing_assertion(mode)
     config._assertstate = AssertionState(config, mode)
     config._assertstate.hook = hook
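Finders on ``sys.meta_path`` are consulted in order, so prepending (rather than appending) guarantees the rewriting hook sees test modules before the default import machinery. A minimal sketch of that ordering, with an illustrative finder that is not pytest's own::

    import sys

    class NoisyFinder(object):
        def find_module(self, name, path=None):   # PEP 302 finder protocol
            print("asked first for %s" % name)
            return None                            # decline; next finder tries

    sys.meta_path.insert(0, NoisyFinder())         # consulted before anything else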
@@ -1,8 +1,7 @@
 import py
 import sys, inspect
 from compiler import parse, ast, pycodegen
-from _pytest.assertion.util import format_explanation
-from _pytest.assertion.reinterpret import BuiltinAssertionError
+from _pytest.assertion.util import format_explanation, BuiltinAssertionError
 
 passthroughex = py.builtin._sysex

@@ -1,7 +1,6 @@
 import sys
 import py
 
-BuiltinAssertionError = py.builtin.builtins.AssertionError
+from _pytest.assertion.util import BuiltinAssertionError
 
 class AssertionError(BuiltinAssertionError):
     def __init__(self, *args):
@@ -1,7 +1,6 @@
 """Rewrite assertion AST to produce nice error messages"""
 
 import ast
-import collections
 import errno
 import itertools
 import imp

@@ -298,7 +297,7 @@ binop_map = {
     ast.Mult : "*",
     ast.Div : "/",
     ast.FloorDiv : "//",
-    ast.Mod : "%",
+    ast.Mod : "%%", # escaped for string formatting
     ast.Eq : "==",
     ast.NotEq : "!=",
     ast.Lt : "<",
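The escaping matters because the rewriter's explanation strings later pass through %-formatting, where a bare "%" would be consumed as a format directive. Roughly::

    # explanation templates go through %-formatting later on, so the
    # operator text is stored escaped:
    expl = "5 %% 3 == 2"        # stored with "%%" ...
    print(expl % ())            # ... renders as: 5 % 3 == 2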
@@ -2,6 +2,7 @@
 
 import py
 
+BuiltinAssertionError = py.builtin.builtins.AssertionError
 
 # The _reprcompare attribute on the util module is used by the new assertion
 # interpretation code and assertion rewriter to detect this plugin was
@@ -321,13 +321,15 @@ def importplugin(importspec):
     name = importspec
     try:
         mod = "_pytest." + name
-        return __import__(mod, None, None, '__doc__')
+        __import__(mod)
+        return sys.modules[mod]
     except ImportError:
         #e = py.std.sys.exc_info()[1]
         #if str(e).find(name) == -1:
         #    raise
         pass #
-        return __import__(importspec, None, None, '__doc__')
+        __import__(importspec)
+        return sys.modules[importspec]
 
 class MultiCall:
     """ execute a call into multiple python functions/methods. """
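The switch to ``sys.modules`` is about dotted names: ``__import__("pkg.mod")`` returns the top-level package, and the old fromlist trick (``'__doc__'``) was only a workaround. Looking the module up in ``sys.modules`` after the import yields the submodule directly; for example::

    import sys

    top = __import__("xml.sax")        # returns the top-level package "xml"
    mod = sys.modules["xml.sax"]       # the submodule we actually wanted
    print(top.__name__, mod.__name__)  # -> xml xml.sax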
@@ -463,13 +465,8 @@ def main(args=None, plugins=None):
     """ returned exit code integer, after an in-process testing run
     with the given command line arguments, preloading an optional list
     of passed in plugin objects. """
-    try:
-        config = _prepareconfig(args, plugins)
-        exitstatus = config.hook.pytest_cmdline_main(config=config)
-    except UsageError:
-        e = sys.exc_info()[1]
-        sys.stderr.write("ERROR: %s\n" %(e.args[0],))
-        exitstatus = 3
+    config = _prepareconfig(args, plugins)
+    exitstatus = config.hook.pytest_cmdline_main(config=config)
     return exitstatus
 
 class UsageError(Exception):
@@ -34,15 +34,21 @@ class Junit(py.xml.Namespace):
 # this dynamically instead of hardcoding it. The spec range of valid
 # chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
 # | [#x10000-#x10FFFF]
-_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19),
-                    (0xD800, 0xDFFF), (0xFDD0, 0xFFFF)]
-_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high))
-                   for (low, high) in _illegal_unichrs
-                   if low < sys.maxunicode]
-illegal_xml_re = re.compile(unicode('[%s]') %
-                            unicode('').join(_illegal_ranges))
-del _illegal_unichrs
-del _illegal_ranges
+_legal_chars = (0x09, 0x0A, 0x0d)
+_legal_ranges = (
+    (0x20, 0xD7FF),
+    (0xE000, 0xFFFD),
+    (0x10000, 0x10FFFF),
+)
+_legal_xml_re = [unicode("%s-%s") % (unichr(low), unichr(high))
+                 for (low, high) in _legal_ranges
+                 if low < sys.maxunicode]
+_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
+illegal_xml_re = re.compile(unicode('[^%s]') %
+                            unicode('').join(_legal_xml_re))
+del _legal_chars
+del _legal_ranges
+del _legal_xml_re
 
 def bin_xml_escape(arg):
     def repl(matchobj):
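Matching the complement of the legal ranges (``[^...]``) is the substance of the issue 126 fix: instead of enumerating bad characters (and missing some), everything outside the XML 1.0 character classes now gets escaped. A quick check of the idea, in the same Python 2 spelling as the code above::

    import re
    legal = [u"%s-%s" % (unichr(lo), unichr(hi))
             for lo, hi in [(0x20, 0xD7FF), (0xE000, 0xFFFD)]]
    legal = [unichr(x) for x in (0x09, 0x0A, 0x0D)] + legal
    illegal_xml_re = re.compile(u'[^%s]' % u''.join(legal))

    print(bool(illegal_xml_re.search(u"ok\ttext")))   # False - tab is legal
    print(bool(illegal_xml_re.search(u"\x1b[31m")))   # True - ESC needs escaping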
@@ -75,6 +81,11 @@ def pytest_unconfigure(config):
     config.pluginmanager.unregister(xml)
 
 
+def mangle_testnames(names):
+    names = [x.replace(".py", "") for x in names if x != '()']
+    names[0] = names[0].replace("/", '.')
+    return names
+
 class LogXML(object):
     def __init__(self, logfile, prefix):
         logfile = os.path.expanduser(os.path.expandvars(logfile))

@@ -85,9 +96,7 @@ class LogXML(object):
         self.failed = self.errors = 0
 
     def _opentestcase(self, report):
-        names = report.nodeid.split("::")
-        names[0] = names[0].replace("/", '.')
-        names = [x.replace(".py", "") for x in names if x != "()"]
+        names = mangle_testnames(report.nodeid.split("::"))
         classnames = names[:-1]
         if self.prefix:
             classnames.insert(0, self.prefix)
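The extracted helper makes the junitxml classname mangling (issue #144) easy to see in isolation: path separators become dots, the ``.py`` suffix is dropped, and the synthetic ``()`` instance node disappears. For example::

    >>> mangle_testnames("tests/unit/test_api.py::TestAPI::()::test_get".split("::"))
    ['tests.unit.test_api', 'TestAPI', 'test_get']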
@@ -10,6 +10,7 @@ EXIT_OK = 0
 EXIT_TESTSFAILED = 1
 EXIT_INTERRUPTED = 2
 EXIT_INTERNALERROR = 3
+EXIT_USAGEERROR = 4
 
 name_re = py.std.re.compile("^[a-zA-Z_]\w*$")
@@ -65,30 +66,34 @@ def wrap_session(config, doit):
     session.exitstatus = EXIT_OK
     initstate = 0
     try:
         config.pluginmanager.do_configure(config)
         initstate = 1
         config.hook.pytest_sessionstart(session=session)
         initstate = 2
         doit(config, session)
     except pytest.UsageError:
-        raise
+        msg = sys.exc_info()[1].args[0]
+        sys.stderr.write("ERROR: %s\n" %(msg,))
+        session.exitstatus = EXIT_USAGEERROR
     except KeyboardInterrupt:
         excinfo = py.code.ExceptionInfo()
         config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
         session.exitstatus = EXIT_INTERRUPTED
     except:
         excinfo = py.code.ExceptionInfo()
         config.pluginmanager.notify_exception(excinfo, config.option)
         session.exitstatus = EXIT_INTERNALERROR
         if excinfo.errisinstance(SystemExit):
             sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
-    if initstate >= 2:
-        config.hook.pytest_sessionfinish(session=session,
-            exitstatus=session.exitstatus or (session._testsfailed and 1))
-    if not session.exitstatus and session._testsfailed:
-        session.exitstatus = EXIT_TESTSFAILED
-    if initstate >= 1:
-        config.pluginmanager.do_unconfigure(config)
+    finally:
+        if initstate >= 2:
+            config.hook.pytest_sessionfinish(session=session,
+                exitstatus=session.exitstatus or (session._testsfailed and 1))
+        if not session.exitstatus and session._testsfailed:
+            session.exitstatus = EXIT_TESTSFAILED
+        if initstate >= 1:
+            config.pluginmanager.do_unconfigure(config)
     return session.exitstatus
 
 def pytest_cmdline_main(config):
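The try/finally is the substance of issue #143: sessionfinish/unconfigure now run even when setup or the main loop blows up, guarded by how far initialization got. The shape of the guarantee, reduced to a sketch with hypothetical stand-ins for the pytest hooks::

    def run_with_guaranteed_teardown():
        initstate = 0
        try:
            configure()          # hypothetical stand-in hooks
            initstate = 1
            session_start()
            initstate = 2
            run_tests()
        finally:
            if initstate >= 2:
                session_finish() # only if the matching start actually ran
            if initstate >= 1:
                unconfigure()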
@@ -410,6 +415,7 @@ class Session(FSCollector):
         self._notfound = []
         self._initialpaths = set()
+        self._initialparts = []
         self.items = items = []
         for arg in args:
             parts = self._parsearg(arg)
             self._initialparts.append(parts)

@@ -425,7 +431,6 @@ class Session(FSCollector):
         if not genitems:
             return rep.result
         else:
-            self.items = items = []
             if rep.passed:
                 for node in rep.result:
                     self.items.extend(self.genitems(node))
@@ -191,8 +191,7 @@ class MarkDecorator:
             holder = MarkInfo(self.markname, self.args, self.kwargs)
             setattr(func, self.markname, holder)
         else:
-            holder.kwargs.update(self.kwargs)
-            holder.args += self.args
+            holder.add(self.args, self.kwargs)
         return func
     kw = self.kwargs.copy()
     kw.update(kwargs)

@@ -208,27 +207,20 @@ class MarkInfo:
         self.args = args
         #: keyword argument dictionary, empty if nothing specified
         self.kwargs = kwargs
+        self._arglist = [(args, kwargs.copy())]
 
     def __repr__(self):
         return "<MarkInfo %r args=%r kwargs=%r>" % (
             self.name, self.args, self.kwargs)
 
-def pytest_itemcollected(item):
-    if not isinstance(item, pytest.Function):
-        return
-    try:
-        func = item.obj.__func__
-    except AttributeError:
-        func = getattr(item.obj, 'im_func', item.obj)
-    pyclasses = (pytest.Class, pytest.Module)
-    for node in item.listchain():
-        if isinstance(node, pyclasses):
-            marker = getattr(node.obj, 'pytestmark', None)
-            if marker is not None:
-                if isinstance(marker, list):
-                    for mark in marker:
-                        mark(func)
-                else:
-                    marker(func)
-            node = node.parent
-    item.keywords.update(py.builtin._getfuncdict(func))
+    def add(self, args, kwargs):
+        """ add a MarkInfo with the given args and kwargs. """
+        self._arglist.append((args, kwargs))
+        self.args += args
+        self.kwargs.update(kwargs)
+
+    def __iter__(self):
+        """ yield MarkInfo objects each relating to a marking-call. """
+        for args, kwargs in self._arglist:
+            yield MarkInfo(self.name, args, kwargs)
@@ -13,6 +13,7 @@ def pytest_funcarg__monkeypatch(request):
     monkeypatch.setenv(name, value, prepend=False)
     monkeypatch.delenv(name, value, raising=True)
     monkeypatch.syspath_prepend(path)
+    monkeypatch.chdir(path)
 
     All modifications will be undone after the requesting
     test function has finished. The ``raising``

@@ -30,6 +31,7 @@ class monkeypatch:
     def __init__(self):
         self._setattr = []
         self._setitem = []
+        self._cwd = None
 
     def setattr(self, obj, name, value, raising=True):
         """ set attribute ``name`` on ``obj`` to ``value``, by default

@@ -83,6 +85,17 @@ class monkeypatch:
         self._savesyspath = sys.path[:]
         sys.path.insert(0, str(path))
 
+    def chdir(self, path):
+        """ change the current working directory to the specified path
+        path can be a string or a py.path.local object
+        """
+        if self._cwd is None:
+            self._cwd = os.getcwd()
+        if hasattr(path, "chdir"):
+            path.chdir()
+        else:
+            os.chdir(path)
+
     def undo(self):
         """ undo previous changes. This call consumes the
         undo stack. Calling it a second time has no effect unless

@@ -95,9 +108,17 @@ class monkeypatch:
         self._setattr[:] = []
         for dictionary, name, value in self._setitem:
             if value is notset:
-                del dictionary[name]
+                try:
+                    del dictionary[name]
+                except KeyError:
+                    pass # was already deleted, so we have the desired state
             else:
                 dictionary[name] = value
         self._setitem[:] = []
         if hasattr(self, '_savesyspath'):
             sys.path[:] = self._savesyspath
             del self._savesyspath
 
+        if self._cwd is not None:
+            os.chdir(self._cwd)
+            self._cwd = None
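Typical use of the new ``chdir`` method, which also exercises the repaired undo bookkeeping (a second undo is now a no-op instead of a crash); a minimal sketch using the existing tmpdir and monkeypatch funcargs::

    def test_reads_relative_config(tmpdir, monkeypatch):
        tmpdir.join("app.cfg").write("debug = true")
        monkeypatch.chdir(tmpdir)          # restored automatically at teardown
        assert open("app.cfg").read().startswith("debug")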
@@ -2,7 +2,7 @@
 import py, sys
 
 class url:
-    base = "http://paste.pocoo.org"
+    base = "http://bpaste.net"
     xmlrpc = base + "/xmlrpc/"
     show = base + "/show/"

@@ -11,7 +11,7 @@ def pytest_addoption(parser):
     group._addoption('--pastebin', metavar="mode",
         action='store', dest="pastebin", default=None,
         type="choice", choices=['failed', 'all'],
-        help="send failed|all info to Pocoo pastebin service.")
+        help="send failed|all info to bpaste.net pastebin service.")
 
 def pytest_configure(__multicall__, config):
     import tempfile
@@ -33,7 +33,8 @@ def pytest_generate_tests(metafunc):
         param = metafunc.function.parametrize
     except AttributeError:
         return
-    metafunc.parametrize(*param.args, **param.kwargs)
+    for p in param:
+        metafunc.parametrize(*p.args, **p.kwargs)
 
 def pytest_configure(config):
     config.addinivalue_line("markers",
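Iterating over the accumulated marker (possible now that MarkInfo grew ``__iter__``) is what lets parametrize stack across levels, per the issue106 changelog entry; module- and function-level parametrization combine instead of the last application winning. A sketch of the behavior this enables::

    import pytest

    pytestmark = pytest.mark.parametrize("x", [0, 1])      # module level

    @pytest.mark.parametrize("y", [2, 3])                  # function level
    def test_combinations(x, y):
        assert (x, y) in [(0, 2), (0, 3), (1, 2), (1, 3)]  # four tests collected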
@@ -220,6 +221,7 @@ class PyCollectorMixin(PyobjMixin, pytest.Collector):
         module = self.getparent(Module).obj
         clscol = self.getparent(Class)
         cls = clscol and clscol.obj or None
+        transfer_markers(funcobj, cls, module)
         metafunc = Metafunc(funcobj, config=self.config,
             cls=cls, module=module)
         gentesthook = self.config.hook.pytest_generate_tests

@@ -239,6 +241,19 @@ class PyCollectorMixin(PyobjMixin, pytest.Collector):
             l.append(function)
         return l
+def transfer_markers(funcobj, cls, mod):
+    # XXX this should rather be code in the mark plugin or the mark
+    # plugin should merge with the python plugin.
+    for holder in (cls, mod):
+        try:
+            pytestmark = holder.pytestmark
+        except AttributeError:
+            continue
+        if isinstance(pytestmark, list):
+            for mark in pytestmark:
+                mark(funcobj)
+        else:
+            pytestmark(funcobj)
+
 class Module(pytest.File, PyCollectorMixin):
     def _getobj(self):

@@ -259,7 +274,8 @@ class Module(pytest.File, PyCollectorMixin):
             "  %s\n"
             "which is not the same as the test file we want to collect:\n"
             "  %s\n"
-            "HINT: use a unique basename for your test file modules"
+            "HINT: remove __pycache__ / .pyc files and/or use a "
+            "unique basename for your test file modules"
             % e.args
         )
         #print "imported test module", mod
@@ -295,12 +311,14 @@ class Class(PyCollectorMixin, pytest.Collector):
         setup_class = getattr(self.obj, 'setup_class', None)
         if setup_class is not None:
             setup_class = getattr(setup_class, 'im_func', setup_class)
+            setup_class = getattr(setup_class, '__func__', setup_class)
             setup_class(self.obj)
 
     def teardown(self):
         teardown_class = getattr(self.obj, 'teardown_class', None)
         if teardown_class is not None:
             teardown_class = getattr(teardown_class, 'im_func', teardown_class)
+            teardown_class = getattr(teardown_class, '__func__', teardown_class)
             teardown_class(self.obj)
 
 class Instance(PyCollectorMixin, pytest.Collector):

@@ -558,7 +576,7 @@ class CallSpec2(object):
 
     @property
     def id(self):
-        return "-".join(filter(None, self._idlist))
+        return "-".join(map(str, filter(None, self._idlist)))
 
     def setmulti(self, valtype, argnames, valset, id):
         for arg,val in zip(argnames, valset):
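The stacked getattr calls are the issue 140 fix: when ``setup_class`` is written as a classmethod, the collected attribute is already bound to the class, and calling it with the class again would pass ``cls`` twice. Unwrapping goes through ``im_func`` on Python 2 and ``__func__`` on Python 2.6+/3; roughly::

    class TestSomething(object):
        @classmethod
        def setup_class(cls):
            cls.resource = object()

    meth = TestSomething.setup_class        # already bound to the class
    func = getattr(meth, 'im_func', meth)   # Python 2 spelling
    func = getattr(func, '__func__', func)  # Python 2.6+/3 spelling
    func(TestSomething)                     # call the plain function once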
@@ -613,9 +631,11 @@ class Metafunc:
         if not isinstance(argnames, (tuple, list)):
             argnames = (argnames,)
             argvalues = [(val,) for val in argvalues]
-        for arg in argnames:
-            if arg not in self.funcargnames:
-                raise ValueError("%r has no argument %r" %(self.function, arg))
+        if not indirect:
+            #XXX should we also check for the opposite case?
+            for arg in argnames:
+                if arg not in self.funcargnames:
+                    raise ValueError("%r has no argument %r" %(self.function, arg))
         valtype = indirect and "params" or "funcargs"
         if not ids:
             idmaker = IDMaker()
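With ``indirect=True`` the parametrized values route to a funcarg factory via ``request.param``, so the factory (not the function-signature check) decides what the name means; the check now only applies to direct parametrization. A minimal indirect setup in the 2.2 API::

    import pytest

    def pytest_funcarg__db(request):
        # "db" is produced here; the parametrized value is request.param
        return {"backend": request.param}

    def pytest_generate_tests(metafunc):
        if "db" in metafunc.funcargnames:
            metafunc.parametrize("db", ["pg", "mysql"], indirect=True)

    def test_backend(db):
        assert db["backend"] in ("pg", "mysql")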
@@ -47,6 +47,8 @@ def pytest_terminal_summary(terminalreporter):
 
 def pytest_sessionstart(session):
     session._setupstate = SetupState()
+def pytest_sessionfinish(session):
+    session._setupstate.teardown_all()
 
 class NodeInfo:
     def __init__(self, location):

@@ -141,6 +143,10 @@ def getslaveinfoline(node):
     return s
 
 class BaseReport(object):
+
+    def __init__(self, **kw):
+        self.__dict__.update(kw)
+
     def toterminal(self, out):
         longrepr = self.longrepr
         if hasattr(self, 'node'):

@@ -190,7 +196,7 @@ class TestReport(BaseReport):
     they fail).
     """
     def __init__(self, nodeid, location,
-            keywords, outcome, longrepr, when, sections=(), duration=0):
+            keywords, outcome, longrepr, when, sections=(), duration=0, **extra):
         #: normalized collection node id
         self.nodeid = nodeid

@@ -219,6 +225,8 @@ class TestReport(BaseReport):
         #: time it took to run just the test
         self.duration = duration
 
+        self.__dict__.update(extra)
+
     def __repr__(self):
         return "<TestReport %r when=%r outcome=%r>" % (
             self.nodeid, self.when, self.outcome)
@@ -226,9 +234,10 @@ class TestReport(BaseReport):
 class TeardownErrorReport(BaseReport):
     outcome = "failed"
     when = "teardown"
-    def __init__(self, longrepr):
+    def __init__(self, longrepr, **extra):
         self.longrepr = longrepr
         self.sections = []
+        self.__dict__.update(extra)
 
 def pytest_make_collect_report(collector):
     call = CallInfo(collector._memocollect, "memocollect")

@@ -250,12 +259,13 @@ def pytest_make_collect_report(collector):
         getattr(call, 'result', None))
 
 class CollectReport(BaseReport):
-    def __init__(self, nodeid, outcome, longrepr, result, sections=()):
+    def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
         self.nodeid = nodeid
         self.outcome = outcome
         self.longrepr = longrepr
         self.result = result or []
         self.sections = list(sections)
+        self.__dict__.update(extra)
 
     @property
     def location(self):

@@ -406,9 +416,10 @@ def importorskip(modname, minversion=None):
     __tracebackhide__ = True
     compile(modname, '', 'eval') # to catch syntaxerrors
     try:
-        mod = __import__(modname, None, None, ['__doc__'])
+        __import__(modname)
     except ImportError:
         py.test.skip("could not import %r" %(modname,))
+    mod = sys.modules[modname]
     if minversion is None:
         return mod
     verattr = getattr(mod, '__version__', None)
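The ``**extra`` pass-through is what makes report attributes survive distributed runs: pytest-xdist serializes a report to a dict and rebuilds it by feeding that dict back into the constructor, so unknown keys now round-trip instead of raising TypeError. A plugin can therefore simply attach data (the attribute name here is hypothetical)::

    import time

    def pytest_runtest_makereport(__multicall__, item, call):
        rep = __multicall__.execute()
        if rep is not None and rep.when == "call":
            rep.finished_at = time.time()  # travels with the report under xdist
        return rep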
@@ -132,6 +132,14 @@ def check_xfail_no_run(item):
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, pytest.Function):
         return
+    # unittest special case, see setting of _unexpectedsuccess
+    if hasattr(item, '_unexpectedsuccess'):
+        rep = __multicall__.execute()
+        if rep.when == "call":
+            # we need to translate into how py.test encodes xpass
+            rep.keywords['xfail'] = "reason: " + repr(item._unexpectedsuccess)
+            rep.outcome = "failed"
+        return rep
     if not (call.excinfo and
         call.excinfo.errisinstance(py.test.xfail.Exception)):
         evalxfail = getattr(item, '_evalxfail', None)
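End to end, this is the unittest compatibility fix from the changelog: ``expectedFailure`` now maps onto pytest's xfail/xpass reporting. A test module exercising both directions::

    import unittest

    class TestCompat(unittest.TestCase):
        @unittest.expectedFailure
        def test_known_bug(self):
            self.assertEqual(1, 2)   # reported as xfail, not a failure

        @unittest.expectedFailure
        def test_fixed_already(self):
            self.assertEqual(1, 1)   # unexpected success -> reported failed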
@@ -282,10 +282,18 @@ class TerminalReporter:
         # we take care to leave out Instances aka ()
         # because later versions are going to get rid of them anyway
         if self.config.option.verbose < 0:
-            for item in items:
-                nodeid = item.nodeid
-                nodeid = nodeid.replace("::()::", "::")
-                self._tw.line(nodeid)
+            if self.config.option.verbose < -1:
+                counts = {}
+                for item in items:
+                    name = item.nodeid.split('::', 1)[0]
+                    counts[name] = counts.get(name, 0) + 1
+                for name, count in sorted(counts.items()):
+                    self._tw.line("%s: %d" % (name, count))
+            else:
+                for item in items:
+                    nodeid = item.nodeid
+                    nodeid = nodeid.replace("::()::", "::")
+                    self._tw.line(nodeid)
             return
         stack = []
         indent = ""
@@ -2,6 +2,9 @@
 import pytest, py
 import sys, pdb
 
+# for transferring markers
+from _pytest.python import transfer_markers
+
 def pytest_pycollect_makeitem(collector, name, obj):
     unittest = sys.modules.get('unittest')
     if unittest is None:

@@ -19,7 +22,14 @@ def pytest_pycollect_makeitem(collector, name, obj):
 class UnitTestCase(pytest.Class):
     def collect(self):
         loader = py.std.unittest.TestLoader()
+        module = self.getparent(pytest.Module).obj
+        cls = self.obj
         for name in loader.getTestCaseNames(self.obj):
+            x = getattr(self.obj, name)
+            funcobj = getattr(x, 'im_func', x)
+            transfer_markers(funcobj, cls, module)
+            if hasattr(funcobj, 'todo'):
+                pytest.mark.xfail(reason=str(funcobj.todo))(funcobj)
             yield TestCaseFunction(name, parent=self)
 
     def setup(self):

@@ -37,12 +47,6 @@ class UnitTestCase(pytest.Class):
 class TestCaseFunction(pytest.Function):
     _excinfo = None
 
-    def __init__(self, name, parent):
-        super(TestCaseFunction, self).__init__(name, parent)
-        if hasattr(self._obj, 'todo'):
-            getattr(self._obj, 'im_func', self._obj).xfail = \
-                pytest.mark.xfail(reason=str(self._obj.todo))
-
     def setup(self):
         self._testcase = self.parent.obj(self.name)
         self._obj = getattr(self._testcase, self.name)
@@ -87,28 +91,37 @@ class TestCaseFunction(pytest.Function):
         self._addexcinfo(rawexcinfo)
 
     def addFailure(self, testcase, rawexcinfo):
         self._addexcinfo(rawexcinfo)
 
     def addSkip(self, testcase, reason):
         try:
             pytest.skip(reason)
         except pytest.skip.Exception:
             self._addexcinfo(sys.exc_info())
-    def addExpectedFailure(self, testcase, rawexcinfo, reason):
+
+    def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
         try:
             pytest.xfail(str(reason))
         except pytest.xfail.Exception:
             self._addexcinfo(sys.exc_info())
-    def addUnexpectedSuccess(self, testcase, reason):
-        pass
+
+    def addUnexpectedSuccess(self, testcase, reason=""):
+        self._unexpectedsuccess = reason
 
     def addSuccess(self, testcase):
         pass
 
     def stopTest(self, testcase):
         pass
 
     def runtest(self):
         self._testcase(result=self)
 
     def _prunetraceback(self, excinfo):
         pytest.Function._prunetraceback(self, excinfo)
-        excinfo.traceback = excinfo.traceback.filter(lambda x:not x.frame.f_globals.get('__unittest'))
+        traceback = excinfo.traceback.filter(
+            lambda x:not x.frame.f_globals.get('__unittest'))
+        if traceback:
+            excinfo.traceback = traceback
 
 @pytest.mark.tryfirst
 def pytest_runtest_makereport(item, call):
@@ -46,7 +46,7 @@ except ImportError:
         args = [quote(arg) for arg in args]
         return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
 
-DEFAULT_VERSION = "0.6.19"
+DEFAULT_VERSION = "0.6.27"
 DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
 SETUPTOOLS_FAKED_VERSION = "0.6c11"

@@ -63,7 +63,7 @@ Description: xxx
 """ % SETUPTOOLS_FAKED_VERSION
 
 
-def _install(tarball):
+def _install(tarball, install_args=()):
     # extracting the tarball
     tmpdir = tempfile.mkdtemp()
     log.warn('Extracting in %s', tmpdir)

@@ -81,7 +81,7 @@ def _install(tarball):
 
         # installing
         log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install'):
+        if not _python_cmd('setup.py', 'install', *install_args):
             log.warn('Something went wrong during the installation.')
             log.warn('See the error message above.')
     finally:

@@ -306,6 +306,9 @@ def _create_fake_setuptools_pkg_info(placeholder):
         log.warn('%s already exists', pkg_info)
         return
 
+    if not os.access(pkg_info, os.W_OK):
+        log.warn("Don't have permissions to write %s, skipping", pkg_info)
+
     log.warn('Creating %s', pkg_info)
     f = open(pkg_info, 'w')
     try:

@@ -474,11 +477,20 @@ def _extractall(self, path=".", members=None):
         else:
             self._dbg(1, "tarfile: %s" % e)
 
+def _build_install_args(argv):
+    install_args = []
+    user_install = '--user' in argv
+    if user_install and sys.version_info < (2,6):
+        log.warn("--user requires Python 2.6 or later")
+        raise SystemExit(1)
+    if user_install:
+        install_args.append('--user')
+    return install_args
+
 def main(argv, version=DEFAULT_VERSION):
     """Install or upgrade setuptools and EasyInstall"""
     tarball = download_setuptools()
-    _install(tarball)
+    _install(tarball, _build_install_args(argv))
 
 
 if __name__ == '__main__':
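The new helper simply turns a ``--user`` flag on the script's command line into an argument for ``setup.py install``, and refuses it on pre-2.6 Pythons where ``--user`` does not exist::

    >>> _build_install_args(['--user'])   # on Python >= 2.6
    ['--user']
    >>> _build_install_args([])
    []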
@@ -5,6 +5,8 @@ Release announcements
 .. toctree::
    :maxdepth: 2
 
+   release-2.2.2
    release-2.2.1
    release-2.2.0
    release-2.1.3
    release-2.1.2
doc/announce/release-2.2.2.txt (new file, 43 lines)

@@ -0,0 +1,43 @@
+pytest-2.2.2: bug fixes
+===========================================================================
+
+pytest-2.2.2 (updated to 2.2.3 to fix packaging issues) is a minor
+backward-compatible release of the versatile py.test testing tool. It
+contains bug fixes and a few refinements particularly to reporting with
+"--collectonly", see below for details.
+
+For general information see here:
+
+     http://pytest.org/
+
+To install or upgrade pytest:
+
+    pip install -U pytest # or
+    easy_install -U pytest
+
+Special thanks for helping on this release to Ronny Pfannschmidt
+and Ralf Schmitt and the contributors of issues.
+
+best,
+holger krekel
+
+
+Changes between 2.2.1 and 2.2.2
+----------------------------------------
+
+- fix issue101: wrong args to unittest.TestCase test function now
+  produce better output
+- fix issue102: report more useful errors and hints for when a
+  test directory was renamed and some pyc/__pycache__ remain
+- fix issue106: allow parametrize to be applied multiple times
+  e.g. from module, class and at function level.
+- fix issue107: actually perform session scope finalization
+- don't check in parametrize if indirect parameters are funcarg names
+- add chdir method to monkeypatch funcarg
+- fix crash resulting from calling monkeypatch undo a second time
+- fix issue115: make --collectonly robust against early failure
+  (missing files/directories)
+- "-qq --collectonly" now shows only files and the number of tests in them
+- "-q --collectonly" now shows test ids
+- allow adding of attributes to test reports such that it also works
+  with distributed testing (no upgrade of pytest-xdist needed)
doc/announce/release-2.2.4.txt (new file, 39 lines)

@@ -0,0 +1,39 @@
+pytest-2.2.4: bug fixes, better junitxml/unittest/python3 compat
+===========================================================================
+
+pytest-2.2.4 is a minor backward-compatible release of the versatile
+py.test testing tool. It contains bug fixes and a few refinements
+to junitxml reporting, better unittest- and python3 compatibility.
+
+For general information see here:
+
+     http://pytest.org/
+
+To install or upgrade pytest:
+
+    pip install -U pytest # or
+    easy_install -U pytest
+
+Special thanks for helping on this release to Ronny Pfannschmidt
+and Benjamin Peterson and the contributors of issues.
+
+best,
+holger krekel
+
+Changes between 2.2.3 and 2.2.4
+-----------------------------------
+
+- fix error message for rewritten assertions involving the % operator
+- fix issue 126: correctly match all invalid xml characters for junitxml
+  binary escape
+- fix issue with unittest: now @unittest.expectedFailure markers should
+  be processed correctly (you can also use @pytest.mark markers)
+- document integration with the extended distribute/setuptools test commands
+- fix issue 140: properly get the real functions
+  of bound classmethods for setup/teardown_class
+- fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net
+- fix issue #143: call unconfigure/sessionfinish always when
+  configure/sessionstart were called
+- fix issue #144: better mangle test ids to junitxml classnames
+- upgrade distribute_setup.py to 0.6.27
@@ -23,7 +23,7 @@ you will see the return value of the function call::
 
     $ py.test test_assert1.py
     =========================== test session starts ============================
-    platform darwin -- Python 2.7.1 -- pytest-2.2.1
+    platform darwin -- Python 2.7.1 -- pytest-2.2.2
     collecting ... collected 1 items
 
     test_assert1.py F

@@ -105,7 +105,7 @@ if you run this module::
 
     $ py.test test_assert2.py
    =========================== test session starts ============================
-    platform darwin -- Python 2.7.1 -- pytest-2.2.1
+    platform darwin -- Python 2.7.1 -- pytest-2.2.2
     collecting ... collected 1 items
 
     test_assert2.py F

@@ -124,7 +124,7 @@ if you run this module::
     E            '5'
 
     test_assert2.py:5: AssertionError
-    ========================= 1 failed in 0.02 seconds =========================
+    ========================= 1 failed in 0.03 seconds =========================
 
 Special comparisons are done for a number of cases:
@@ -28,7 +28,7 @@ You can ask for available builtin or project-custom
 
     $ py.test --funcargs
     =========================== test session starts ============================
-    platform darwin -- Python 2.7.1 -- pytest-2.2.1
+    platform darwin -- Python 2.7.1 -- pytest-2.2.2
     collected 0 items
     pytestconfig
         the pytest config object with access to command line opts.

@@ -60,6 +60,7 @@ You can ask for available builtin or project-custom
         monkeypatch.setenv(name, value, prepend=False)
         monkeypatch.delenv(name, value, raising=True)
         monkeypatch.syspath_prepend(path)
+        monkeypatch.chdir(path)
 
         All modifications will be undone after the requesting
         test function has finished. The ``raising``

@@ -75,5 +76,7 @@ You can ask for available builtin or project-custom
         See http://docs.python.org/library/warnings.html for information
         on warning categories.
 
+    cov
+        A pytest funcarg that provides access to the underlying coverage object.
 
-    ============================= in 0.00 seconds =============================
+    ============================= in 0.01 seconds =============================
@@ -64,7 +64,7 @@ of the failing function and hide the other one::
 
     $ py.test
     =========================== test session starts ============================
-    platform darwin -- Python 2.7.1 -- pytest-2.2.1
+    platform darwin -- Python 2.7.1 -- pytest-2.2.2
     collecting ... collected 2 items
 
     test_module.py .F

@@ -78,7 +78,7 @@ of the failing function and hide the other one::
 
     test_module.py:9: AssertionError
     ----------------------------- Captured stdout ------------------------------
-    setting up <function test_func2 at 0x101314c80>
+    setting up <function test_func2 at 0x1013230c8>
     ==================== 1 failed, 1 passed in 0.03 seconds ====================
 
 Accessing captured output from a test function
@@ -1,10 +1,10 @@
 
 .. _toc:
 
-Full pytest documenation
-========================
+Full pytest documentation
+===========================
 
-`Download latest version as PDF <http://media.readthedocs.org/pdf/pytest/latest/pytest.pdf>`_
+`Download latest version as PDF <pytest.pdf>`_
 
 .. `Download latest version as EPUB <http://media.readthedocs.org/epub/pytest/latest/pytest.epub>`_
@@ -94,7 +94,7 @@ Builtin configuration file options
         [seq]   matches any character in seq
         [!seq]  matches any char not in seq
 
-    Default patterns are ``.* _* CVS {args}``. Setting a ``norecurse``
+    Default patterns are ``.* _* CVS {args}``. Setting a ``norecursedir``
     replaces the default. Here is an example of how to avoid
     certain directories::
@@ -44,10 +44,10 @@ then you can just invoke ``py.test`` without command line options::
 
     $ py.test
     =========================== test session starts ============================
-    platform darwin -- Python 2.7.1 -- pytest-2.2.1
+    platform darwin -- Python 2.7.1 -- pytest-2.2.2
     collecting ... collected 1 items
 
     mymodule.py .
 
-    ========================= 1 passed in 0.05 seconds =========================
+    ========================= 1 passed in 0.51 seconds =========================
@@ -26,7 +26,7 @@ You can then restrict a test run to only run tests marked with ``webtest``::
|
||||
|
||||
$ py.test -v -m webtest
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1 -- /Users/hpk/venv/1/bin/python
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_server.py:3: test_send_http PASSED
|
||||
@@ -38,13 +38,13 @@ Or the inverse, running all tests except the webtest ones::
|
||||
|
||||
$ py.test -v -m "not webtest"
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1 -- /Users/hpk/venv/1/bin/python
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_server.py:6: test_something_quick PASSED
|
||||
|
||||
================= 1 tests deselected by "-m 'not webtest'" =================
|
||||
================== 1 passed, 1 deselected in 0.01 seconds ==================
|
||||
================== 1 passed, 1 deselected in 0.02 seconds ==================
|
||||
|
||||
Registering markers
|
||||
-------------------------------------
|
||||
@@ -134,6 +134,7 @@ You can also set a module level marker::
|
||||
in which case it will be applied to all functions and
|
||||
methods defined in the module.
|
||||
|
||||
|
||||
Using ``-k TEXT`` to select tests
|
||||
----------------------------------------------------
|
||||
|
||||
@@ -142,7 +143,7 @@ the given argument::
|
||||
|
||||
$ py.test -k send_http # running with the above defined examples
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_server.py .
|
||||
@@ -154,7 +155,7 @@ And you can also run all tests except the ones that match the keyword::
|
||||
|
||||
$ py.test -k-send_http
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_mark_classlevel.py ..
|
||||
@@ -167,13 +168,13 @@ Or to only select the class::
|
||||
|
||||
$ py.test -kTestClass
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_mark_classlevel.py ..
|
||||
|
||||
=================== 2 tests deselected by '-kTestClass' ====================
|
||||
================== 2 passed, 2 deselected in 0.02 seconds ==================
|
||||
================== 2 passed, 2 deselected in 0.03 seconds ==================
|
||||
|
||||
.. _`adding a custom marker from a plugin`:
|
||||
|
||||
@@ -222,7 +223,7 @@ the test needs::
|
||||
|
||||
$ py.test -E stage2
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_someenv.py s
|
||||
@@ -233,12 +234,12 @@ and here is one that specifies exactly the environment needed::
|
||||
|
||||
$ py.test -E stage1
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_someenv.py .
|
||||
|
||||
========================= 1 passed in 0.01 seconds =========================
|
||||
========================= 1 passed in 0.02 seconds =========================
|
||||
|
||||
The ``--markers`` option always gives you a list of available markers::

@@ -254,4 +255,43 @@ The ``--markers`` option always gives you a list of available markers::

    @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.

    @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.


Reading markers which were set from multiple places
----------------------------------------------------

.. versionadded:: 2.2.2

If you are heavily using markers in your test suite you may encounter the case where a marker is applied several times to a test function.  From plugin
code you can iterate over all such settings.  Example::

    # content of test_mark_three_times.py
    import pytest
    pytestmark = pytest.mark.glob("module", x=1)

    @pytest.mark.glob("class", x=2)
    class TestClass:
        @pytest.mark.glob("function", x=3)
        def test_something(self):
            pass

Here we have the marker "glob" applied three times to the same
test function.  From a conftest file we can read it like this::

    # content of conftest.py

    def pytest_runtest_setup(item):
        g = getattr(item.obj, 'glob', None)
        if g is not None:
            for info in g:
                print ("glob args=%s kwargs=%s" %(info.args, info.kwargs))

Let's run this without capturing output and see what we get::

    $ py.test -q -s
    collecting ... collected 2 items
    ..
    2 passed in 0.02 seconds
    glob args=('function',) kwargs={'x': 3}
    glob args=('class',) kwargs={'x': 2}
    glob args=('module',) kwargs={'x': 1}
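
If a plugin only needs one particular setting, the same iteration can be used
to collect just that value.  A minimal sketch (the ``xvalues`` name and the
printing are illustrative additions, not part of the example above)::

    # content of conftest.py - a hypothetical variant of the hook above

    def pytest_runtest_setup(item):
        g = getattr(item.obj, 'glob', None)
        if g is not None:
            # gather the "x" keyword argument from every application of the marker
            xvalues = [info.kwargs.get('x') for info in g]
            print ("glob x values: %s" % xvalues)
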
@@ -49,7 +49,7 @@ You can now run the test::
|
||||
|
||||
$ py.test test_sample.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_sample.py F
|
||||
@@ -57,7 +57,7 @@ You can now run the test::
|
||||
================================= FAILURES =================================
|
||||
_______________________________ test_answer ________________________________
|
||||
|
||||
mysetup = <conftest.MySetup instance at 0x10131c098>
|
||||
mysetup = <conftest.MySetup instance at 0x101322fc8>
|
||||
|
||||
def test_answer(mysetup):
|
||||
app = mysetup.myapp()
|
||||
@@ -66,7 +66,7 @@ You can now run the test::
|
||||
E assert 54 == 42
|
||||
|
||||
test_sample.py:4: AssertionError
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
========================= 1 failed in 0.72 seconds =========================
|
||||
|
||||
This means that our ``mysetup`` object was successfully instantiated
|
||||
and ``mysetup.myapp()`` returned an initialized ``MyApp`` instance.
|
||||
@@ -122,12 +122,12 @@ Running it yields::
|
||||
|
||||
$ py.test test_ssh.py -rs
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_ssh.py s
|
||||
========================= short test summary info ==========================
|
||||
SKIP [1] /Users/hpk/tmp/doc-exec-44/conftest.py:22: specify ssh host with --ssh
|
||||
SKIP [1] /Users/hpk/tmp/doc-exec-153/conftest.py:22: specify ssh host with --ssh
|
||||
|
||||
======================== 1 skipped in 0.02 seconds =========================
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ now execute the test specification::
|
||||
|
||||
nonpython $ py.test test_simple.yml
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_simple.yml .F
|
||||
@@ -37,7 +37,7 @@ now execute the test specification::
|
||||
usecase execution failed
|
||||
spec failed: 'some': 'other'
|
||||
no further details known at this point.
|
||||
==================== 1 failed, 1 passed in 0.09 seconds ====================
|
||||
==================== 1 failed, 1 passed in 0.48 seconds ====================
|
||||
|
||||
You get one dot for the passing ``sub1: sub1`` check and one failure.
|
||||
Obviously in the above ``conftest.py`` you'll want to implement a more
|
||||
@@ -56,7 +56,7 @@ consulted when reporting in ``verbose`` mode::
|
||||
|
||||
nonpython $ py.test -v
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1 -- /Users/hpk/venv/1/bin/python
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_simple.yml:1: usecase: ok PASSED
|
||||
@@ -67,17 +67,17 @@ consulted when reporting in ``verbose`` mode::
|
||||
usecase execution failed
|
||||
spec failed: 'some': 'other'
|
||||
no further details known at this point.
|
||||
==================== 1 failed, 1 passed in 0.09 seconds ====================
|
||||
==================== 1 failed, 1 passed in 0.10 seconds ====================
|
||||
|
||||
While developing your custom test collection and execution it's also
|
||||
interesting to just look at the collection tree::
|
||||
|
||||
nonpython $ py.test --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
<YamlFile 'test_simple.yml'>
|
||||
<YamlItem 'ok'>
|
||||
<YamlItem 'hello'>
|
||||
|
||||
============================= in 0.08 seconds =============================
|
||||
============================= in 0.18 seconds =============================
|
||||
|
||||
@@ -96,7 +96,7 @@ This means that we only run 2 tests if we do not pass ``--all``::
|
||||
$ py.test -q test_compute.py
|
||||
collecting ... collected 2 items
|
||||
..
|
||||
2 passed in 0.02 seconds
|
||||
2 passed in 0.03 seconds
|
||||
|
||||
We run only two computations, so we see two dots.
|
||||
Let's run the full monty::
|
||||
@@ -114,7 +114,7 @@ let's run the full monty::
|
||||
E assert 4 < 4
|
||||
|
||||
test_compute.py:3: AssertionError
|
||||
1 failed, 4 passed in 0.03 seconds
|
||||
1 failed, 4 passed in 0.05 seconds
|
||||
|
||||
As expected when running the full range of ``param1`` values
|
||||
we'll get an error on the last one.
|
||||
@@ -154,7 +154,7 @@ this is a fully self-contained example which you can run with::
|
||||
|
||||
$ py.test test_scenarios.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_scenarios.py ..
|
||||
@@ -166,7 +166,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
|
||||
|
||||
$ py.test --collectonly test_scenarios.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
<Module 'test_scenarios.py'>
|
||||
<Class 'TestSampleWithScenarios'>
|
||||
@@ -174,7 +174,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
|
||||
<Function 'test_demo[basic]'>
|
||||
<Function 'test_demo[advanced]'>
|
||||
|
||||
============================= in 0.01 seconds =============================
|
||||
============================= in 0.05 seconds =============================
|
||||
|
||||
Deferring the setup of parametrized resources
|
||||
---------------------------------------------------
|
||||
@@ -222,7 +222,7 @@ Let's first see how it looks like at collection time::
|
||||
|
||||
$ py.test test_backends.py --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
<Module 'test_backends.py'>
|
||||
<Function 'test_db_initialized[d1]'>
|
||||
@@ -238,7 +238,7 @@ And then when we run the test::
|
||||
================================= FAILURES =================================
|
||||
_________________________ test_db_initialized[d2] __________________________
|
||||
|
||||
db = <conftest.DB2 instance at 0x10131c6c8>
|
||||
db = <conftest.DB2 instance at 0x101323710>
|
||||
|
||||
def test_db_initialized(db):
|
||||
# a dummy test
|
||||
@@ -247,7 +247,7 @@ And then when we run the test::
|
||||
E Failed: deliberately failing for demo purposes
|
||||
|
||||
test_backends.py:6: Failed
|
||||
1 failed, 1 passed in 0.02 seconds
|
||||
1 failed, 1 passed in 0.03 seconds
|
||||
|
||||
The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed.  Our ``pytest_funcarg__db`` factory instantiated each of the DB values during the setup phase, while ``pytest_generate_tests`` generated the two corresponding calls to ``test_db_initialized`` during the collection phase.
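
For reference, a ``conftest.py`` implementing this pattern could look roughly
like the following sketch (the class names and the ``d1``/``d2`` ids mirror the
output above; the actual example file is not shown in this changeset)::

    # content of conftest.py - a sketch, not the verbatim example file

    def pytest_generate_tests(metafunc):
        if 'db' in metafunc.funcargnames:
            # schedule two calls at collection time, deferring the setup
            metafunc.addcall(id="d1", param="d1")
            metafunc.addcall(id="d2", param="d2")

    class DB1:
        "one database object"
    class DB2:
        "another database object"

    def pytest_funcarg__db(request):
        # only at setup time is the actual resource instantiated
        if request.param == "d1":
            return DB1()
        elif request.param == "d2":
            return DB2()
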
@@ -295,7 +295,7 @@ argument sets to use for each test function. Let's run it::
|
||||
================================= FAILURES =================================
|
||||
________________________ TestClass.test_equals[1-2] ________________________
|
||||
|
||||
self = <test_parametrize.TestClass instance at 0x101320200>, a = 1, b = 2
|
||||
self = <test_parametrize.TestClass instance at 0x101326368>, a = 1, b = 2
|
||||
|
||||
def test_equals(self, a, b):
|
||||
> assert a == b
|
||||
@@ -326,4 +326,4 @@ Running it results in some skips if we don't have all the python interpreters in
|
||||
========================= short test summary info ==========================
|
||||
SKIP [24] /Users/hpk/p/pytest/doc/example/multipython.py:36: 'python2.8' not found
|
||||
SKIP [24] /Users/hpk/p/pytest/doc/example/multipython.py:36: 'python2.4' not found
|
||||
27 passed, 48 skipped in 3.01 seconds
|
||||
27 passed, 48 skipped in 7.76 seconds
|
||||
|
||||
@@ -43,7 +43,7 @@ then the test collection looks like this::
|
||||
|
||||
$ py.test --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
<Module 'check_myapp.py'>
|
||||
<Class 'CheckMyApp'>
|
||||
@@ -82,7 +82,7 @@ You can always peek at the collection tree without running tests like this::
|
||||
|
||||
. $ py.test --collectonly pythoncollection.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 3 items
|
||||
<Module 'pythoncollection.py'>
|
||||
<Function 'test_function'>
|
||||
|
||||
@@ -13,7 +13,7 @@ get on the terminal - we are working on that):
|
||||
|
||||
assertion $ py.test failure_demo.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 39 items
|
||||
|
||||
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
|
||||
@@ -30,7 +30,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:15: AssertionError
|
||||
_________________________ TestFailing.test_simple __________________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x101356250>
|
||||
self = <failure_demo.TestFailing object at 0x101490690>
|
||||
|
||||
def test_simple(self):
|
||||
def f():
|
||||
@@ -40,13 +40,13 @@ get on the terminal - we are working on that):
|
||||
|
||||
> assert f() == g()
|
||||
E assert 42 == 43
|
||||
E + where 42 = <function f at 0x101328848>()
|
||||
E + and 43 = <function g at 0x1013288c0>()
|
||||
E + where 42 = <function f at 0x101462b90>()
|
||||
E + and 43 = <function g at 0x101462c08>()
|
||||
|
||||
failure_demo.py:28: AssertionError
|
||||
____________________ TestFailing.test_simple_multiline _____________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x101356810>
|
||||
self = <failure_demo.TestFailing object at 0x101490b10>
|
||||
|
||||
def test_simple_multiline(self):
|
||||
otherfunc_multi(
|
||||
@@ -66,19 +66,19 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:11: AssertionError
|
||||
___________________________ TestFailing.test_not ___________________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x101356a10>
|
||||
self = <failure_demo.TestFailing object at 0x101490210>
|
||||
|
||||
def test_not(self):
|
||||
def f():
|
||||
return 42
|
||||
> assert not f()
|
||||
E assert not 42
|
||||
E + where 42 = <function f at 0x101328758>()
|
||||
E + where 42 = <function f at 0x101462aa0>()
|
||||
|
||||
failure_demo.py:38: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_text _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101356c50>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101490a10>
|
||||
|
||||
def test_eq_text(self):
|
||||
> assert 'spam' == 'eggs'
|
||||
@@ -89,7 +89,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:42: AssertionError
|
||||
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1013542d0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148d9d0>
|
||||
|
||||
def test_eq_similar_text(self):
|
||||
> assert 'foo 1 bar' == 'foo 2 bar'
|
||||
@@ -102,7 +102,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:45: AssertionError
|
||||
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101354590>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148d590>
|
||||
|
||||
def test_eq_multiline_text(self):
|
||||
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
|
||||
@@ -115,7 +115,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:48: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_long_text _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101354710>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148dc90>
|
||||
|
||||
def test_eq_long_text(self):
|
||||
a = '1'*100 + 'a' + '2'*100
|
||||
@@ -132,7 +132,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:53: AssertionError
|
||||
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1013529d0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148d910>
|
||||
|
||||
def test_eq_long_text_multiline(self):
|
||||
a = '1\n'*100 + 'a' + '2\n'*100
|
||||
@@ -156,7 +156,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:58: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_list _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101352750>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148b9d0>
|
||||
|
||||
def test_eq_list(self):
|
||||
> assert [0, 1, 2] == [0, 1, 3]
|
||||
@@ -166,7 +166,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:61: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_list_long _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101352ad0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148b750>
|
||||
|
||||
def test_eq_list_long(self):
|
||||
a = [0]*100 + [1] + [3]*100
|
||||
@@ -178,7 +178,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:66: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_dict _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101352b90>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148bdd0>
|
||||
|
||||
def test_eq_dict(self):
|
||||
> assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
|
||||
@@ -191,7 +191,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:69: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_set __________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101352fd0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148b1d0>
|
||||
|
||||
def test_eq_set(self):
|
||||
> assert set([0, 10, 11, 12]) == set([0, 20, 21])
|
||||
@@ -207,7 +207,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:72: AssertionError
|
||||
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101352b50>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148bf10>
|
||||
|
||||
def test_eq_longer_list(self):
|
||||
> assert [1,2] == [1,2,3]
|
||||
@@ -217,7 +217,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:75: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_in_list _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1013522d0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x10148b390>
|
||||
|
||||
def test_in_list(self):
|
||||
> assert 1 in [0, 2, 3, 4, 5]
|
||||
@@ -226,7 +226,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:78: AssertionError
|
||||
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101351390>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101483e50>
|
||||
|
||||
def test_not_in_text_multiline(self):
|
||||
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
|
||||
@@ -244,7 +244,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:82: AssertionError
|
||||
___________ TestSpecialisedExplanations.test_not_in_text_single ____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101351410>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101483c10>
|
||||
|
||||
def test_not_in_text_single(self):
|
||||
text = 'single foo line'
|
||||
@@ -257,7 +257,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:86: AssertionError
|
||||
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101351510>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101483ed0>
|
||||
|
||||
def test_not_in_text_single_long(self):
|
||||
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
|
||||
@@ -270,7 +270,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:90: AssertionError
|
||||
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101351a10>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x101483310>
|
||||
|
||||
def test_not_in_text_single_long_term(self):
|
||||
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
|
||||
@@ -289,7 +289,7 @@ get on the terminal - we are working on that):
|
||||
i = Foo()
|
||||
> assert i.b == 2
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x101351b50>.b
|
||||
E + where 1 = <failure_demo.Foo object at 0x101483f50>.b
|
||||
|
||||
failure_demo.py:101: AssertionError
|
||||
_________________________ test_attribute_instance __________________________
|
||||
@@ -299,8 +299,8 @@ get on the terminal - we are working on that):
|
||||
b = 1
|
||||
> assert Foo().b == 2
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x101351810>.b
|
||||
E + where <failure_demo.Foo object at 0x101351810> = <class 'failure_demo.Foo'>()
|
||||
E + where 1 = <failure_demo.Foo object at 0x101483210>.b
|
||||
E + where <failure_demo.Foo object at 0x101483210> = <class 'failure_demo.Foo'>()
|
||||
|
||||
failure_demo.py:107: AssertionError
|
||||
__________________________ test_attribute_failure __________________________
|
||||
@@ -316,7 +316,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:116:
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
|
||||
self = <failure_demo.Foo object at 0x101351c50>
|
||||
self = <failure_demo.Foo object at 0x101483450>
|
||||
|
||||
def _get_b(self):
|
||||
> raise Exception('Failed to get attrib')
|
||||
@@ -332,15 +332,15 @@ get on the terminal - we are working on that):
|
||||
b = 2
|
||||
> assert Foo().b == Bar().b
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x101351f10>.b
|
||||
E + where <failure_demo.Foo object at 0x101351f10> = <class 'failure_demo.Foo'>()
|
||||
E + and 2 = <failure_demo.Bar object at 0x1013519d0>.b
|
||||
E + where <failure_demo.Bar object at 0x1013519d0> = <class 'failure_demo.Bar'>()
|
||||
E + where 1 = <failure_demo.Foo object at 0x101483150>.b
|
||||
E + where <failure_demo.Foo object at 0x101483150> = <class 'failure_demo.Foo'>()
|
||||
E + and 2 = <failure_demo.Bar object at 0x101483350>.b
|
||||
E + where <failure_demo.Bar object at 0x101483350> = <class 'failure_demo.Bar'>()
|
||||
|
||||
failure_demo.py:124: AssertionError
|
||||
__________________________ TestRaises.test_raises __________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x101373710>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a6758>
|
||||
|
||||
def test_raises(self):
|
||||
s = 'qwe'
|
||||
@@ -352,10 +352,10 @@ get on the terminal - we are working on that):
|
||||
> int(s)
|
||||
E ValueError: invalid literal for int() with base 10: 'qwe'
|
||||
|
||||
<0-codegen /Users/hpk/p/pytest/_pytest/python.py:958>:1: ValueError
|
||||
<0-codegen /Users/hpk/p/pytest/_pytest/python.py:976>:1: ValueError
|
||||
______________________ TestRaises.test_raises_doesnt _______________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x101334f38>
|
||||
self = <failure_demo.TestRaises instance at 0x1014b03f8>
|
||||
|
||||
def test_raises_doesnt(self):
|
||||
> raises(IOError, "int('3')")
|
||||
@@ -364,7 +364,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:136: Failed
|
||||
__________________________ TestRaises.test_raise ___________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x10136d950>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a8998>
|
||||
|
||||
def test_raise(self):
|
||||
> raise ValueError("demo error")
|
||||
@@ -373,7 +373,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:139: ValueError
|
||||
________________________ TestRaises.test_tupleerror ________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x101367758>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a27a0>
|
||||
|
||||
def test_tupleerror(self):
|
||||
> a,b = [1]
|
||||
@@ -382,7 +382,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:142: ValueError
|
||||
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x10136a4d0>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a5518>
|
||||
|
||||
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
|
||||
l = [1,2,3]
|
||||
@@ -395,7 +395,7 @@ get on the terminal - we are working on that):
|
||||
l is [1, 2, 3]
|
||||
________________________ TestRaises.test_some_error ________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1013692d8>
|
||||
self = <failure_demo.TestRaises instance at 0x1014a1320>
|
||||
|
||||
def test_some_error(self):
|
||||
> if namenotexi:
|
||||
@@ -423,7 +423,7 @@ get on the terminal - we are working on that):
|
||||
<2-codegen 'abc-123' /Users/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError
|
||||
____________________ TestMoreErrors.test_complex_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1013730e0>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a6638>
|
||||
|
||||
def test_complex_error(self):
|
||||
def f():
|
||||
@@ -452,7 +452,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:5: AssertionError
|
||||
___________________ TestMoreErrors.test_z1_unpack_error ____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101368290>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a42d8>
|
||||
|
||||
def test_z1_unpack_error(self):
|
||||
l = []
|
||||
@@ -462,7 +462,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:179: ValueError
|
||||
____________________ TestMoreErrors.test_z2_type_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1013610e0>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a0128>
|
||||
|
||||
def test_z2_type_error(self):
|
||||
l = 3
|
||||
@@ -472,19 +472,19 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:183: TypeError
|
||||
______________________ TestMoreErrors.test_startswith ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101361ea8>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a0ef0>
|
||||
|
||||
def test_startswith(self):
|
||||
s = "123"
|
||||
g = "456"
|
||||
> assert s.startswith(g)
|
||||
E assert <built-in method startswith of str object at 0x101357a58>('456')
|
||||
E + where <built-in method startswith of str object at 0x101357a58> = '123'.startswith
|
||||
E assert <built-in method startswith of str object at 0x1014951c0>('456')
|
||||
E + where <built-in method startswith of str object at 0x1014951c0> = '123'.startswith
|
||||
|
||||
failure_demo.py:188: AssertionError
|
||||
__________________ TestMoreErrors.test_startswith_nested ___________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101368128>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a4170>
|
||||
|
||||
def test_startswith_nested(self):
|
||||
def f():
|
||||
@@ -492,15 +492,15 @@ get on the terminal - we are working on that):
|
||||
def g():
|
||||
return "456"
|
||||
> assert f().startswith(g())
|
||||
E assert <built-in method startswith of str object at 0x101357a58>('456')
|
||||
E + where <built-in method startswith of str object at 0x101357a58> = '123'.startswith
|
||||
E + where '123' = <function f at 0x101339938>()
|
||||
E + and '456' = <function g at 0x101339cf8>()
|
||||
E assert <built-in method startswith of str object at 0x1014951c0>('456')
|
||||
E + where <built-in method startswith of str object at 0x1014951c0> = '123'.startswith
|
||||
E + where '123' = <function f at 0x1014aea28>()
|
||||
E + and '456' = <function g at 0x101477c80>()
|
||||
|
||||
failure_demo.py:195: AssertionError
|
||||
_____________________ TestMoreErrors.test_global_func ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101336758>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014b3ab8>
|
||||
|
||||
def test_global_func(self):
|
||||
> assert isinstance(globf(42), float)
|
||||
@@ -510,18 +510,18 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:198: AssertionError
|
||||
_______________________ TestMoreErrors.test_instance _______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1013678c0>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1014a2878>
|
||||
|
||||
def test_instance(self):
|
||||
self.x = 6*7
|
||||
> assert self.x != 42
|
||||
E assert 42 != 42
|
||||
E + where 42 = <failure_demo.TestMoreErrors instance at 0x1013678c0>.x
|
||||
E + where 42 = <failure_demo.TestMoreErrors instance at 0x1014a2878>.x
|
||||
|
||||
failure_demo.py:202: AssertionError
|
||||
_______________________ TestMoreErrors.test_compare ________________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101366a28>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x10149da70>
|
||||
|
||||
def test_compare(self):
|
||||
> assert globf(10) < 5
|
||||
@@ -531,7 +531,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:205: AssertionError
|
||||
_____________________ TestMoreErrors.test_try_finally ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1013628c0>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101493908>
|
||||
|
||||
def test_try_finally(self):
|
||||
x = 1
|
||||
@@ -540,4 +540,4 @@ get on the terminal - we are working on that):
|
||||
E assert 1 == 0
|
||||
|
||||
failure_demo.py:210: AssertionError
|
||||
======================== 39 failed in 0.41 seconds =========================
|
||||
======================== 39 failed in 1.05 seconds =========================
|
||||
|
||||
@@ -53,7 +53,7 @@ Let's run this without supplying our new command line option::
|
||||
test_sample.py:6: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
first
|
||||
1 failed in 0.02 seconds
|
||||
1 failed in 0.50 seconds
|
||||
|
||||
And now with supplying a command line option::
|
||||
|
||||
@@ -109,13 +109,13 @@ directory with the above conftest.py::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
gw0 I
|
||||
gw0 [0]
|
||||
|
||||
scheduling tests via LoadScheduling
|
||||
|
||||
============================= in 0.54 seconds =============================
|
||||
============================= in 5.12 seconds =============================
|
||||
|
||||
.. _`excontrolskip`:
|
||||
|
||||
@@ -156,20 +156,20 @@ and when running it will see a skipped "slow" test::
|
||||
|
||||
$ py.test -rs # "-rs" means report details on the little 's'
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_module.py .s
|
||||
========================= short test summary info ==========================
|
||||
SKIP [1] /Users/hpk/tmp/doc-exec-49/conftest.py:9: need --runslow option to run
|
||||
SKIP [1] /Users/hpk/tmp/doc-exec-158/conftest.py:9: need --runslow option to run
|
||||
|
||||
=================== 1 passed, 1 skipped in 0.02 seconds ====================
|
||||
=================== 1 passed, 1 skipped in 0.09 seconds ====================
|
||||
|
||||
Or run it including the ``slow`` marked test::
|
||||
|
||||
$ py.test --runslow
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_module.py ..
|
||||
@@ -213,7 +213,7 @@ Let's run our little function::
|
||||
E Failed: not configured: 42
|
||||
|
||||
test_checkconfig.py:8: Failed
|
||||
1 failed in 0.02 seconds
|
||||
1 failed in 0.07 seconds
|
||||
|
||||
Detect if running from within a py.test run
|
||||
--------------------------------------------------------------
|
||||
@@ -261,11 +261,11 @@ which will add the string to the test header accordingly::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
project deps: mylib-1.1
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
@@ -284,21 +284,21 @@ which will add info only when run with "--v"::
|
||||
|
||||
$ py.test -v
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1 -- /Users/hpk/venv/1/bin/python
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
info1: did you know that ...
|
||||
did you?
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
============================= in 0.03 seconds =============================
|
||||
|
||||
and nothing when run plainly::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
profiling test duration
|
||||
--------------------------
|
||||
@@ -327,7 +327,7 @@ Now we can profile which test functions execute the slowest::
|
||||
|
||||
$ py.test --durations=3
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 3 items
|
||||
|
||||
test_some_are_slow.py ...
|
||||
@@ -335,5 +335,5 @@ Now we can profile which test functions execute the slowest::
|
||||
========================= slowest 3 test durations =========================
|
||||
0.20s call test_some_are_slow.py::test_funcslow2
|
||||
0.10s call test_some_are_slow.py::test_funcslow1
|
||||
0.00s setup test_some_are_slow.py::test_funcfast
|
||||
========================= 3 passed in 0.32 seconds =========================
|
||||
0.00s call test_some_are_slow.py::test_funcfast
|
||||
========================= 3 passed in 0.33 seconds =========================
|
||||
|
||||
@@ -62,7 +62,7 @@ Running the test looks like this::
|
||||
|
||||
$ py.test test_simplefactory.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_simplefactory.py F
|
||||
@@ -77,7 +77,7 @@ Running the test looks like this::
|
||||
E assert 42 == 17
|
||||
|
||||
test_simplefactory.py:5: AssertionError
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
========================= 1 failed in 0.03 seconds =========================
|
||||
|
||||
This means that indeed the test function was called with a ``myfuncarg``
|
||||
argument value of ``42`` and the assert fails. Here is how py.test
|
||||
@@ -167,7 +167,7 @@ Running this will generate ten invocations of ``test_func`` passing in each of t
|
||||
|
||||
$ py.test test_example.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 10 items
|
||||
|
||||
test_example.py .........F
|
||||
@@ -182,7 +182,7 @@ Running this will generate ten invocations of ``test_func`` passing in each of t
|
||||
E assert 9 < 9
|
||||
|
||||
test_example.py:6: AssertionError
|
||||
==================== 1 failed, 9 passed in 0.05 seconds ====================
|
||||
==================== 1 failed, 9 passed in 0.07 seconds ====================
|
||||
|
||||
Obviously, only when ``numiter`` has the value of ``9`` does the test fail. Note that the ``pytest_generate_tests(metafunc)`` hook is called during
the test collection phase which is separate from the actual test running.
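
For orientation, the hook driving this example can be written as a short
sketch like the following (the ``range(10)`` matches the ten collected items
shown above; the actual example file is not reproduced here)::

    # content of conftest.py - a sketch of the parametrizing hook

    def pytest_generate_tests(metafunc):
        if "numiter" in metafunc.funcargnames:
            for i in range(10):
                metafunc.addcall(funcargs=dict(numiter=i))
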
@@ -190,7 +190,7 @@ Let's just look at what is collected::
|
||||
|
||||
$ py.test --collectonly test_example.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 10 items
|
||||
<Module 'test_example.py'>
|
||||
<Function 'test_func[0]'>
|
||||
@@ -210,13 +210,13 @@ If you want to select only the run with the value ``7`` you could do::
|
||||
|
||||
$ py.test -v -k 7 test_example.py # or -k test_func[7]
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1 -- /Users/hpk/venv/1/bin/python
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2 -- /Users/hpk/venv/0/bin/python
|
||||
collecting ... collected 10 items
|
||||
|
||||
test_example.py:5: test_func[7] PASSED
|
||||
|
||||
======================= 9 tests deselected by '-k7' ========================
|
||||
================== 1 passed, 9 deselected in 0.02 seconds ==================
|
||||
================== 1 passed, 9 deselected in 0.01 seconds ==================
|
||||
|
||||
You might want to look at :ref:`more parametrization examples <paramexamples>`.
|
||||
|
||||
|
||||
@@ -22,9 +22,10 @@ Installation options::
|
||||
To check your installation has installed the correct version::
|
||||
|
||||
$ py.test --version
|
||||
This is py.test version 2.2.1, imported from /Users/hpk/p/pytest/pytest.pyc
|
||||
This is py.test version 2.2.2, imported from /Users/hpk/p/pytest/pytest.pyc
|
||||
setuptools registered plugins:
|
||||
pytest-xdist-1.8.dev2 at /Users/hpk/p/pytest-xdist/xdist/plugin.pyc
|
||||
pytest-xdist-1.8 at /Users/hpk/p/pytest-xdist/xdist/plugin.pyc
|
||||
pytest-cov-1.4 at /Users/hpk/venv/0/lib/python2.7/site-packages/pytest_cov.pyc
|
||||
|
||||
If you get an error checkout :ref:`installation issues`.
|
||||
|
||||
@@ -46,7 +47,7 @@ That's it. You can execute the test function now::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_sample.py F
|
||||
@@ -126,7 +127,7 @@ run the module by passing its filename::
|
||||
================================= FAILURES =================================
|
||||
____________________________ TestClass.test_two ____________________________
|
||||
|
||||
self = <test_class.TestClass instance at 0x10131a560>
|
||||
self = <test_class.TestClass instance at 0x1013225a8>
|
||||
|
||||
def test_two(self):
|
||||
x = "hello"
|
||||
@@ -163,7 +164,7 @@ before performing the test function call. Let's just run it::
|
||||
================================= FAILURES =================================
|
||||
_____________________________ test_needsfiles ______________________________
|
||||
|
||||
tmpdir = local('/Users/hpk/tmp/pytest-679/test_needsfiles0')
|
||||
tmpdir = local('/Users/hpk/tmp/pytest-20/test_needsfiles0')
|
||||
|
||||
def test_needsfiles(tmpdir):
|
||||
print tmpdir
|
||||
@@ -172,8 +173,8 @@ before performing the test function call. Let's just run it::
|
||||
|
||||
test_tmpdir.py:3: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
/Users/hpk/tmp/pytest-679/test_needsfiles0
|
||||
1 failed in 0.16 seconds
|
||||
/Users/hpk/tmp/pytest-20/test_needsfiles0
|
||||
1 failed in 0.11 seconds
|
||||
|
||||
Before the test runs, a unique-per-test-invocation temporary directory
|
||||
was created. More info at :ref:`tmpdir handling`.
|
||||
|
||||
@@ -94,6 +94,40 @@ options.

.. _`test discovery`:
.. _`Python test discovery`:

Integration with setuptools/distribute test commands
----------------------------------------------------

Distribute/Setuptools support test requirements,
which means it's really easy to extend its test command
to support running pytest from test requirements::

    from setuptools.command.test import test as TestCommand

    class PyTest(TestCommand):
        def finalize_options(self):
            TestCommand.finalize_options(self)
            self.test_args = []
            self.test_suite = True
        def run_tests(self):
            # import here, because outside the eggs aren't loaded
            import pytest
            pytest.main(self.test_args)

    setup(
        #...,
        tests_require=['pytest'],
        cmdclass = {'test': PyTest},
        )

Now if you run::

    python setup.py test

this will download py.test if needed and then run py.test
as you would expect it to.
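
If you want ``python setup.py test`` to fail when tests fail, the
``run_tests`` hook can propagate pytest's exit status.  A small sketch of that
refinement (the ``sys.exit`` call is an addition, not part of the snippet
above)::

    import sys
    from setuptools.command.test import test as TestCommand

    class PyTest(TestCommand):
        def finalize_options(self):
            TestCommand.finalize_options(self)
            self.test_args = []
            self.test_suite = True
        def run_tests(self):
            # import here, because outside the eggs aren't loaded
            import pytest
            errno = pytest.main(self.test_args)
            sys.exit(errno)  # make "setup.py test" fail if pytest reported failures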

Conventions for Python test discovery
-------------------------------------------------

@@ -14,14 +14,14 @@ and a discussion of its motivation.

.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/


Simple example: patching ``os.path.expanduser``
Simple example: monkeypatching functions
---------------------------------------------------

If, for instance, you want to pretend that ``os.expanduser`` returns a certain
If you want to pretend that ``os.expanduser`` returns a certain
directory, you can use the :py:meth:`monkeypatch.setattr` method to
patch this function before calling into a function which uses it::

    # content of test_module.py
    import os.path
    def getssh(): # pseudo application code
        return os.path.join(os.path.expanduser("~admin"), '.ssh')

@@ -33,22 +33,15 @@ patch this function before calling into a function which uses it::

        x = getssh()
        assert x == '/abc/.ssh'

After the test function finishes the ``os.path.expanduser`` modification
will be undone.

.. background check:
    $ py.test
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.1
    collecting ... collected 0 items

    ============================= in 0.00 seconds =============================
Here our test function monkeypatches ``os.path.expanduser`` and
then calls into a function that calls it.  After the test function
finishes the ``os.path.expanduser`` modification will be undone.
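
Put together, a complete, self-contained version of this test could look like
the following sketch (the ``mockreturn`` name and the ``/abc`` path are just
illustrative values)::

    # content of test_module.py
    import os.path

    def getssh(): # pseudo application code
        return os.path.join(os.path.expanduser("~admin"), '.ssh')

    def test_getssh(monkeypatch):
        def mockreturn(path):
            return '/abc'
        # replace os.path.expanduser for the duration of this test only
        monkeypatch.setattr(os.path, 'expanduser', mockreturn)
        x = getssh()
        assert x == '/abc/.ssh'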

Method reference of the monkeypatch function argument
-----------------------------------------------------

.. autoclass:: monkeypatch
    :members: setattr, delattr, setitem, delitem, setenv, delenv, syspath_prepend, undo
    :members: setattr, delattr, setitem, delitem, setenv, delenv, syspath_prepend, chdir, undo

``monkeypatch.setattr/delattr/delitem/delenv()`` all
by default raise an Exception if the target does not exist.
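
The newly listed ``chdir`` method changes the current working directory for
the duration of a test and restores it on ``undo()``.  A minimal usage sketch,
mirroring the tests added in this changeset::

    import os

    def test_relative_paths(monkeypatch, tmpdir):
        monkeypatch.chdir(tmpdir)  # accepts a py.path.local object or a plain string
        assert os.getcwd() == tmpdir.strpath
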
@@ -130,7 +130,7 @@ Running it with the report-on-xfail option gives this output::
|
||||
|
||||
example $ py.test -rx xfail_demo.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 6 items
|
||||
|
||||
xfail_demo.py xxxxxx
|
||||
@@ -147,7 +147,7 @@ Running it with the report-on-xfail option gives this output::
|
||||
XFAIL xfail_demo.py::test_hello6
|
||||
reason: reason
|
||||
|
||||
======================== 6 xfailed in 0.08 seconds =========================
|
||||
======================== 6 xfailed in 0.16 seconds =========================
|
||||
|
||||
.. _`evaluation of skipif/xfail conditions`:
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ Running this would result in a passed test except for the last
|
||||
|
||||
$ py.test test_tmpdir.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_tmpdir.py F
|
||||
@@ -36,7 +36,7 @@ Running this would result in a passed test except for the last
|
||||
================================= FAILURES =================================
|
||||
_____________________________ test_create_file _____________________________
|
||||
|
||||
tmpdir = local('/Users/hpk/tmp/pytest-680/test_create_file0')
|
||||
tmpdir = local('/Users/hpk/tmp/pytest-21/test_create_file0')
|
||||
|
||||
def test_create_file(tmpdir):
|
||||
p = tmpdir.mkdir("sub").join("hello.txt")
|
||||
@@ -47,7 +47,7 @@ Running this would result in a passed test except for the last
|
||||
E assert 0
|
||||
|
||||
test_tmpdir.py:7: AssertionError
|
||||
========================= 1 failed in 0.17 seconds =========================
|
||||
========================= 1 failed in 0.07 seconds =========================
|
||||
|
||||
.. _`base temporary directory`:
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ Running it yields::
|
||||
|
||||
$ py.test test_unittest.py
|
||||
=========================== test session starts ============================
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.1
|
||||
platform darwin -- Python 2.7.1 -- pytest-2.2.2
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_unittest.py F
|
||||
@@ -42,7 +42,7 @@ Running it yields::
|
||||
test_unittest.py:8: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
hello
|
||||
========================= 1 failed in 0.04 seconds =========================
|
||||
========================= 1 failed in 0.15 seconds =========================
|
||||
|
||||
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ Stopping after the first (or N) failures

To stop the testing process after the first (N) failures::

    py.test -x            # stop after first failure
    py.test -maxfail=2    # stop after two failures
    py.test --maxfail=2   # stop after two failures

Specifying tests / selecting tests
---------------------------------------------------

@@ -70,7 +70,7 @@ Dropping to PDB (Python Debugger) on failures

.. _PDB: http://docs.python.org/library/pdb.html

Python comes with a builtin Python debugger called PDB_. ``py.test``
allows to drop into the PDB prompt via a command line option::
allows one to drop into the PDB prompt via a command line option::

    py.test --pdb

8
setup.py

@@ -17,14 +17,14 @@ Bugs and issues: http://bitbucket.org/hpk42/pytest/issues/

Web page: http://pytest.org

(c) Holger Krekel and others, 2004-2011
(c) Holger Krekel and others, 2004-2012
"""
def main():
    setup(
        name='pytest',
        description='py.test: simple powerful testing with Python',
        long_description = long_description,
        version='2.2.1',
        version='2.2.4',
        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -32,7 +32,7 @@ def main():
        author_email='holger at merlinux.eu',
        entry_points= make_entry_points(),
        # the following should be enabled for release
        install_requires=['py>=1.4.6'],
        install_requires=['py>=1.4.8'],
        classifiers=['Development Status :: 6 - Mature',
                     'Intended Audience :: Developers',
                     'License :: OSI Approved :: MIT License',
@@ -70,4 +70,4 @@ def make_entry_points():
    return {'console_scripts': l}

if __name__ == '__main__':
    main()
    main()
@@ -57,6 +57,22 @@ class TestGeneralUsage:
|
||||
assert result.ret != 0
|
||||
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
|
||||
|
||||
def test_file_not_found_unconfigure_issue143(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_configure():
|
||||
print("---configure")
|
||||
def pytest_unconfigure():
|
||||
print("---unconfigure")
|
||||
""")
|
||||
result = testdir.runpytest("-s", "asd")
|
||||
assert result.ret == 4 # EXIT_USAGEERROR
|
||||
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
|
||||
s = result.stdout.fnmatch_lines([
|
||||
"*---configure",
|
||||
"*---unconfigure",
|
||||
])
|
||||
|
||||
|
||||
def test_config_preparse_plugin_option(self, testdir):
|
||||
testdir.makepyfile(pytest_xyz="""
|
||||
def pytest_addoption(parser):
|
||||
@@ -410,15 +426,20 @@ class TestInvocationVariants:
|
||||
"*1 passed*"
|
||||
])
|
||||
|
||||
def join_pythonpath(what):
|
||||
cur = py.std.os.environ.get('PYTHONPATH')
|
||||
if cur:
|
||||
return str(what) + ':' + cur
|
||||
return what
|
||||
empty_package = testdir.mkpydir("empty_package")
|
||||
monkeypatch.setenv('PYTHONPATH', empty_package)
|
||||
monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))
|
||||
result = testdir.runpytest("--pyargs", ".")
|
||||
assert result.ret == 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*2 passed*"
|
||||
])
|
||||
|
||||
monkeypatch.setenv('PYTHONPATH', testdir)
|
||||
monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir))
|
||||
path.join('test_hello.py').remove()
|
||||
result = testdir.runpytest("--pyargs", "tpkg.test_hello")
|
||||
assert result.ret != 0
|
||||
@@ -470,11 +491,11 @@ class TestDurations:
|
||||
import time
|
||||
frag = 0.02
|
||||
def test_2():
|
||||
time.sleep(frag*2)
|
||||
time.sleep(frag*5)
|
||||
def test_1():
|
||||
time.sleep(frag)
|
||||
def test_3():
|
||||
time.sleep(frag*3)
|
||||
time.sleep(frag*10)
|
||||
"""
|
||||
|
||||
def test_calls(self, testdir):
|
||||
|
||||
@@ -28,17 +28,14 @@ def test_assert_with_explicit_message():
|
||||
assert e.msg == 'hello'
|
||||
|
||||
def test_assert_within_finally():
|
||||
class A:
|
||||
def f():
|
||||
pass
|
||||
excinfo = py.test.raises(TypeError, """
|
||||
excinfo = py.test.raises(ZeroDivisionError, """
|
||||
try:
|
||||
A().f()
|
||||
1/0
|
||||
finally:
|
||||
i = 42
|
||||
""")
|
||||
s = excinfo.exconly()
|
||||
assert s.find("takes no argument") != -1
|
||||
assert py.std.re.search("division.+by zero", s) is not None
|
||||
|
||||
#def g():
|
||||
# A.f()
|
||||
@@ -325,3 +322,18 @@ def test_assert_raises_in_nonzero_of_object_pytest_issue10():
|
||||
e = exvalue()
|
||||
s = str(e)
|
||||
assert "<MY42 object> < 0" in s
|
||||
|
||||
@py.test.mark.skipif("sys.version_info >= (2,6)")
|
||||
def test_oldinterpret_importation():
|
||||
# we had a cyclic import there
|
||||
# requires pytest on sys.path
|
||||
res = py.std.subprocess.call([
|
||||
py.std.sys.executable, '-c', str(py.code.Source("""
|
||||
try:
|
||||
from _pytest.assertion.newinterpret import interpret
|
||||
except ImportError:
|
||||
from _pytest.assertion.oldinterpret import interpret
|
||||
"""))
|
||||
])
|
||||
|
||||
assert res == 0
|
||||
|
||||
@@ -195,6 +195,10 @@ class TestAssertionRewrite:
|
||||
y = -1
|
||||
assert x + y
|
||||
assert getmsg(f) == "assert (1 + -1)"
|
||||
def f():
|
||||
x = range(10)
|
||||
assert not 5 % 4
|
||||
assert getmsg(f) == "assert not (5 % 4)"
|
||||
|
||||
def test_call(self):
|
||||
def g(a=42, *args, **kwargs):
|
||||
@@ -346,6 +350,7 @@ def test_no_bytecode():
|
||||
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
|
||||
assert testdir.runpytest().ret == 0
|
||||
|
||||
@pytest.mark.skipif('"__pypy__" in sys.modules')
|
||||
def test_pyc_vs_pyo(self, testdir, monkeypatch):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
|
||||
@@ -279,6 +279,13 @@ class TestPython:
|
||||
if not sys.platform.startswith("java"):
|
||||
assert "hx" in fnode.toxml()
|
||||
|
||||
def test_mangle_testnames():
|
||||
from _pytest.junitxml import mangle_testnames
|
||||
names = ["a/pything.py", "Class", "()", "method"]
|
||||
newnames = mangle_testnames(names)
|
||||
assert newnames == ["a.pything", "Class", "method"]
|
||||
|
||||
|
||||
class TestNonPython:
|
||||
def test_summing_simple(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
@@ -340,7 +347,7 @@ def test_nullbyte_replace(testdir):
|
||||
assert '#x0' in text
|
||||
|
||||
|
||||
def test_invalid_xml_escape(testdir):
|
||||
def test_invalid_xml_escape():
|
||||
# Test some more invalid xml chars; the full range should really
# be tested, but let's just test the edges of the ranges instead.
|
||||
@@ -355,27 +362,23 @@ def test_invalid_xml_escape(testdir):
|
||||
except NameError:
|
||||
unichr = chr
|
||||
u = py.builtin._totext
|
||||
invalid = (0x1, 0xB, 0xC, 0xE, 0x19,)
|
||||
# 0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) #, 0x110000)
|
||||
invalid = (0x00, 0x1, 0xB, 0xC, 0xE, 0x19,
|
||||
27, # issue #126
|
||||
0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) #, 0x110000)
|
||||
valid = (0x9, 0xA, 0x20,) # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)
|
||||
all = invalid + valid
|
||||
prints = [u(" sys.stdout.write('''0x%X-->%s<--''')") % (i, unichr(i))
|
||||
for i in all]
|
||||
testdir.makepyfile(u("# -*- coding: UTF-8 -*-"),
|
||||
u("import sys"),
|
||||
u("def test_print_bytes():"),
|
||||
u("\n").join(prints),
|
||||
u(" assert False"))
|
||||
xmlf = testdir.tmpdir.join('junit.xml')
|
||||
result = testdir.runpytest('--junitxml=%s' % xmlf)
|
||||
text = xmlf.read()
|
||||
|
||||
from _pytest.junitxml import bin_xml_escape
|
||||
|
||||
|
||||
for i in invalid:
|
||||
got = bin_xml_escape(unichr(i))
|
||||
if i <= 0xFF:
|
||||
assert '#x%02X' % i in text
|
||||
expected = '#x%02X' % i
|
||||
else:
|
||||
assert '#x%04X' % i in text
|
||||
expected = '#x%04X' % i
|
||||
assert got == expected
|
||||
for i in valid:
|
||||
assert chr(i) in text
|
||||
assert chr(i) == bin_xml_escape(unichr(i))
|
||||
|
||||
def test_logxml_path_expansion():
|
||||
from _pytest.junitxml import LogXML
|
||||
|
||||
@@ -228,18 +228,26 @@ class TestFunctional:
|
||||
keywords = item.keywords
|
||||
marker = keywords['hello']
|
||||
assert marker.args == ("pos0", "pos1")
|
||||
assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4}
|
||||
assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4}
|
||||
|
||||
def test_mark_other(self, testdir):
|
||||
pytest.raises(TypeError, '''
|
||||
testdir.getitem("""
|
||||
# test the new __iter__ interface
|
||||
l = list(marker)
|
||||
assert len(l) == 3
|
||||
assert l[0].args == ("pos0",)
|
||||
assert l[1].args == ()
|
||||
assert l[2].args == ("pos1", )
|
||||
|
||||
def test_mark_with_wrong_marker(self, testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
import pytest
|
||||
class pytestmark:
|
||||
pass
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
''')
|
||||
""")
|
||||
l = reprec.getfailedcollections()
|
||||
assert len(l) == 1
|
||||
assert "TypeError" in str(l[0].longrepr)
|
||||
|
||||
def test_mark_dynamically_in_funcarg(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
@@ -259,6 +267,23 @@ class TestFunctional:
|
||||
"keyword: *hello*"
|
||||
])
|
||||
|
||||
def test_merging_markers_two_functions(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.mark.hello("pos1", z=4)
|
||||
@pytest.mark.hello("pos0", z=3)
|
||||
def test_func(self):
|
||||
pass
|
||||
""")
|
||||
items, rec = testdir.inline_genitems(p)
|
||||
item, = items
|
||||
keywords = item.keywords
|
||||
marker = keywords['hello']
|
||||
l = list(marker)
|
||||
assert len(l) == 2
|
||||
assert l[0].args == ("pos0",)
|
||||
assert l[1].args == ("pos1",)
|
||||
|
||||
|
||||
class TestKeywordSelection:
|
||||
def test_select_simple(self, testdir):
|
||||
|
||||
@@ -2,6 +2,17 @@ import os, sys
import pytest
from _pytest.monkeypatch import monkeypatch as MonkeyPatch

def pytest_funcarg__mp(request):
    cwd = os.getcwd()
    sys_path = list(sys.path)

    def cleanup():
        sys.path[:] = sys_path
        os.chdir(cwd)

    request.addfinalizer(cleanup)
    return MonkeyPatch()

def test_setattr():
    class A:
        x = 1
@@ -59,6 +70,29 @@ def test_setitem():
    monkeypatch.undo()
    assert d['x'] == 5

def test_setitem_deleted_meanwhile():
    d = {}
    monkeypatch = MonkeyPatch()
    monkeypatch.setitem(d, 'x', 2)
    del d['x']
    monkeypatch.undo()
    assert not d

@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
    key = "qwpeoip123"
    if before:
        os.environ[key] = "world"
    monkeypatch = MonkeyPatch()
    monkeypatch.setenv(key, 'hello')
    del os.environ[key]
    monkeypatch.undo()
    if before:
        assert os.environ[key] == "world"
        del os.environ[key]
    else:
        assert key not in os.environ

def test_delitem():
    d = {'x': 1}
    monkeypatch = MonkeyPatch()
@@ -121,19 +155,41 @@ def test_monkeypatch_plugin(testdir):
    res = reprec.countoutcomes()
    assert tuple(res) == (1, 0, 0), res

def test_syspath_prepend():
def test_syspath_prepend(mp):
    old = list(sys.path)
    try:
        monkeypatch = MonkeyPatch()
        monkeypatch.syspath_prepend('world')
        monkeypatch.syspath_prepend('hello')
        assert sys.path[0] == "hello"
        assert sys.path[1] == "world"
        monkeypatch.undo()
        assert sys.path == old
        monkeypatch.undo()
        assert sys.path == old
    finally:
        sys.path[:] = old
    mp.syspath_prepend('world')
    mp.syspath_prepend('hello')
    assert sys.path[0] == "hello"
    assert sys.path[1] == "world"
    mp.undo()
    assert sys.path == old
    mp.undo()
    assert sys.path == old

def test_syspath_prepend_double_undo(mp):
    mp.syspath_prepend('hello world')
    mp.undo()
    sys.path.append('more hello world')
    mp.undo()
    assert sys.path[-1] == 'more hello world'

def test_chdir_with_path_local(mp, tmpdir):
    mp.chdir(tmpdir)
    assert os.getcwd() == tmpdir.strpath

def test_chdir_with_str(mp, tmpdir):
    mp.chdir(tmpdir.strpath)
    assert os.getcwd() == tmpdir.strpath

def test_chdir_undo(mp, tmpdir):
    cwd = os.getcwd()
    mp.chdir(tmpdir)
    mp.undo()
    assert os.getcwd() == cwd

def test_chdir_double_undo(mp, tmpdir):
    mp.chdir(tmpdir.strpath)
    mp.undo()
    tmpdir.chdir()
    mp.undo()
    assert os.getcwd() == tmpdir.strpath

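Note: the chdir tests above pin down two behaviours: chdir() accepts both py.path objects and strings, and undo() restores the original working directory exactly once (a second undo() after an external chdir is a no-op). A small hedged usage sketch, assuming the chdir helper shown above is also available on the standard monkeypatch funcarg:

    import os

    def test_reads_relative_file(monkeypatch, tmpdir):
        tmpdir.join("data.txt").write("hello")
        monkeypatch.chdir(tmpdir)      # work inside the temporary directory
        assert open("data.txt").read() == "hello"
        # teardown (or an explicit monkeypatch.undo()) restores the previous cwd
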
@@ -56,6 +56,24 @@ class TestClass:
            "*collected 0*",
        ])

    def test_setup_teardown_class_as_classmethod(self, testdir):
        testdir.makepyfile("""
            class TestClassMethod:
                @classmethod
                def setup_class(cls):
                    pass
                def test_1(self):
                    pass
                @classmethod
                def teardown_class(cls):
                    pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*1 passed*",
        ])


class TestGenerator:
    def test_generative_functions(self, testdir):
        modcol = testdir.getmodulecol("""
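Note: for the classmethod-based setup_class/teardown_class above to work, the setup code has to reach the plain function underneath the bound classmethod before calling it. A minimal illustration of that unwrapping, not the exact _pytest implementation:

    class TestClassMethod(object):
        @classmethod
        def setup_class(cls):
            pass

    meth = TestClassMethod.setup_class
    # a bound classmethod is not a plain function; __func__ (im_func on
    # older Python 2) exposes the underlying function
    real_func = getattr(meth, '__func__', getattr(meth, 'im_func', meth))
    real_func(TestClassMethod)        # equivalent to TestClassMethod.setup_class()
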
@@ -477,7 +495,7 @@ class TestTracebackCutting:
        out = result.stdout.str()
        assert out.find("conftest.py:2: ValueError") != -1
        numentries = out.count("_ _ _ _") # separator for traceback entries
        assert numentries >3
        assert numentries > 3

    def test_traceback_error_during_import(self, testdir):
        testdir.makepyfile("""
@@ -983,11 +1001,12 @@ class TestMetafunc:
        metafunc = funcargs.Metafunc(func)
        metafunc.parametrize('x', [1], indirect=True)
        metafunc.parametrize('y', [2,3], indirect=True)
        metafunc.parametrize('unnamed', [1], indirect=True)
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == {}
        assert metafunc._calls[1].funcargs == {}
        assert metafunc._calls[0].params == dict(x=1,y=2)
        assert metafunc._calls[1].params == dict(x=1,y=3)
        assert metafunc._calls[0].params == dict(x=1,y=2, unnamed=1)
        assert metafunc._calls[1].params == dict(x=1,y=3, unnamed=1)

    def test_addcalls_and_parametrize_indirect(self):
        def func(x, y): pass
@@ -1048,6 +1067,23 @@ class TestMetafunc:
        assert metafunc._calls[1].funcargs == dict(x=3, y=4)
        assert metafunc._calls[1].id == "3-4"

    def test_parametrize_multiple_times(self, testdir):
        testdir.makepyfile("""
            import pytest
            pytestmark = pytest.mark.parametrize("x", [1,2])
            def test_func(x):
                assert 0, x
            class TestClass:
                pytestmark = pytest.mark.parametrize("y", [3,4])
                def test_meth(self, x, y):
                    assert 0, x
        """)
        result = testdir.runpytest()
        assert result.ret == 1
        result.stdout.fnmatch_lines([
            "*6 fail*",
        ])

class TestMetafuncFunctional:
    def test_attributes(self, testdir):
        p = testdir.makepyfile("""
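Note on the expected "*6 fail*" count: the module-level mark parametrizes x over [1, 2], so test_func fails twice; test_meth sees both the module-level x and the class-level y over [3, 4], giving 2 * 2 = 4 failures; 2 + 4 = 6 failing tests in total.
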
@@ -1534,3 +1570,21 @@ def test_unorderable_types(testdir):
    result = testdir.runpytest()
    assert "TypeError" not in result.stdout.str()
    assert result.ret == 0

def test_issue117_sessionscopeteardown(testdir):
    testdir.makepyfile("""
        def pytest_funcarg__app(request):
            app = request.cached_setup(
                scope='session',
                setup=lambda: 0,
                teardown=lambda x: 3/x)
            return app
        def test_func(app):
            pass
    """)
    result = testdir.runpytest()
    assert result.ret != 0
    result.stderr.fnmatch_lines([
        "*3/x*",
        "*ZeroDivisionError*",
    ])

@@ -1,4 +1,4 @@
import pytest, py, sys
import pytest, py, sys, os
from _pytest import runner
from py._code.code import ReprExceptionInfo

@@ -315,6 +315,21 @@ class TestSessionReports:
        assert not rep.passed
        assert rep.skipped


reporttypes = [
    runner.BaseReport,
    runner.TestReport,
    runner.TeardownErrorReport,
    runner.CollectReport,
]

@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
def test_report_extra_parameters(reporttype):
    args = py.std.inspect.getargspec(reporttype.__init__)[0][1:]
    basekw = dict.fromkeys(args, [])
    report = reporttype(newthing=1, **basekw)
    assert report.newthing == 1

def test_callinfo():
    ci = runner.CallInfo(lambda: 0, '123')
    assert ci.when == "123"
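Note: test_report_extra_parameters builds a neutral keyword dict for whichever report class it receives, then checks that an unknown keyword simply becomes an attribute. The introspection trick in isolation, as a standalone sketch (Example is a made-up class, not pytest code):

    import inspect

    class Example(object):
        def __init__(self, a, b):
            self.a, self.b = a, b

    # constructor argument names minus 'self', each mapped to a placeholder
    args = inspect.getargspec(Example.__init__)[0][1:]   # ['a', 'b']
    basekw = dict.fromkeys(args, [])                     # {'a': [], 'b': []}
    obj = Example(**basekw)
    assert obj.a == [] and obj.b == []
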
@@ -423,21 +438,18 @@ def test_importorskip():
        py.test.fail("spurious skip")

def test_importorskip_imports_last_module_part():
    import os
    ospath = py.test.importorskip("os.path")
    assert os.path == ospath


def test_pytest_cmdline_main(testdir):
    p = testdir.makepyfile("""
        import sys
        sys.path.insert(0, %r)
        import py
        def test_hello():
            assert 1
        if __name__ == '__main__':
            py.test.cmdline.main([__file__])
    """ % (str(py._pydir.dirpath())))
    """)
    import subprocess
    popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
    s = popen.stdout.read()

@@ -256,6 +256,31 @@ class TestCollectonly:
            *1 error*
        """).strip())

    def test_collectonly_missing_path(self, testdir):
        """this checks issue 115,
        failure in parseargs will cause session
        not to have the items attribute
        """
        result = testdir.runpytest("--collectonly", "uhm_missing_path")
        assert result.ret == 4
        result.stderr.fnmatch_lines([
            '*ERROR: file not found*',
        ])

    def test_collectonly_quiet(self, testdir):
        testdir.makepyfile("def test_foo(): pass")
        result = testdir.runpytest("--collectonly", "-q")
        result.stdout.fnmatch_lines([
            '*test_foo*',
        ])

    def test_collectonly_more_quiet(self, testdir):
        testdir.makepyfile(test_fun="def test_foo(): pass")
        result = testdir.runpytest("--collectonly", "-qq")
        result.stdout.fnmatch_lines([
            '*test_fun.py: 1*',
        ])


def test_repr_python_version(monkeypatch):
    try:

@@ -448,3 +448,35 @@ def test_unorderable_types(testdir):
    result = testdir.runpytest()
    assert "TypeError" not in result.stdout.str()
    assert result.ret == 0

def test_unittest_typerror_traceback(testdir):
    testdir.makepyfile("""
        import unittest
        class TestJoinEmpty(unittest.TestCase):
            def test_hello(self, arg1):
                pass
    """)
    result = testdir.runpytest()
    assert "TypeError" in result.stdout.str()
    assert result.ret == 1

@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_unexpected_failure(testdir):
    testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            @unittest.expectedFailure
            def test_func1(self):
                assert 0
            @unittest.expectedFailure
            def test_func2(self):
                assert 1
    """)
    result = testdir.runpytest("-rxX")
    result.stdout.fnmatch_lines([
        "*XFAIL*MyTestCase*test_func1*",
        "*XPASS*MyTestCase*test_func2*",
        "*1 xfailed*1 xpass*",
    ])

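Note: the expectedFailure cases above show up as XFAIL/XPASS under -rxX, the same report categories pytest uses for its own xfail marker. A hedged native-pytest equivalent of the generated file:

    import pytest

    @pytest.mark.xfail
    def test_func1():
        assert 0    # expected to fail -> reported as XFAIL

    @pytest.mark.xfail
    def test_func2():
        assert 1    # passes although marked -> reported as XPASS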