Compare commits
52 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
07e870dc14 | ||
|
|
ee53b1f591 | ||
|
|
63ccec90be | ||
|
|
c666aeabbb | ||
|
|
f8137390c2 | ||
|
|
2589aa183c | ||
|
|
b316bc6723 | ||
|
|
420bbfd9a9 | ||
|
|
942ae47cd1 | ||
|
|
06ca7090f9 | ||
|
|
bc4e4b38a9 | ||
|
|
1c1918eb22 | ||
|
|
60ff2e8529 | ||
|
|
b7ba4d4e70 | ||
|
|
3a9788fc6f | ||
|
|
cf4e14baed | ||
|
|
1d40abadc4 | ||
|
|
ed6d2537bc | ||
|
|
a9f1f26a39 | ||
|
|
a7131dc911 | ||
|
|
6aaaaa8e67 | ||
|
|
6d06f55543 | ||
|
|
527bc472a8 | ||
|
|
007f0daeb9 | ||
|
|
55657d6c51 | ||
|
|
1a7c6ecc42 | ||
|
|
f2670651b3 | ||
|
|
5470cadbff | ||
|
|
f8e3fe8fbf | ||
|
|
c552b58dc5 | ||
|
|
18e784c9c9 | ||
|
|
5bef795ba7 | ||
|
|
a6c518e68c | ||
|
|
7e44c38570 | ||
|
|
9c952b3ce0 | ||
|
|
bfe6e98abb | ||
|
|
07cee24122 | ||
|
|
22fac92ca0 | ||
|
|
318e8a404b | ||
|
|
fadd1a2313 | ||
|
|
070c73ff2f | ||
|
|
682773e0cb | ||
|
|
6f3b84da9f | ||
|
|
f1b5dae1fb | ||
|
|
8d62e4c71c | ||
|
|
f6c1e49287 | ||
|
|
27577170e1 | ||
|
|
2f2586af72 | ||
|
|
70ceb946e4 | ||
|
|
d2f9b41519 | ||
|
|
2bd0c98801 | ||
|
|
5a5a618dcb |
7
.hgtags
7
.hgtags
@@ -32,3 +32,10 @@ c59d3fa8681a5b5966b8375b16fccd64a3a8dbeb 1.3.3
|
||||
79ef6377705184c55633d456832eea318fedcf61 1.3.4
|
||||
90fffd35373e9f125af233f78b19416f0938d841 1.3.4
|
||||
e9e127acd6f0497324ef7f40cfb997cad4c4cd17 2.0.0
|
||||
e4497c2aed358c1988cf7be83ca9394c3c707fa2 2.0.1
|
||||
84e5c54b72448194a0f6f815da7e048ac8019d50 2.0.2
|
||||
2ef82d82daacb72733a3a532a95c5a37164e5819 2.0.3
|
||||
2ef82d82daacb72733a3a532a95c5a37164e5819 2.0.3
|
||||
c777dcad166548b7499564cb49ae5c8b4b07f935 2.0.3
|
||||
c777dcad166548b7499564cb49ae5c8b4b07f935 2.0.3
|
||||
49f11dbff725acdcc5fe3657cbcdf9ae04e25bbc 2.0.3
|
||||
|
||||
72
CHANGELOG
72
CHANGELOG
@@ -1,3 +1,75 @@
|
||||
Changes between 2.0.2 and 2.0.3
|
||||
----------------------------------------------
|
||||
|
||||
- fix issue38: nicer tracebacks on calls to hooks, particularly early
|
||||
configure/sessionstart ones
|
||||
|
||||
- fix missing skip reason/meta information in junitxml files, reported
|
||||
via http://lists.idyll.org/pipermail/testing-in-python/2011-March/003928.html
|
||||
|
||||
- fix issue34: avoid collection failure with "test" prefixed classes
|
||||
deriving from object.
|
||||
|
||||
- don't require zlib (and other libs) for genscript plugin without
|
||||
--genscript actually being used.
|
||||
|
||||
- speed up skips (by not doing a full traceback represenation
|
||||
internally)
|
||||
|
||||
- fix issue37: avoid invalid characters in junitxml's output
|
||||
|
||||
Changes between 2.0.1 and 2.0.2
|
||||
----------------------------------------------
|
||||
|
||||
- tackle issue32 - speed up test runs of very quick test functions
|
||||
by reducing the relative overhead
|
||||
|
||||
- fix issue30 - extended xfail/skipif handling and improved reporting.
|
||||
If you have a syntax error in your skip/xfail
|
||||
expressions you now get nice error reports.
|
||||
|
||||
Also you can now access module globals from xfail/skipif
|
||||
expressions so that this for example works now::
|
||||
|
||||
import pytest
|
||||
import mymodule
|
||||
@pytest.mark.skipif("mymodule.__version__[0] == "1")
|
||||
def test_function():
|
||||
pass
|
||||
|
||||
This will not run the test function if the module's version string
|
||||
does not start with a "1". Note that specifying a string instead
|
||||
of a boolean expressions allows py.test to report meaningful information
|
||||
when summarizing a test run as to what conditions lead to skipping
|
||||
(or xfail-ing) tests.
|
||||
|
||||
- fix issue28 - setup_method and pytest_generate_tests work together
|
||||
The setup_method fixture method now gets called also for
|
||||
test function invocations generated from the pytest_generate_tests
|
||||
hook.
|
||||
|
||||
- fix issue27 - collectonly and keyword-selection (-k) now work together
|
||||
Also, if you do "py.test --collectonly -q" you now get a flat list
|
||||
of test ids that you can use to paste to the py.test commandline
|
||||
in order to execute a particular test.
|
||||
|
||||
- fix issue25 avoid reported problems with --pdb and python3.2/encodings output
|
||||
|
||||
- fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP
|
||||
Starting with Python3.2 os.symlink may be supported. By requiring
|
||||
a newer py lib version the py.path.local() implementation acknowledges
|
||||
this.
|
||||
|
||||
- fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular
|
||||
thanks to Laura Creighton who also revieved parts of the documentation.
|
||||
|
||||
- fix slighly wrong output of verbose progress reporting for classes
|
||||
(thanks Amaury)
|
||||
|
||||
- more precise (avoiding of) deprecation warnings for node.Class|Function accesses
|
||||
|
||||
- avoid std unittest assertion helper code in tracebacks (thanks Ronny)
|
||||
|
||||
Changes between 2.0.0 and 2.0.1
|
||||
----------------------------------------------
|
||||
|
||||
|
||||
10
ISSUES.txt
10
ISSUES.txt
@@ -88,6 +88,16 @@ etc. Idea is to allow Python expressions which can operate
|
||||
on common spellings for operating systems and python
|
||||
interpreter versions.
|
||||
|
||||
pytest.mark.xfail signature change
|
||||
-------------------------------------------------------
|
||||
tags: feature 2.1
|
||||
|
||||
change to pytest.mark.xfail(reason, (optional)condition)
|
||||
to better implement the word meaning. It also signals
|
||||
better that we always have some kind of an implementation
|
||||
reason that can be formualated.
|
||||
Compatibility? Maybe rename to "pytest.mark.xfail"?
|
||||
|
||||
introduce py.test.mark registration
|
||||
-----------------------------------------
|
||||
tags: feature 2.1
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
#
|
||||
__version__ = '2.0.3'
|
||||
|
||||
@@ -12,11 +12,12 @@ def pytest_addoption(parser):
|
||||
help="disable python assert expression reinterpretation."),
|
||||
|
||||
def pytest_configure(config):
|
||||
# The _pytesthook attribute on the AssertionError is used by
|
||||
# The _reprcompare attribute on the py.code module is used by
|
||||
# py._code._assertionnew to detect this plugin was loaded and in
|
||||
# turn call the hooks defined here as part of the
|
||||
# DebugInterpreter.
|
||||
config._monkeypatch = m = monkeypatch()
|
||||
m = monkeypatch()
|
||||
config._cleanup.append(m.undo)
|
||||
warn_about_missing_assertion()
|
||||
if not config.getvalue("noassert") and not config.getvalue("nomagic"):
|
||||
def callbinrepr(op, left, right):
|
||||
@@ -29,9 +30,6 @@ def pytest_configure(config):
|
||||
'AssertionError', py.code._AssertionError)
|
||||
m.setattr(py.code, '_reprcompare', callbinrepr)
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
config._monkeypatch.undo()
|
||||
|
||||
def warn_about_missing_assertion():
|
||||
try:
|
||||
assert False
|
||||
@@ -51,7 +49,7 @@ except NameError:
|
||||
def pytest_assertrepr_compare(op, left, right):
|
||||
"""return specialised explanations for some operators/operands"""
|
||||
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
|
||||
left_repr = py.io.saferepr(left, maxsize=width/2)
|
||||
left_repr = py.io.saferepr(left, maxsize=int(width/2))
|
||||
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
|
||||
summary = '%s %s %s' % (left_repr, op, right_repr)
|
||||
|
||||
|
||||
@@ -192,18 +192,16 @@ class CaptureManager:
|
||||
return rep
|
||||
|
||||
def pytest_funcarg__capsys(request):
|
||||
"""captures writes to sys.stdout/sys.stderr and makes
|
||||
them available successively via a ``capsys.readouterr()`` method
|
||||
which returns a ``(out, err)`` tuple of captured snapshot strings.
|
||||
"""enables capturing of writes to sys.stdout/sys.stderr and makes
|
||||
captured output available via ``capsys.readouterr()`` method calls
|
||||
which return a ``(out, err)`` tuple.
|
||||
"""
|
||||
return CaptureFuncarg(py.io.StdCapture)
|
||||
|
||||
def pytest_funcarg__capfd(request):
|
||||
"""captures writes to file descriptors 1 and 2 and makes
|
||||
snapshotted ``(out, err)`` string tuples available
|
||||
via the ``capsys.readouterr()`` method. If the underlying
|
||||
platform does not have ``os.dup`` (e.g. Jython) tests using
|
||||
this funcarg will automatically skip.
|
||||
"""enables capturing of writes to file descriptors 1 and 2 and makes
|
||||
captured output available via ``capsys.readouterr()`` method calls
|
||||
which return a ``(out, err)`` tuple.
|
||||
"""
|
||||
if not hasattr(os, 'dup'):
|
||||
py.test.skip("capfd funcarg needs os.dup")
|
||||
|
||||
@@ -12,6 +12,10 @@ def pytest_cmdline_parse(pluginmanager, args):
|
||||
config.trace.root.setwriter(sys.stderr.write)
|
||||
return config
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
for func in config._cleanup:
|
||||
func()
|
||||
|
||||
class Parser:
|
||||
""" Parser for command line arguments. """
|
||||
|
||||
@@ -251,6 +255,17 @@ class Config(object):
|
||||
self._conftest = Conftest(onimport=self._onimportconftest)
|
||||
self.hook = self.pluginmanager.hook
|
||||
self._inicache = {}
|
||||
self._cleanup = []
|
||||
|
||||
@classmethod
|
||||
def fromdictargs(cls, option_dict, args):
|
||||
""" constructor useable for subprocesses. """
|
||||
config = cls()
|
||||
config._preparse(args, addopts=False)
|
||||
config.option.__dict__.update(option_dict)
|
||||
for x in config.option.plugins:
|
||||
config.pluginmanager.consider_pluginarg(x)
|
||||
return config
|
||||
|
||||
def _onimportconftest(self, conftestmodule):
|
||||
self.trace("loaded conftestmodule %r" %(conftestmodule,))
|
||||
|
||||
@@ -60,6 +60,7 @@ class TagTracerSub:
|
||||
class PluginManager(object):
|
||||
def __init__(self, load=False):
|
||||
self._name2plugin = {}
|
||||
self._listattrcache = {}
|
||||
self._plugins = []
|
||||
self._hints = []
|
||||
self.trace = TagTracer().get("pluginmanage")
|
||||
@@ -163,14 +164,17 @@ class PluginManager(object):
|
||||
def consider_preparse(self, args):
|
||||
for opt1,opt2 in zip(args, args[1:]):
|
||||
if opt1 == "-p":
|
||||
if opt2.startswith("no:"):
|
||||
name = opt2[3:]
|
||||
if self.getplugin(name) is not None:
|
||||
self.unregister(None, name=name)
|
||||
self._name2plugin[name] = -1
|
||||
else:
|
||||
if self.getplugin(opt2) is None:
|
||||
self.import_plugin(opt2)
|
||||
self.consider_pluginarg(opt2)
|
||||
|
||||
def consider_pluginarg(self, arg):
|
||||
if arg.startswith("no:"):
|
||||
name = arg[3:]
|
||||
if self.getplugin(name) is not None:
|
||||
self.unregister(None, name=name)
|
||||
self._name2plugin[name] = -1
|
||||
else:
|
||||
if self.getplugin(arg) is None:
|
||||
self.import_plugin(arg)
|
||||
|
||||
def consider_conftest(self, conftestmodule):
|
||||
if self.register(conftestmodule, name=conftestmodule.__file__):
|
||||
@@ -261,8 +265,15 @@ class PluginManager(object):
|
||||
config.hook.pytest_unconfigure(config=config)
|
||||
config.pluginmanager.unregister(self)
|
||||
|
||||
def notify_exception(self, excinfo):
|
||||
excrepr = excinfo.getrepr(funcargs=True, showlocals=True)
|
||||
def notify_exception(self, excinfo, option=None):
|
||||
if option and option.fulltrace:
|
||||
style = "long"
|
||||
else:
|
||||
style = "native"
|
||||
excrepr = excinfo.getrepr(funcargs=True,
|
||||
showlocals=getattr(option, 'showlocals', False),
|
||||
style=style,
|
||||
)
|
||||
res = self.hook.pytest_internalerror(excrepr=excrepr)
|
||||
if not py.builtin.any(res):
|
||||
for line in str(excrepr).split("\n"):
|
||||
@@ -272,6 +283,11 @@ class PluginManager(object):
|
||||
def listattr(self, attrname, plugins=None):
|
||||
if plugins is None:
|
||||
plugins = self._plugins
|
||||
key = (attrname,) + tuple(plugins)
|
||||
try:
|
||||
return list(self._listattrcache[key])
|
||||
except KeyError:
|
||||
pass
|
||||
l = []
|
||||
last = []
|
||||
for plugin in plugins:
|
||||
@@ -286,6 +302,7 @@ class PluginManager(object):
|
||||
except AttributeError:
|
||||
continue
|
||||
l.extend(last)
|
||||
self._listattrcache[key] = list(l)
|
||||
return l
|
||||
|
||||
def call_plugin(self, plugin, methname, kwargs):
|
||||
@@ -340,14 +357,20 @@ class MultiCall:
|
||||
return kwargs
|
||||
|
||||
def varnames(func):
|
||||
try:
|
||||
return func._varnames
|
||||
except AttributeError:
|
||||
pass
|
||||
if not inspect.isfunction(func) and not inspect.ismethod(func):
|
||||
func = getattr(func, '__call__', func)
|
||||
ismethod = inspect.ismethod(func)
|
||||
rawcode = py.code.getrawcode(func)
|
||||
try:
|
||||
return rawcode.co_varnames[ismethod:rawcode.co_argcount]
|
||||
x = rawcode.co_varnames[ismethod:rawcode.co_argcount]
|
||||
except AttributeError:
|
||||
return ()
|
||||
x = ()
|
||||
py.builtin._getfuncdict(func)['_varnames'] = x
|
||||
return x
|
||||
|
||||
class HookRelay:
|
||||
def __init__(self, hookspecs, pm, prefix="pytest_"):
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
""" generate a single-file self-contained version of py.test """
|
||||
import py
|
||||
import pickle
|
||||
import zlib
|
||||
import base64
|
||||
|
||||
def find_toplevel(name):
|
||||
for syspath in py.std.sys.path:
|
||||
@@ -31,9 +28,9 @@ def pkg_to_mapping(name):
|
||||
return name2src
|
||||
|
||||
def compress_mapping(mapping):
|
||||
data = pickle.dumps(mapping, 2)
|
||||
data = zlib.compress(data, 9)
|
||||
data = base64.encodestring(data)
|
||||
data = py.std.pickle.dumps(mapping, 2)
|
||||
data = py.std.zlib.compress(data, 9)
|
||||
data = py.std.base64.encodestring(data)
|
||||
data = data.decode('ascii')
|
||||
return data
|
||||
|
||||
@@ -44,7 +41,6 @@ def compress_packages(names):
|
||||
mapping.update(pkg_to_mapping(name))
|
||||
return compress_mapping(mapping)
|
||||
|
||||
|
||||
def generate_script(entry, packages):
|
||||
data = compress_packages(packages)
|
||||
tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py')
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
import py
|
||||
import pytest
|
||||
import inspect, sys
|
||||
from _pytest.core import varnames
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup('debugconfig')
|
||||
@@ -135,12 +136,11 @@ def pytest_plugin_registered(manager, plugin):
|
||||
fail = True
|
||||
else:
|
||||
#print "checking", method
|
||||
method_args = getargs(method)
|
||||
#print "method_args", method_args
|
||||
method_args = list(varnames(method))
|
||||
if '__multicall__' in method_args:
|
||||
method_args.remove('__multicall__')
|
||||
hook = hooks[name]
|
||||
hookargs = getargs(hook)
|
||||
hookargs = varnames(hook)
|
||||
for arg in method_args:
|
||||
if arg not in hookargs:
|
||||
Print("argument %r not available" %(arg, ))
|
||||
@@ -162,11 +162,6 @@ def isgenerichook(name):
|
||||
return name == "pytest_plugins" or \
|
||||
name.startswith("pytest_funcarg__")
|
||||
|
||||
def getargs(func):
|
||||
args = inspect.getargs(py.code.getrawcode(func))[0]
|
||||
startindex = inspect.ismethod(func) and 1 or 0
|
||||
return args[startindex:]
|
||||
|
||||
def collectattr(obj):
|
||||
methods = {}
|
||||
for apiname in dir(obj):
|
||||
|
||||
@@ -5,8 +5,42 @@ Based on initial code from Ross Lawley.
|
||||
|
||||
import py
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
# Python 2.X and 3.X compatibility
|
||||
try:
|
||||
unichr(65)
|
||||
except NameError:
|
||||
unichr = chr
|
||||
try:
|
||||
unicode('A')
|
||||
except NameError:
|
||||
unicode = str
|
||||
try:
|
||||
long(1)
|
||||
except NameError:
|
||||
long = int
|
||||
|
||||
|
||||
# We need to get the subset of the invalid unicode ranges according to
|
||||
# XML 1.0 which are valid in this python build. Hence we calculate
|
||||
# this dynamically instead of hardcoding it. The spec range of valid
|
||||
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
|
||||
# | [#x10000-#x10FFFF]
|
||||
_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19),
|
||||
(0xD800, 0xDFFF), (0xFDD0, 0xFFFF)]
|
||||
_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high))
|
||||
for (low, high) in _illegal_unichrs
|
||||
if low < sys.maxunicode]
|
||||
illegal_xml_re = re.compile(unicode('[%s]') %
|
||||
unicode('').join(_illegal_ranges))
|
||||
del _illegal_unichrs
|
||||
del _illegal_ranges
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting")
|
||||
group.addoption('--junitxml', action="store", dest="xmlpath",
|
||||
@@ -28,6 +62,7 @@ def pytest_unconfigure(config):
|
||||
del config._xml
|
||||
config.pluginmanager.unregister(xml)
|
||||
|
||||
|
||||
class LogXML(object):
|
||||
def __init__(self, logfile, prefix):
|
||||
self.logfile = logfile
|
||||
@@ -55,7 +90,14 @@ class LogXML(object):
|
||||
self.test_logs.append("</testcase>")
|
||||
|
||||
def appendlog(self, fmt, *args):
|
||||
args = tuple([py.xml.escape(arg) for arg in args])
|
||||
def repl(matchobj):
|
||||
i = ord(matchobj.group())
|
||||
if i <= 0xFF:
|
||||
return unicode('#x%02X') % i
|
||||
else:
|
||||
return unicode('#x%04X') % i
|
||||
args = tuple([illegal_xml_re.sub(repl, py.xml.escape(arg))
|
||||
for arg in args])
|
||||
self.test_logs.append(fmt % args)
|
||||
|
||||
def append_pass(self, report):
|
||||
@@ -106,7 +148,13 @@ class LogXML(object):
|
||||
'<skipped message="expected test failure">%s</skipped>',
|
||||
report.keywords['xfail'])
|
||||
else:
|
||||
self.appendlog("<skipped/>")
|
||||
filename, lineno, skipreason = report.longrepr
|
||||
if skipreason.startswith("Skipped: "):
|
||||
skipreason = skipreason[9:]
|
||||
self.appendlog('<skipped type="pytest.skip" '
|
||||
'message="%s">%s</skipped>',
|
||||
skipreason, "%s:%s: %s" % report.longrepr,
|
||||
)
|
||||
self._closetestcase()
|
||||
self.skipped += 1
|
||||
|
||||
|
||||
@@ -71,7 +71,7 @@ def pytest_cmdline_main(config):
|
||||
session.exitstatus = EXIT_INTERRUPTED
|
||||
except:
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
config.pluginmanager.notify_exception(excinfo)
|
||||
config.pluginmanager.notify_exception(excinfo, config.option)
|
||||
session.exitstatus = EXIT_INTERNALERROR
|
||||
if excinfo.errisinstance(SystemExit):
|
||||
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
|
||||
@@ -121,9 +121,6 @@ class HookProxy:
|
||||
|
||||
def compatproperty(name):
|
||||
def fget(self):
|
||||
#print "retrieving %r property from %s" %(name, self.fspath)
|
||||
py.log._apiwarn("2.0", "use pytest.%s for "
|
||||
"test collection and item classes" % name)
|
||||
return getattr(pytest, name)
|
||||
return property(fget, None, None,
|
||||
"deprecated attribute %r, use pytest.%s" % (name,name))
|
||||
@@ -157,6 +154,14 @@ class Node(object):
|
||||
File = compatproperty("File")
|
||||
Item = compatproperty("Item")
|
||||
|
||||
def _getcustomclass(self, name):
|
||||
cls = getattr(self, name)
|
||||
if cls != getattr(pytest, name):
|
||||
py.log._apiwarn("2.0", "use of node.%s is deprecated, "
|
||||
"use pytest_pycollect_makeitem(...) to create custom "
|
||||
"collection nodes" % name)
|
||||
return cls
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s %r>" %(self.__class__.__name__, getattr(self, 'name', None))
|
||||
|
||||
@@ -321,7 +326,13 @@ class Item(Node):
|
||||
return self._location
|
||||
except AttributeError:
|
||||
location = self.reportinfo()
|
||||
fspath = self.session.fspath.bestrelpath(location[0])
|
||||
# bestrelpath is a quite slow function
|
||||
cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
|
||||
try:
|
||||
fspath = cache[location[0]]
|
||||
except KeyError:
|
||||
fspath = self.session.fspath.bestrelpath(location[0])
|
||||
cache[location[0]] = fspath
|
||||
location = (fspath, location[1], str(location[2]))
|
||||
self._location = location
|
||||
return location
|
||||
|
||||
@@ -89,8 +89,8 @@ class MarkGenerator:
|
||||
class MarkDecorator:
|
||||
""" A decorator for test functions and test classes. When applied
|
||||
it will create :class:`MarkInfo` objects which may be
|
||||
:ref:`retrieved by hooks as item keywords` MarkDecorator instances
|
||||
are usually created by writing::
|
||||
:ref:`retrieved by hooks as item keywords <excontrolskip>`.
|
||||
MarkDecorator instances are often created like this::
|
||||
|
||||
mark1 = py.test.mark.NAME # simple MarkDecorator
|
||||
mark2 = py.test.mark.NAME(name1=value) # parametrized MarkDecorator
|
||||
|
||||
@@ -14,8 +14,8 @@ def pytest_funcarg__monkeypatch(request):
|
||||
monkeypatch.delenv(name, value, raising=True)
|
||||
monkeypatch.syspath_prepend(path)
|
||||
|
||||
All modifications will be undone when the requesting
|
||||
test function finished its execution. The ``raising``
|
||||
All modifications will be undone after the requesting
|
||||
test function has finished. The ``raising``
|
||||
parameter determines if a KeyError or AttributeError
|
||||
will be raised if the set/deletion operation has no target.
|
||||
"""
|
||||
|
||||
@@ -52,7 +52,10 @@ class PdbInvoke:
|
||||
if "xfail" in rep.keywords:
|
||||
return rep
|
||||
# we assume that the above execute() suspended capturing
|
||||
tw = py.io.TerminalWriter()
|
||||
# XXX we re-use the TerminalReporter's terminalwriter
|
||||
# because this seems to avoid some encoding related troubles
|
||||
# for not completely clear reasons.
|
||||
tw = item.config.pluginmanager.getplugin("terminalreporter")._tw
|
||||
tw.line()
|
||||
tw.sep(">", "traceback")
|
||||
rep.toterminal(tw)
|
||||
|
||||
@@ -236,13 +236,14 @@ class TmpTestdir:
|
||||
def _makefile(self, ext, args, kwargs):
|
||||
items = list(kwargs.items())
|
||||
if args:
|
||||
source = "\n".join(map(str, args)) + "\n"
|
||||
source = py.builtin._totext("\n").join(
|
||||
map(py.builtin._totext, args)) + py.builtin._totext("\n")
|
||||
basename = self.request.function.__name__
|
||||
items.insert(0, (basename, source))
|
||||
ret = None
|
||||
for name, value in items:
|
||||
p = self.tmpdir.join(name).new(ext=ext)
|
||||
source = str(py.code.Source(value)).lstrip()
|
||||
source = py.builtin._totext(py.code.Source(value)).lstrip()
|
||||
p.write(source.encode("utf-8"), "wb")
|
||||
if ret is None:
|
||||
ret = p
|
||||
|
||||
@@ -70,10 +70,13 @@ def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
|
||||
res = __multicall__.execute()
|
||||
if res is not None:
|
||||
return res
|
||||
if collector._istestclasscandidate(name, obj):
|
||||
if inspect.isclass(obj):
|
||||
#if hasattr(collector.obj, 'unittest'):
|
||||
# return # we assume it's a mixin class for a TestCase derived one
|
||||
return collector.Class(name, parent=collector)
|
||||
if collector.classnamefilter(name):
|
||||
if not hasinit(obj):
|
||||
Class = collector._getcustomclass("Class")
|
||||
return Class(name, parent=collector)
|
||||
elif collector.funcnamefilter(name) and hasattr(obj, '__call__'):
|
||||
if is_generator(obj):
|
||||
return Generator(name, parent=collector)
|
||||
@@ -193,14 +196,6 @@ class PyCollectorMixin(PyobjMixin, pytest.Collector):
|
||||
return self.ihook.pytest_pycollect_makeitem(
|
||||
collector=self, name=name, obj=obj)
|
||||
|
||||
def _istestclasscandidate(self, name, obj):
|
||||
if self.classnamefilter(name) and \
|
||||
inspect.isclass(obj):
|
||||
if hasinit(obj):
|
||||
# XXX WARN
|
||||
return False
|
||||
return True
|
||||
|
||||
def _genfunctions(self, name, funcobj):
|
||||
module = self.getparent(Module).obj
|
||||
clscol = self.getparent(Class)
|
||||
@@ -213,16 +208,18 @@ class PyCollectorMixin(PyobjMixin, pytest.Collector):
|
||||
extra.append(cls())
|
||||
plugins = self.getplugins() + extra
|
||||
gentesthook.pcall(plugins, metafunc=metafunc)
|
||||
Function = self._getcustomclass("Function")
|
||||
if not metafunc._calls:
|
||||
return self.Function(name, parent=self)
|
||||
return Function(name, parent=self)
|
||||
l = []
|
||||
for callspec in metafunc._calls:
|
||||
subname = "%s[%s]" %(name, callspec.id)
|
||||
function = self.Function(name=subname, parent=self,
|
||||
function = Function(name=subname, parent=self,
|
||||
callspec=callspec, callobj=funcobj, keywords={callspec.id:True})
|
||||
l.append(function)
|
||||
return l
|
||||
|
||||
|
||||
class Module(pytest.File, PyCollectorMixin):
|
||||
def _getobj(self):
|
||||
return self._memoizedcall('_obj', self._importtestmodule)
|
||||
@@ -272,7 +269,7 @@ class Module(pytest.File, PyCollectorMixin):
|
||||
class Class(PyCollectorMixin, pytest.Collector):
|
||||
|
||||
def collect(self):
|
||||
return [self.Instance(name="()", parent=self)]
|
||||
return [self._getcustomclass("Instance")(name="()", parent=self)]
|
||||
|
||||
def setup(self):
|
||||
setup_class = getattr(self.obj, 'setup_class', None)
|
||||
@@ -297,13 +294,8 @@ class Instance(PyCollectorMixin, pytest.Collector):
|
||||
class FunctionMixin(PyobjMixin):
|
||||
""" mixin for the code common to Function and Generator.
|
||||
"""
|
||||
|
||||
def setup(self):
|
||||
""" perform setup for this test function. """
|
||||
if inspect.ismethod(self.obj):
|
||||
name = 'setup_method'
|
||||
else:
|
||||
name = 'setup_function'
|
||||
if hasattr(self, '_preservedparent'):
|
||||
obj = self._preservedparent
|
||||
elif isinstance(self.parent, Instance):
|
||||
@@ -311,6 +303,10 @@ class FunctionMixin(PyobjMixin):
|
||||
self.obj = self._getobj()
|
||||
else:
|
||||
obj = self.parent.obj
|
||||
if inspect.ismethod(self.obj):
|
||||
name = 'setup_method'
|
||||
else:
|
||||
name = 'setup_function'
|
||||
setup_func_or_method = getattr(obj, name, None)
|
||||
if setup_func_or_method is not None:
|
||||
setup_func_or_method(self.obj)
|
||||
@@ -487,10 +483,11 @@ def hasinit(obj):
|
||||
return True
|
||||
|
||||
|
||||
def getfuncargnames(function):
|
||||
def getfuncargnames(function, startindex=None):
|
||||
# XXX merge with main.py's varnames
|
||||
argnames = py.std.inspect.getargs(py.code.getrawcode(function))[0]
|
||||
startindex = py.std.inspect.ismethod(function) and 1 or 0
|
||||
if startindex is None:
|
||||
startindex = py.std.inspect.ismethod(function) and 1 or 0
|
||||
defaults = getattr(function, 'func_defaults',
|
||||
getattr(function, '__defaults__', None)) or ()
|
||||
numdefaults = len(defaults)
|
||||
@@ -519,7 +516,8 @@ class Metafunc:
|
||||
self.config = config
|
||||
self.module = module
|
||||
self.function = function
|
||||
self.funcargnames = getfuncargnames(function)
|
||||
self.funcargnames = getfuncargnames(function,
|
||||
startindex=int(cls is not None))
|
||||
self.cls = cls
|
||||
self.module = module
|
||||
self._calls = []
|
||||
@@ -527,7 +525,11 @@ class Metafunc:
|
||||
|
||||
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
|
||||
""" add a new call to the underlying test function during the
|
||||
collection phase of a test run.
|
||||
collection phase of a test run. Note that request.addcall() is
|
||||
called during the test collection phase prior and independently
|
||||
to actual test execution. Therefore you should perform setup
|
||||
of resources in a funcarg factory which can be instrumented
|
||||
with the ``param``.
|
||||
|
||||
:arg funcargs: argument keyword dictionary used when invoking
|
||||
the test function.
|
||||
@@ -537,14 +539,15 @@ class Metafunc:
|
||||
list of calls to the test function will be used.
|
||||
|
||||
:arg param: will be exposed to a later funcarg factory invocation
|
||||
through the ``request.param`` attribute. Setting it (instead of
|
||||
directly providing a ``funcargs`` ditionary) is called
|
||||
*indirect parametrization*. Indirect parametrization is
|
||||
preferable if test values are expensive to setup or can
|
||||
only be created after certain fixtures or test-run related
|
||||
initialization code has been run.
|
||||
through the ``request.param`` attribute. It allows to
|
||||
defer test fixture setup activities to when an actual
|
||||
test is run.
|
||||
"""
|
||||
assert funcargs is None or isinstance(funcargs, dict)
|
||||
if funcargs is not None:
|
||||
for name in funcargs:
|
||||
if name not in self.funcargnames:
|
||||
pytest.fail("funcarg %r not used in this function." % name)
|
||||
if id is None:
|
||||
raise ValueError("id=None not allowed")
|
||||
if id is _notexists:
|
||||
@@ -556,7 +559,13 @@ class Metafunc:
|
||||
self._calls.append(CallSpec(funcargs, id, param))
|
||||
|
||||
class FuncargRequest:
|
||||
""" A request for function arguments from a test function. """
|
||||
""" A request for function arguments from a test function.
|
||||
|
||||
Note that there is an optional ``param`` attribute in case
|
||||
there was an invocation to metafunc.addcall(param=...).
|
||||
If no such call was done in a ``pytest_generate_tests``
|
||||
hook, the attribute will not be present.
|
||||
"""
|
||||
_argprefix = "pytest_funcarg__"
|
||||
_argname = None
|
||||
|
||||
|
||||
@@ -8,6 +8,9 @@ def pytest_funcarg__recwarn(request):
|
||||
|
||||
* ``pop(category=None)``: return last warning matching the category.
|
||||
* ``clear()``: clear list of warnings
|
||||
|
||||
See http://docs.python.org/library/warnings.html for information
|
||||
on warning categories.
|
||||
"""
|
||||
if sys.version_info >= (2,7):
|
||||
import warnings
|
||||
|
||||
@@ -153,7 +153,7 @@ def pytest_runtest_makereport(item, call):
|
||||
longrepr = excinfo
|
||||
elif excinfo.errisinstance(py.test.skip.Exception):
|
||||
outcome = "skipped"
|
||||
r = item._repr_failure_py(excinfo, "line").reprcrash
|
||||
r = excinfo._getreprcrash()
|
||||
longrepr = (str(r.path), r.lineno, r.message)
|
||||
else:
|
||||
outcome = "failed"
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
""" support for skip/xfail functions and markers. """
|
||||
|
||||
import py, pytest
|
||||
import sys
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("general")
|
||||
@@ -32,9 +33,39 @@ class MarkEvaluator:
|
||||
return bool(self.holder)
|
||||
__nonzero__ = __bool__
|
||||
|
||||
def wasvalid(self):
|
||||
return not hasattr(self, 'exc')
|
||||
|
||||
def istrue(self):
|
||||
try:
|
||||
return self._istrue()
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except:
|
||||
self.exc = sys.exc_info()
|
||||
if isinstance(self.exc[1], SyntaxError):
|
||||
msg = [" " * (self.exc[1].offset + 4) + "^",]
|
||||
msg.append("SyntaxError: invalid syntax")
|
||||
else:
|
||||
msg = py.std.traceback.format_exception_only(*self.exc[:2])
|
||||
pytest.fail("Error evaluating %r expression\n"
|
||||
" %s\n"
|
||||
"%s"
|
||||
%(self.name, self.expr, "\n".join(msg)),
|
||||
pytrace=False)
|
||||
|
||||
def _getglobals(self):
|
||||
d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
|
||||
func = self.item.obj
|
||||
try:
|
||||
d.update(func.__globals__)
|
||||
except AttributeError:
|
||||
d.update(func.func_globals)
|
||||
return d
|
||||
|
||||
def _istrue(self):
|
||||
if self.holder:
|
||||
d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
|
||||
d = self._getglobals()
|
||||
if self.holder.args:
|
||||
self.result = False
|
||||
for expr in self.holder.args:
|
||||
@@ -42,7 +73,7 @@ class MarkEvaluator:
|
||||
if isinstance(expr, str):
|
||||
result = cached_eval(self.item.config, expr, d)
|
||||
else:
|
||||
result = expr
|
||||
pytest.fail("expression is not a string")
|
||||
if result:
|
||||
self.result = True
|
||||
self.expr = expr
|
||||
@@ -60,7 +91,7 @@ class MarkEvaluator:
|
||||
if not hasattr(self, 'expr'):
|
||||
return ""
|
||||
else:
|
||||
return "condition: " + self.expr
|
||||
return "condition: " + str(self.expr)
|
||||
return expl
|
||||
|
||||
|
||||
@@ -99,16 +130,17 @@ def pytest_runtest_makereport(__multicall__, item, call):
|
||||
return rep
|
||||
rep = __multicall__.execute()
|
||||
evalxfail = item._evalxfail
|
||||
if not item.config.option.runxfail and evalxfail.istrue():
|
||||
if call.excinfo:
|
||||
rep.outcome = "skipped"
|
||||
rep.keywords['xfail'] = evalxfail.getexplanation()
|
||||
elif call.when == "call":
|
||||
rep.outcome = "failed"
|
||||
rep.keywords['xfail'] = evalxfail.getexplanation()
|
||||
else:
|
||||
if 'xfail' in rep.keywords:
|
||||
del rep.keywords['xfail']
|
||||
if not item.config.option.runxfail:
|
||||
if evalxfail.wasvalid() and evalxfail.istrue():
|
||||
if call.excinfo:
|
||||
rep.outcome = "skipped"
|
||||
rep.keywords['xfail'] = evalxfail.getexplanation()
|
||||
elif call.when == "call":
|
||||
rep.outcome = "failed"
|
||||
rep.keywords['xfail'] = evalxfail.getexplanation()
|
||||
return rep
|
||||
if 'xfail' in rep.keywords:
|
||||
del rep.keywords['xfail']
|
||||
return rep
|
||||
|
||||
# called by terminalreporter progress reporting
|
||||
@@ -179,7 +211,8 @@ def cached_eval(config, expr, d):
|
||||
except KeyError:
|
||||
#import sys
|
||||
#print >>sys.stderr, ("cache-miss: %r" % expr)
|
||||
config._evalcache[expr] = x = eval(expr, d)
|
||||
exprcode = py.code.compile(expr, mode="eval")
|
||||
config._evalcache[expr] = x = eval(exprcode, d)
|
||||
return x
|
||||
|
||||
|
||||
|
||||
@@ -25,29 +25,26 @@ def pytest_addoption(parser):
|
||||
group._addoption('--tb', metavar="style",
|
||||
action="store", dest="tbstyle", default='long',
|
||||
type="choice", choices=['long', 'short', 'no', 'line', 'native'],
|
||||
help="traceback print mode (long/short/line/no).")
|
||||
help="traceback print mode (long/short/line/native/no).")
|
||||
group._addoption('--fulltrace',
|
||||
action="store_true", dest="fulltrace", default=False,
|
||||
help="don't cut any tracebacks (default is to cut).")
|
||||
|
||||
def pytest_configure(config):
|
||||
config.option.verbose -= config.option.quiet
|
||||
if config.option.collectonly:
|
||||
reporter = CollectonlyReporter(config)
|
||||
else:
|
||||
# we try hard to make printing resilient against
|
||||
# later changes on FD level.
|
||||
stdout = py.std.sys.stdout
|
||||
if hasattr(os, 'dup') and hasattr(stdout, 'fileno'):
|
||||
try:
|
||||
newfd = os.dup(stdout.fileno())
|
||||
#print "got newfd", newfd
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
stdout = os.fdopen(newfd, stdout.mode, 1)
|
||||
config._toclose = stdout
|
||||
reporter = TerminalReporter(config, stdout)
|
||||
# we try hard to make printing resilient against
|
||||
# later changes on FD level.
|
||||
stdout = py.std.sys.stdout
|
||||
if hasattr(os, 'dup') and hasattr(stdout, 'fileno'):
|
||||
try:
|
||||
newfd = os.dup(stdout.fileno())
|
||||
#print "got newfd", newfd
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
stdout = os.fdopen(newfd, stdout.mode, 1)
|
||||
config._toclose = stdout
|
||||
reporter = TerminalReporter(config, stdout)
|
||||
config.pluginmanager.register(reporter, 'terminalreporter')
|
||||
if config.option.debug or config.option.traceconfig:
|
||||
def mywriter(tags, args):
|
||||
@@ -273,12 +270,45 @@ class TerminalReporter:
|
||||
for line in flatten(lines):
|
||||
self.write_line(line)
|
||||
|
||||
def pytest_collection_finish(self):
|
||||
def pytest_collection_finish(self, session):
|
||||
if self.config.option.collectonly:
|
||||
self._printcollecteditems(session.items)
|
||||
if self.stats.get('failed'):
|
||||
self._tw.sep("!", "collection failures")
|
||||
for rep in self.stats.get('failed'):
|
||||
rep.toterminal(self._tw)
|
||||
return 1
|
||||
return 0
|
||||
if not self.showheader:
|
||||
return
|
||||
#for i, testarg in enumerate(self.config.args):
|
||||
# self.write_line("test path %d: %s" %(i+1, testarg))
|
||||
|
||||
def _printcollecteditems(self, items):
|
||||
# to print out items and their parent collectors
|
||||
# we take care to leave out Instances aka ()
|
||||
# because later versions are going to get rid of them anyway
|
||||
if self.config.option.verbose < 0:
|
||||
for item in items:
|
||||
nodeid = item.nodeid
|
||||
nodeid = nodeid.replace("::()::", "::")
|
||||
self._tw.line(nodeid)
|
||||
return
|
||||
stack = []
|
||||
indent = ""
|
||||
for item in items:
|
||||
needed_collectors = item.listchain()[1:] # strip root node
|
||||
while stack:
|
||||
if stack == needed_collectors[:len(stack)]:
|
||||
break
|
||||
stack.pop()
|
||||
for col in needed_collectors[len(stack):]:
|
||||
stack.append(col)
|
||||
#if col.name == "()":
|
||||
# continue
|
||||
indent = (len(stack)-1) * " "
|
||||
self._tw.line("%s%s" %(indent, col))
|
||||
|
||||
def pytest_sessionfinish(self, exitstatus, __multicall__):
|
||||
__multicall__.execute()
|
||||
self._tw.line("")
|
||||
@@ -305,19 +335,19 @@ class TerminalReporter:
|
||||
excrepr.reprcrash.toterminal(self._tw)
|
||||
|
||||
def _locationline(self, collect_fspath, fspath, lineno, domain):
|
||||
if fspath and fspath != collect_fspath:
|
||||
# collect_fspath comes from testid which has a "/"-normalized path
|
||||
if fspath and fspath.replace("\\", "/") != collect_fspath:
|
||||
fspath = "%s <- %s" % (collect_fspath, fspath)
|
||||
if lineno is not None:
|
||||
lineno += 1
|
||||
if fspath and lineno and domain:
|
||||
line = "%(fspath)s:%(lineno)s: %(domain)s"
|
||||
elif fspath and domain:
|
||||
line = "%(fspath)s: %(domain)s"
|
||||
elif fspath and lineno:
|
||||
line = "%(fspath)s:%(lineno)s %(extrapath)s"
|
||||
if fspath:
|
||||
line = str(fspath)
|
||||
if lineno is not None:
|
||||
lineno += 1
|
||||
line += ":" + str(lineno)
|
||||
if domain:
|
||||
line += ": " + str(domain)
|
||||
else:
|
||||
line = "[nolocation]"
|
||||
return line % locals() + " "
|
||||
line = "[location]"
|
||||
return line + " "
|
||||
|
||||
def _getfailureheadline(self, rep):
|
||||
if hasattr(rep, 'location'):
|
||||
@@ -403,52 +433,6 @@ class TerminalReporter:
|
||||
self.write_sep("=", "%d tests deselected by %r" %(
|
||||
len(self.stats['deselected']), self.config.option.keyword), bold=True)
|
||||
|
||||
|
||||
class CollectonlyReporter:
|
||||
INDENT = " "
|
||||
|
||||
def __init__(self, config, out=None):
|
||||
self.config = config
|
||||
if out is None:
|
||||
out = py.std.sys.stdout
|
||||
self._tw = py.io.TerminalWriter(out)
|
||||
self.indent = ""
|
||||
self._failed = []
|
||||
|
||||
def outindent(self, line):
|
||||
self._tw.line(self.indent + str(line))
|
||||
|
||||
def pytest_internalerror(self, excrepr):
|
||||
for line in str(excrepr).split("\n"):
|
||||
self._tw.line("INTERNALERROR> " + line)
|
||||
|
||||
def pytest_collectstart(self, collector):
|
||||
if collector.session != collector:
|
||||
self.outindent(collector)
|
||||
self.indent += self.INDENT
|
||||
|
||||
def pytest_itemcollected(self, item):
|
||||
self.outindent(item)
|
||||
|
||||
def pytest_collectreport(self, report):
|
||||
if not report.passed:
|
||||
if hasattr(report.longrepr, 'reprcrash'):
|
||||
msg = report.longrepr.reprcrash.message
|
||||
else:
|
||||
# XXX unify (we have CollectErrorRepr here)
|
||||
msg = str(report.longrepr[2])
|
||||
self.outindent("!!! %s !!!" % msg)
|
||||
#self.outindent("!!! error !!!")
|
||||
self._failed.append(report)
|
||||
self.indent = self.indent[:-len(self.INDENT)]
|
||||
|
||||
def pytest_collection_finish(self):
|
||||
if self._failed:
|
||||
self._tw.sep("!", "collection failures")
|
||||
for rep in self._failed:
|
||||
rep.toterminal(self._tw)
|
||||
return self._failed and 1 or 0
|
||||
|
||||
def repr_pythonversion(v=None):
|
||||
if v is None:
|
||||
v = sys.version_info
|
||||
|
||||
@@ -48,18 +48,15 @@ class TempdirHandler:
|
||||
self.trace("finish")
|
||||
|
||||
def pytest_configure(config):
|
||||
config._mp = mp = monkeypatch()
|
||||
mp = monkeypatch()
|
||||
t = TempdirHandler(config)
|
||||
config._cleanup.extend([mp.undo, t.finish])
|
||||
mp.setattr(config, '_tmpdirhandler', t, raising=False)
|
||||
mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
config._tmpdirhandler.finish()
|
||||
config._mp.undo()
|
||||
|
||||
def pytest_funcarg__tmpdir(request):
|
||||
"""return a temporary directory path object
|
||||
unique to each test function invocation,
|
||||
which is unique to each test function invocation,
|
||||
created as a sub directory of the base temporary
|
||||
directory. The returned object is a `py.path.local`_
|
||||
path object.
|
||||
|
||||
@@ -102,6 +102,10 @@ class TestCaseFunction(pytest.Function):
|
||||
def runtest(self):
|
||||
self._testcase(result=self)
|
||||
|
||||
def _prunetraceback(self, excinfo):
|
||||
pytest.Function._prunetraceback(self, excinfo)
|
||||
excinfo.traceback = excinfo.traceback.filter(lambda x:not x.frame.f_globals.get('__unittest'))
|
||||
|
||||
@pytest.mark.tryfirst
|
||||
def pytest_runtest_makereport(item, call):
|
||||
if isinstance(item, TestCaseFunction):
|
||||
|
||||
1
doc/_templates/layout.html
vendored
1
doc/_templates/layout.html
vendored
@@ -19,6 +19,7 @@
|
||||
<a href="{{ pathto('getting-started') }}">install</a> |
|
||||
<a href="{{ pathto('example/index') }}">examples</a> |
|
||||
<a href="{{ pathto('customize') }}">customize</a> |
|
||||
<a href="https://bitbucket.org/hpk42/pytest/issues?status=new&status=open">issues</a>|
|
||||
<a href="{{ pathto('contact') }}">contact</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -5,6 +5,8 @@ Release announcements
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
release-2.0.3
|
||||
release-2.0.2
|
||||
release-2.0.1
|
||||
release-2.0.0
|
||||
|
||||
|
||||
@@ -117,7 +117,7 @@ Important Notes
|
||||
- py.test.collect.Directory does not exist anymore and it
|
||||
is not possible to provide an own "Directory" object.
|
||||
If you have used this and don't know what to do, get
|
||||
in contact. We'll figure someting out.
|
||||
in contact. We'll figure something out.
|
||||
|
||||
Note that pytest_collect_directory() is still called but
|
||||
any return value will be ignored. This allows to keep
|
||||
|
||||
@@ -1,19 +1,15 @@
|
||||
py.test 2.0.1: bug fixes
|
||||
===========================================================================
|
||||
|
||||
Welcome to pytest-2.0.1, a maintenance and bug fix release. For detailed
|
||||
changes see below. pytest is a mature testing tool for Python,
|
||||
supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See
|
||||
docs here implementation. See docs with examples here:
|
||||
Welcome to pytest-2.0.1, a maintenance and bug fix release of pytest,
|
||||
a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
|
||||
and latest PyPy interpreters. See extensive docs with tested examples here:
|
||||
|
||||
http://pytest.org/
|
||||
|
||||
A note on packaging: pytest used to part of the "py" distribution up
|
||||
until version py-1.3.4 but this has changed now: pytest-2.0.X only
|
||||
contains py.test related code and is expected to be backward-compatible
|
||||
to existing test code. If you want to install pytest, just type one of::
|
||||
If you want to install or upgrade pytest, just type one of::
|
||||
|
||||
pip install -U pytest
|
||||
pip install -U pytest # or
|
||||
easy_install -U pytest
|
||||
|
||||
Many thanks to all issue reporters and people asking questions or
|
||||
@@ -23,7 +19,6 @@ for their great coding contributions and many others for feedback and help.
|
||||
best,
|
||||
holger krekel
|
||||
|
||||
|
||||
Changes between 2.0.0 and 2.0.1
|
||||
----------------------------------------------
|
||||
|
||||
@@ -68,5 +63,5 @@ Changes between 2.0.0 and 2.0.1
|
||||
collection-before-running semantics were not
|
||||
setup as with pytest 1.3.4. Note, however, that
|
||||
the recommended and much cleaner way to do test
|
||||
parametraization remains the "pytest_generate_tests"
|
||||
parametrization remains the "pytest_generate_tests"
|
||||
mechanism, see the docs.
|
||||
|
||||
73
doc/announce/release-2.0.2.txt
Normal file
73
doc/announce/release-2.0.2.txt
Normal file
@@ -0,0 +1,73 @@
|
||||
py.test 2.0.2: bug fixes, improved xfail/skip expressions, speedups
|
||||
===========================================================================
|
||||
|
||||
Welcome to pytest-2.0.2, a maintenance and bug fix release of pytest,
|
||||
a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
|
||||
and latest PyPy interpreters. See the extensive docs with tested examples here:
|
||||
|
||||
http://pytest.org/
|
||||
|
||||
If you want to install or upgrade pytest, just type one of::
|
||||
|
||||
pip install -U pytest # or
|
||||
easy_install -U pytest
|
||||
|
||||
Many thanks to all issue reporters and people asking questions
|
||||
or complaining, particularly Jurko for his insistence,
|
||||
Laura, Victor and Brianna for helping with improving
|
||||
and Ronny for his general advise.
|
||||
|
||||
best,
|
||||
holger krekel
|
||||
|
||||
Changes between 2.0.1 and 2.0.2
|
||||
----------------------------------------------
|
||||
|
||||
- tackle issue32 - speed up test runs of very quick test functions
|
||||
by reducing the relative overhead
|
||||
|
||||
- fix issue30 - extended xfail/skipif handling and improved reporting.
|
||||
If you have a syntax error in your skip/xfail
|
||||
expressions you now get nice error reports.
|
||||
|
||||
Also you can now access module globals from xfail/skipif
|
||||
expressions so that this for example works now::
|
||||
|
||||
import pytest
|
||||
import mymodule
|
||||
@pytest.mark.skipif("mymodule.__version__[0] == "1")
|
||||
def test_function():
|
||||
pass
|
||||
|
||||
This will not run the test function if the module's version string
|
||||
does not start with a "1". Note that specifying a string instead
|
||||
of a boolean expressions allows py.test to report meaningful information
|
||||
when summarizing a test run as to what conditions lead to skipping
|
||||
(or xfail-ing) tests.
|
||||
|
||||
- fix issue28 - setup_method and pytest_generate_tests work together
|
||||
The setup_method fixture method now gets called also for
|
||||
test function invocations generated from the pytest_generate_tests
|
||||
hook.
|
||||
|
||||
- fix issue27 - collectonly and keyword-selection (-k) now work together
|
||||
Also, if you do "py.test --collectonly -q" you now get a flat list
|
||||
of test ids that you can use to paste to the py.test commandline
|
||||
in order to execute a particular test.
|
||||
|
||||
- fix issue25 avoid reported problems with --pdb and python3.2/encodings output
|
||||
|
||||
- fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP
|
||||
Starting with Python3.2 os.symlink may be supported. By requiring
|
||||
a newer py lib version the py.path.local() implementation acknowledges
|
||||
this.
|
||||
|
||||
- fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular
|
||||
thanks to Laura Creighton who also revieved parts of the documentation.
|
||||
|
||||
- fix slighly wrong output of verbose progress reporting for classes
|
||||
(thanks Amaury)
|
||||
|
||||
- more precise (avoiding of) deprecation warnings for node.Class|Function accesses
|
||||
|
||||
- avoid std unittest assertion helper code in tracebacks (thanks Ronny)
|
||||
40
doc/announce/release-2.0.3.txt
Normal file
40
doc/announce/release-2.0.3.txt
Normal file
@@ -0,0 +1,40 @@
|
||||
py.test 2.0.3: bug fixes and speed ups
|
||||
===========================================================================
|
||||
|
||||
Welcome to pytest-2.0.3, a maintenance and bug fix release of pytest,
|
||||
a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
|
||||
and latest PyPy interpreters. See the extensive docs with tested examples here:
|
||||
|
||||
http://pytest.org/
|
||||
|
||||
If you want to install or upgrade pytest, just type one of::
|
||||
|
||||
pip install -U pytest # or
|
||||
easy_install -U pytest
|
||||
|
||||
There also is a bugfix release 1.6 of pytest-xdist, the plugin
|
||||
that enables seemless distributed and "looponfail" testing for Python.
|
||||
|
||||
best,
|
||||
holger krekel
|
||||
|
||||
Changes between 2.0.2 and 2.0.3
|
||||
----------------------------------------------
|
||||
|
||||
- fix issue38: nicer tracebacks on calls to hooks, particularly early
|
||||
configure/sessionstart ones
|
||||
|
||||
- fix missing skip reason/meta information in junitxml files, reported
|
||||
via http://lists.idyll.org/pipermail/testing-in-python/2011-March/003928.html
|
||||
|
||||
- fix issue34: avoid collection failure with "test" prefixed classes
|
||||
deriving from object.
|
||||
|
||||
- don't require zlib (and other libs) for genscript plugin without
|
||||
--genscript actually being used.
|
||||
|
||||
- speed up skips (by not doing a full traceback represenation
|
||||
internally)
|
||||
|
||||
- fix issue37: avoid invalid characters in junitxml's output
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
|
||||
Writing and reporting of assertions in tests
|
||||
============================================
|
||||
The writing and reporting of assertions in tests
|
||||
==================================================
|
||||
|
||||
.. _`assert with the assert statement`:
|
||||
|
||||
assert with the ``assert`` statement
|
||||
---------------------------------------------------------
|
||||
|
||||
``py.test`` allows to use the standard python ``assert`` for verifying
|
||||
``py.test`` allows you to use the standard python ``assert`` for verifying
|
||||
expectations and values in Python tests. For example, you can write the
|
||||
following in your tests::
|
||||
following::
|
||||
|
||||
# content of test_assert1.py
|
||||
def f():
|
||||
@@ -18,12 +18,12 @@ following in your tests::
|
||||
def test_function():
|
||||
assert f() == 4
|
||||
|
||||
to state that your object has a certain ``attribute``. In case this
|
||||
to assert that your object returns a certain value. If this
|
||||
assertion fails you will see the value of ``x``::
|
||||
|
||||
$ py.test test_assert1.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_assert1.py F
|
||||
@@ -40,27 +40,30 @@ assertion fails you will see the value of ``x``::
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
|
||||
Reporting details about the failing assertion is achieved by re-evaluating
|
||||
the assert expression and recording intermediate values.
|
||||
the assert expression and recording the intermediate values.
|
||||
|
||||
Note: If evaluating the assert expression has side effects you may get a
|
||||
warning that the intermediate values could not be determined safely. A
|
||||
common example for this issue is reading from a file and comparing in one
|
||||
line::
|
||||
common example of this issue is an assertion which reads from a file::
|
||||
|
||||
assert f.read() != '...'
|
||||
|
||||
This might fail but when re-interpretation comes along it might pass. You can
|
||||
rewrite this (and any other expression with side effects) easily, though::
|
||||
If this assertion fails then the re-evaluation will probably succeed!
|
||||
This is because ``f.read()`` will return an empty string when it is
|
||||
called the second time during the re-evaluation. However, it is
|
||||
easy to rewrite the assertion and avoid any trouble::
|
||||
|
||||
content = f.read()
|
||||
assert content != '...'
|
||||
|
||||
|
||||
assertions about expected exceptions
|
||||
------------------------------------------
|
||||
|
||||
In order to write assertions about raised exceptions, you can use
|
||||
``pytest.raises`` as a context manager like this::
|
||||
|
||||
import pytest
|
||||
with pytest.raises(ZeroDivisionError):
|
||||
1 / 0
|
||||
|
||||
@@ -91,7 +94,7 @@ Making use of context-sensitive comparisons
|
||||
|
||||
.. versionadded:: 2.0
|
||||
|
||||
py.test has rich support for providing context-sensitive informations
|
||||
py.test has rich support for providing context-sensitive information
|
||||
when it encounters comparisons. For example::
|
||||
|
||||
# content of test_assert2.py
|
||||
@@ -105,7 +108,7 @@ if you run this module::
|
||||
|
||||
$ py.test test_assert2.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_assert2.py F
|
||||
@@ -124,7 +127,7 @@ if you run this module::
|
||||
E '5'
|
||||
|
||||
test_assert2.py:5: AssertionError
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
========================= 1 failed in 0.03 seconds =========================
|
||||
|
||||
Special comparisons are done for a number of cases:
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ You can always use an interactive Python prompt and type::
|
||||
import pytest
|
||||
help(pytest)
|
||||
|
||||
to get an overview on available globally available helpers.
|
||||
to get an overview on the globally available helpers.
|
||||
|
||||
.. automodule:: pytest
|
||||
:members:
|
||||
@@ -27,20 +27,18 @@ You can ask for available builtin or project-custom
|
||||
pytestconfig
|
||||
the pytest config object with access to command line opts.
|
||||
capsys
|
||||
captures writes to sys.stdout/sys.stderr and makes
|
||||
them available successively via a ``capsys.readouterr()`` method
|
||||
which returns a ``(out, err)`` tuple of captured snapshot strings.
|
||||
enables capturing of writes to sys.stdout/sys.stderr and makes
|
||||
captured output available via ``capsys.readouterr()`` method calls
|
||||
which return a ``(out, err)`` tuple.
|
||||
|
||||
capfd
|
||||
captures writes to file descriptors 1 and 2 and makes
|
||||
snapshotted ``(out, err)`` string tuples available
|
||||
via the ``capsys.readouterr()`` method. If the underlying
|
||||
platform does not have ``os.dup`` (e.g. Jython) tests using
|
||||
this funcarg will automatically skip.
|
||||
enables capturing of writes to file descriptors 1 and 2 and makes
|
||||
captured output available via ``capsys.readouterr()`` method calls
|
||||
which return a ``(out, err)`` tuple.
|
||||
|
||||
tmpdir
|
||||
return a temporary directory path object
|
||||
unique to each test function invocation,
|
||||
which is unique to each test function invocation,
|
||||
created as a sub directory of the base temporary
|
||||
directory. The returned object is a `py.path.local`_
|
||||
path object.
|
||||
@@ -57,8 +55,8 @@ You can ask for available builtin or project-custom
|
||||
monkeypatch.delenv(name, value, raising=True)
|
||||
monkeypatch.syspath_prepend(path)
|
||||
|
||||
All modifications will be undone when the requesting
|
||||
test function finished its execution. The ``raising``
|
||||
All modifications will be undone after the requesting
|
||||
test function has finished. The ``raising``
|
||||
parameter determines if a KeyError or AttributeError
|
||||
will be raised if the set/deletion operation has no target.
|
||||
|
||||
@@ -68,3 +66,6 @@ You can ask for available builtin or project-custom
|
||||
* ``pop(category=None)``: return last warning matching the category.
|
||||
* ``clear()``: clear list of warnings
|
||||
|
||||
See http://docs.python.org/library/warnings.html for information
|
||||
on warning categories.
|
||||
|
||||
|
||||
@@ -1,16 +1,44 @@
|
||||
|
||||
.. _`captures`:
|
||||
|
||||
Capturing of stdout/stderr output
|
||||
Capturing of the stdout/stderr output
|
||||
=========================================================
|
||||
|
||||
By default ``stdout`` and ``stderr`` output is captured separately for
|
||||
setup and test execution code. If a test or a setup method fails its
|
||||
according output will usually be shown along with the failure traceback.
|
||||
In addition, ``stdin`` is set to a "null" object which will fail all
|
||||
attempts to read from it. This is important if some code paths in
|
||||
test otherwise might lead to waiting for input - which is usually
|
||||
not desired when running automated tests.
|
||||
Default stdout/stderr/stdin capturing behaviour
|
||||
---------------------------------------------------------
|
||||
|
||||
During test execution any output sent to ``stdout`` and ``stderr`` is
|
||||
captured. If a test or a setup method fails its according captured
|
||||
output will usually be shown along with the failure traceback.
|
||||
|
||||
In addition, ``stdin`` is set to a "null" object which will
|
||||
fail on attempts to read from it because it is rarely desired
|
||||
to wait for interactive input when running automated tests.
|
||||
|
||||
By default capturing is done by intercepting writes to low level
|
||||
file descriptors. This allows to capture output from simple
|
||||
print statements as well as output from a subprocess started by
|
||||
a test.
|
||||
|
||||
Setting capturing methods or disabling capturing
|
||||
-------------------------------------------------
|
||||
|
||||
There are two ways in which ``py.test`` can perform capturing:
|
||||
|
||||
* file descriptor (FD) level capturing (default): All writes going to the
|
||||
operating system file descriptors 1 and 2 will be captured.
|
||||
|
||||
* ``sys`` level capturing: Only writes to Python files ``sys.stdout``
|
||||
and ``sys.stderr`` will be captured. No capturing of writes to
|
||||
filedescriptors is performed.
|
||||
|
||||
.. _`disable capturing`:
|
||||
|
||||
You can influence output capturing mechanisms from the command line::
|
||||
|
||||
py.test -s # disable all capturing
|
||||
py.test --capture=sys # replace sys.stdout/stderr with in-mem files
|
||||
py.test --capture=fd # also point filedescriptors 1 and 2 to temp file
|
||||
|
||||
.. _printdebugging:
|
||||
|
||||
@@ -36,7 +64,7 @@ of the failing function and hide the other one::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_module.py .F
|
||||
@@ -50,33 +78,9 @@ of the failing function and hide the other one::
|
||||
|
||||
test_module.py:9: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
setting up <function test_func2 at 0x2897d70>
|
||||
setting up <function test_func2 at 0x238c410>
|
||||
==================== 1 failed, 1 passed in 0.02 seconds ====================
|
||||
|
||||
Setting capturing methods or disabling capturing
|
||||
-------------------------------------------------
|
||||
|
||||
There are two ways in which ``py.test`` can perform capturing:
|
||||
|
||||
* ``fd`` level capturing (default): All writes going to the operating
|
||||
system file descriptors 1 and 2 will be captured, for example writes such
|
||||
as ``os.write(1, 'hello')``. Capturing on ``fd``-level also includes
|
||||
**output from subprocesses**.
|
||||
|
||||
* ``sys`` level capturing: The ``sys.stdout`` and ``sys.stderr`` will
|
||||
will be replaced with in-memory files and the ``print`` builtin or
|
||||
output from code like ``sys.stderr.write(...)`` will be captured with
|
||||
this method.
|
||||
|
||||
.. _`disable capturing`:
|
||||
|
||||
You can influence output capturing mechanisms from the command line::
|
||||
|
||||
py.test -s # disable all capturing
|
||||
py.test --capture=sys # replace sys.stdout/stderr with in-mem files
|
||||
py.test --capture=fd # also point filedescriptors 1 and 2 to temp file
|
||||
|
||||
|
||||
Accessing captured output from a test function
|
||||
---------------------------------------------------
|
||||
|
||||
|
||||
15
doc/conf.py
15
doc/conf.py
@@ -42,7 +42,7 @@ master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'pytest'
|
||||
copyright = u'2010, holger krekel et aliter'
|
||||
copyright = u'2011, holger krekel et alii'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
@@ -52,8 +52,9 @@ copyright = u'2010, holger krekel et aliter'
|
||||
version = '2.0'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
import py, pytest
|
||||
assert py.path.local().relto(py.path.local(pytest.__file__).dirpath().dirpath())
|
||||
release = pytest.__version__
|
||||
version = ".".join(release.split(".")[:2])
|
||||
#assert py.path.local().relto(py.path.local(pytest.__file__).dirpath().dirpath())
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
@@ -185,7 +186,7 @@ htmlhelp_basename = 'pytestdoc'
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
('index', 'pytest.tex', u'pytest Documentation',
|
||||
u'holger krekel et aliter', 'manual'),
|
||||
u'holger krekel et alii', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
@@ -218,7 +219,7 @@ latex_documents = [
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'pytest', u'pytest Documentation',
|
||||
[u'holger krekel et aliter'], 1)
|
||||
[u'holger krekel et alii'], 1)
|
||||
]
|
||||
|
||||
|
||||
@@ -226,9 +227,9 @@ man_pages = [
|
||||
|
||||
# Bibliographic Dublin Core info.
|
||||
epub_title = u'pytest'
|
||||
epub_author = u'holger krekel et aliter'
|
||||
epub_publisher = u'holger krekel et aliter'
|
||||
epub_copyright = u'2010, holger krekel et aliter'
|
||||
epub_author = u'holger krekel et alii'
|
||||
epub_publisher = u'holger krekel et alii'
|
||||
epub_copyright = u'2010, holger krekel et alii'
|
||||
|
||||
# The language of the text. It defaults to the language option
|
||||
# or en if the language is not set.
|
||||
|
||||
@@ -4,20 +4,20 @@ basic test configuration
|
||||
Command line options and configuration file settings
|
||||
-----------------------------------------------------------------
|
||||
|
||||
You can get help on options and ini-config values by running::
|
||||
You can get help on command line options and values in INI-style
|
||||
configurations files by using the general help option::
|
||||
|
||||
py.test -h # prints options _and_ config file settings
|
||||
|
||||
This will display command line and configuration file settings
|
||||
which were registered by installed plugins.
|
||||
|
||||
How test configuration is read from configuration INI-files
|
||||
-------------------------------------------------------------
|
||||
|
||||
how test configuration is read from setup/tox ini-files
|
||||
--------------------------------------------------------
|
||||
|
||||
py.test searched for the first matching ini-style configuration file
|
||||
py.test searches for the first matching ini-style configuration file
|
||||
in the directories of command line argument and the directories above.
|
||||
It looks for filenames in this order::
|
||||
It looks for file basenames in this order::
|
||||
|
||||
pytest.ini
|
||||
tox.ini
|
||||
@@ -44,29 +44,27 @@ is used to start the search.
|
||||
.. _`how to change command line options defaults`:
|
||||
.. _`adding default options`:
|
||||
|
||||
how to change command line options defaults
|
||||
How to change command line options defaults
|
||||
------------------------------------------------
|
||||
|
||||
py.test provides a simple way to set some default
|
||||
command line options. For example, if you want
|
||||
to always see detailed info on skipped and xfailed
|
||||
tests, as well as have terser "dot progress output",
|
||||
you can add this to your root directory::
|
||||
It can be tedious to type the same series of command line options
|
||||
every time you use py.test . For example, if you always want to see
|
||||
detailed info on skipped and xfailed tests, as well as have terser "dot"
|
||||
progress output, you can write it into a configuration file::
|
||||
|
||||
# content of pytest.ini
|
||||
# (or tox.ini or setup.cfg)
|
||||
[pytest]
|
||||
addopts = -rsxX -q
|
||||
|
||||
From now on, running ``py.test`` will implicitely add
|
||||
the specified options.
|
||||
From now on, running ``py.test`` will add the specified options.
|
||||
|
||||
builtin configuration file options
|
||||
----------------------------------------------
|
||||
|
||||
.. confval:: minversion
|
||||
|
||||
specifies a minimal pytest version needed for running tests.
|
||||
specifies a minimal pytest version required for running tests.
|
||||
|
||||
minversion = 2.1 # will fail if we run with pytest-2.0
|
||||
|
||||
@@ -97,14 +95,14 @@ builtin configuration file options
|
||||
[!seq] matches any char not in seq
|
||||
|
||||
Default patterns are ``.* _* CVS {args}``. Setting a ``norecurse``
|
||||
replaces the default. Here is a customizing example for avoiding
|
||||
a different set of directories::
|
||||
replaces the default. Here is an example of how to avoid
|
||||
certain directories::
|
||||
|
||||
# content of setup.cfg
|
||||
[pytest]
|
||||
norecursedirs = .svn _build tmp*
|
||||
|
||||
This would tell py.test to not recurse into typical subversion or
|
||||
This would tell py.test to not look into typical subversion or
|
||||
sphinx-build directories or into any ``tmp`` prefixed directory.
|
||||
|
||||
.. confval:: python_files
|
||||
|
||||
@@ -22,7 +22,7 @@ download and unpack a TAR file::
|
||||
|
||||
http://pypi.python.org/pypi/pytest/
|
||||
|
||||
activating a checkout with setuptools
|
||||
Activating a checkout with setuptools
|
||||
--------------------------------------------
|
||||
|
||||
With a working Distribute_ or setuptools_ installation you can type::
|
||||
@@ -31,4 +31,10 @@ With a working Distribute_ or setuptools_ installation you can type::
|
||||
|
||||
in order to work inline with the tools and the lib of your checkout.
|
||||
|
||||
If this command complains that it could not find the required version
|
||||
of "py" then you need to use the development pypi repository::
|
||||
|
||||
python setup.py develop -i http://pypi.testrun.org
|
||||
|
||||
|
||||
.. include:: links.inc
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
doctest integration for modules and test files.
|
||||
doctest integration for modules and test files
|
||||
=========================================================
|
||||
|
||||
By default all files matching the ``test*.txt`` pattern will
|
||||
@@ -44,9 +44,9 @@ then you can just invoke ``py.test`` without command line options::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
mymodule.py .
|
||||
|
||||
========================= 1 passed in 0.02 seconds =========================
|
||||
========================= 1 passed in 0.40 seconds =========================
|
||||
|
||||
@@ -48,7 +48,7 @@ the output.
|
||||
example: decorating a funcarg in a test module
|
||||
--------------------------------------------------------------
|
||||
|
||||
For larger scale setups it's sometimes useful to decorare
|
||||
For larger scale setups it's sometimes useful to decorate
|
||||
a funcarg just for a particular test module. We can
|
||||
extend the `accept example`_ by putting this in our test module:
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ need more examples or have questions. Also take a look at the :ref:`comprehensiv
|
||||
|
||||
reportingdemo.txt
|
||||
simple.txt
|
||||
pythoncollection.txt
|
||||
mysetup.txt
|
||||
parametrize.txt
|
||||
pythoncollection.txt
|
||||
nonpython.txt
|
||||
|
||||
@@ -26,7 +26,7 @@ Let's write a simple test function using a ``mysetup`` funcarg::
|
||||
To run this test py.test needs to find and call a factory to
|
||||
obtain the required ``mysetup`` function argument. To make
|
||||
an according factory findable we write down a specifically named factory
|
||||
method in a :ref:`local plugin`::
|
||||
method in a :ref:`local plugin <localplugin>` ::
|
||||
|
||||
# content of conftest.py
|
||||
from myapp import MyApp
|
||||
@@ -49,7 +49,7 @@ You can now run the test::
|
||||
|
||||
$ py.test test_sample.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_sample.py F
|
||||
@@ -57,7 +57,7 @@ You can now run the test::
|
||||
================================= FAILURES =================================
|
||||
_______________________________ test_answer ________________________________
|
||||
|
||||
mysetup = <conftest.MySetup instance at 0x2526440>
|
||||
mysetup = <conftest.MySetup instance at 0x2c1b128>
|
||||
|
||||
def test_answer(mysetup):
|
||||
app = mysetup.myapp()
|
||||
@@ -122,14 +122,14 @@ Running it yields::
|
||||
|
||||
$ py.test test_ssh.py -rs
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_ssh.py s
|
||||
========================= short test summary info ==========================
|
||||
SKIP [1] /tmp/doc-exec-166/conftest.py:22: specify ssh host with --ssh
|
||||
SKIP [1] /tmp/doc-exec-37/conftest.py:22: specify ssh host with --ssh
|
||||
|
||||
======================== 1 skipped in 0.02 seconds =========================
|
||||
======================== 1 skipped in 0.01 seconds =========================
|
||||
|
||||
If you specify a command line option like ``py.test --ssh=python.org`` the test will execute as expected.
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ now execute the test specification::
|
||||
|
||||
nonpython $ py.test test_simple.yml
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_simple.yml .F
|
||||
@@ -37,7 +37,7 @@ now execute the test specification::
|
||||
usecase execution failed
|
||||
spec failed: 'some': 'other'
|
||||
no further details known at this point.
|
||||
==================== 1 failed, 1 passed in 0.06 seconds ====================
|
||||
==================== 1 failed, 1 passed in 0.24 seconds ====================
|
||||
|
||||
You get one dot for the passing ``sub1: sub1`` check and one failure.
|
||||
Obviously in the above ``conftest.py`` you'll want to implement a more
|
||||
@@ -56,7 +56,7 @@ reporting in ``verbose`` mode::
|
||||
|
||||
nonpython $ py.test -v
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1 -- /home/hpk/venv/0/bin/python
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3 -- /home/hpk/venv/0/bin/python
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_simple.yml:1: usecase: ok PASSED
|
||||
@@ -67,12 +67,17 @@ reporting in ``verbose`` mode::
|
||||
usecase execution failed
|
||||
spec failed: 'some': 'other'
|
||||
no further details known at this point.
|
||||
==================== 1 failed, 1 passed in 0.06 seconds ====================
|
||||
==================== 1 failed, 1 passed in 0.07 seconds ====================
|
||||
|
||||
While developing your custom test collection and execution it's also
|
||||
interesting to just look at the collection tree::
|
||||
|
||||
nonpython $ py.test --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 2 items
|
||||
<YamlFile 'test_simple.yml'>
|
||||
<YamlItem 'ok'>
|
||||
<YamlItem 'hello'>
|
||||
|
||||
============================= in 0.07 seconds =============================
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
|
||||
.. _paramexamples:
|
||||
|
||||
parametrizing tests
|
||||
=================================================
|
||||
|
||||
@@ -6,6 +8,142 @@ py.test allows to easily implement your own custom
|
||||
parametrization scheme for tests. Here we provide
|
||||
some examples for inspiration and re-use.
|
||||
|
||||
generating parameters combinations, depending on command line
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
Let's say we want to execute a test with different parameters
|
||||
and the parameter range shall be determined by a command
|
||||
line argument. Let's first write a simple computation test::
|
||||
|
||||
# content of test_compute.py
|
||||
|
||||
def test_compute(param1):
|
||||
assert param1 < 4
|
||||
|
||||
Now we add a test configuration like this::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption("--all", action="store_true",
|
||||
help="run all combinations")
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
if 'param1' in metafunc.funcargnames:
|
||||
if metafunc.config.option.all:
|
||||
end = 5
|
||||
else:
|
||||
end = 2
|
||||
for i in range(end):
|
||||
metafunc.addcall(funcargs={'param1': i})
|
||||
|
||||
This means that we only run 2 tests if we do not pass ``--all``::
|
||||
|
||||
$ py.test -q test_compute.py
|
||||
collecting ... collected 2 items
|
||||
..
|
||||
2 passed in 0.01 seconds
|
||||
|
||||
We run only two computations, so we see two dots.
|
||||
let's run the full monty::
|
||||
|
||||
$ py.test -q --all
|
||||
collecting ... collected 5 items
|
||||
....F
|
||||
================================= FAILURES =================================
|
||||
_____________________________ test_compute[4] ______________________________
|
||||
|
||||
param1 = 4
|
||||
|
||||
def test_compute(param1):
|
||||
> assert param1 < 4
|
||||
E assert 4 < 4
|
||||
|
||||
test_compute.py:3: AssertionError
|
||||
1 failed, 4 passed in 0.03 seconds
|
||||
|
||||
As expected when running the full range of ``param1`` values
|
||||
we'll get an error on the last one.
|
||||
|
||||
Deferring the setup of parametrizing resources
|
||||
---------------------------------------------------
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
The parametrization of test functions happens at collection
|
||||
time. It is often a good idea to setup possibly expensive
|
||||
resources only when the actual test is run. Here is a simple
|
||||
example how you can achieve that::
|
||||
|
||||
# content of test_backends.py
|
||||
|
||||
import pytest
|
||||
def test_db_initialized(db):
|
||||
# a dummy test
|
||||
if db.__class__.__name__ == "DB2":
|
||||
pytest.fail("deliberately failing for demo purposes")
|
||||
|
||||
Now we add a test configuration that takes care to generate
|
||||
two invocations of the ``test_db_initialized`` function and
|
||||
furthermore a factory that creates a database object when
|
||||
each test is actually run::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
if 'db' in metafunc.funcargnames:
|
||||
metafunc.addcall(param="d1")
|
||||
metafunc.addcall(param="d2")
|
||||
|
||||
class DB1:
|
||||
"one database object"
|
||||
class DB2:
|
||||
"alternative database object"
|
||||
|
||||
def pytest_funcarg__db(request):
|
||||
if request.param == "d1":
|
||||
return DB1()
|
||||
elif request.param == "d2":
|
||||
return DB2()
|
||||
else:
|
||||
raise ValueError("invalid internal test config")
|
||||
|
||||
Let's first see how it looks like at collection time::
|
||||
|
||||
$ py.test test_backends.py --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 2 items
|
||||
<Module 'test_backends.py'>
|
||||
<Function 'test_db_initialized[0]'>
|
||||
<Function 'test_db_initialized[1]'>
|
||||
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
And then when we run the test::
|
||||
|
||||
$ py.test -q test_backends.py
|
||||
collecting ... collected 2 items
|
||||
.F
|
||||
================================= FAILURES =================================
|
||||
__________________________ test_db_initialized[1] __________________________
|
||||
|
||||
db = <conftest.DB2 instance at 0x2bf7bd8>
|
||||
|
||||
def test_db_initialized(db):
|
||||
# a dummy test
|
||||
if db.__class__.__name__ == "DB2":
|
||||
> pytest.fail("deliberately failing for demo purposes")
|
||||
E Failed: deliberately failing for demo purposes
|
||||
|
||||
test_backends.py:6: Failed
|
||||
1 failed, 1 passed in 0.03 seconds
|
||||
|
||||
Now you see that one invocation of the test passes and another fails,
|
||||
as it to be expected.
|
||||
|
||||
Parametrizing test methods through per-class configuration
|
||||
--------------------------------------------------------------
|
||||
|
||||
@@ -41,12 +179,23 @@ Running it means we are two tests for each test functions, using
|
||||
the respective settings::
|
||||
|
||||
$ py.test -q
|
||||
collecting ... collected 4 items
|
||||
F..F
|
||||
collecting ... collected 6 items
|
||||
.FF..F
|
||||
================================= FAILURES =================================
|
||||
__________________________ test_db_initialized[1] __________________________
|
||||
|
||||
db = <conftest.DB2 instance at 0x19bcb90>
|
||||
|
||||
def test_db_initialized(db):
|
||||
# a dummy test
|
||||
if db.__class__.__name__ == "DB2":
|
||||
> pytest.fail("deliberately failing for demo purposes")
|
||||
E Failed: deliberately failing for demo purposes
|
||||
|
||||
test_backends.py:6: Failed
|
||||
_________________________ TestClass.test_equals[0] _________________________
|
||||
|
||||
self = <test_parametrize.TestClass instance at 0x1521440>, a = 1, b = 2
|
||||
self = <test_parametrize.TestClass instance at 0x19ca8c0>, a = 1, b = 2
|
||||
|
||||
def test_equals(self, a, b):
|
||||
> assert a == b
|
||||
@@ -55,14 +204,14 @@ the respective settings::
|
||||
test_parametrize.py:17: AssertionError
|
||||
______________________ TestClass.test_zerodivision[1] ______________________
|
||||
|
||||
self = <test_parametrize.TestClass instance at 0x158aa70>, a = 3, b = 2
|
||||
self = <test_parametrize.TestClass instance at 0x19cd4d0>, a = 3, b = 2
|
||||
|
||||
def test_zerodivision(self, a, b):
|
||||
> pytest.raises(ZeroDivisionError, "a/b")
|
||||
E Failed: DID NOT RAISE
|
||||
|
||||
test_parametrize.py:20: Failed
|
||||
2 failed, 2 passed in 0.03 seconds
|
||||
3 failed, 3 passed in 0.05 seconds
|
||||
|
||||
Parametrizing test methods through a decorator
|
||||
--------------------------------------------------------------
|
||||
@@ -103,7 +252,7 @@ Running it gives similar results as before::
|
||||
================================= FAILURES =================================
|
||||
_________________________ TestClass.test_equals[0] _________________________
|
||||
|
||||
self = <test_parametrize2.TestClass instance at 0x22a77e8>, a = 1, b = 2
|
||||
self = <test_parametrize2.TestClass instance at 0x1cf1170>, a = 1, b = 2
|
||||
|
||||
@params([dict(a=1, b=2), dict(a=3, b=3), ])
|
||||
def test_equals(self, a, b):
|
||||
@@ -113,7 +262,7 @@ Running it gives similar results as before::
|
||||
test_parametrize2.py:19: AssertionError
|
||||
______________________ TestClass.test_zerodivision[1] ______________________
|
||||
|
||||
self = <test_parametrize2.TestClass instance at 0x2332a70>, a = 3, b = 2
|
||||
self = <test_parametrize2.TestClass instance at 0x1d02170>, a = 3, b = 2
|
||||
|
||||
@params([dict(a=1, b=0), dict(a=3, b=2)])
|
||||
def test_zerodivision(self, a, b):
|
||||
@@ -127,7 +276,7 @@ checking serialization between Python interpreters
|
||||
--------------------------------------------------------------
|
||||
|
||||
Here is a stripped down real-life example of using parametrized
|
||||
testing for testing serialization betwee different interpreters.
|
||||
testing for testing serialization between different interpreters.
|
||||
We define a ``test_basic_objects`` function which is to be run
|
||||
with different sets of arguments for its three arguments::
|
||||
|
||||
@@ -142,4 +291,4 @@ Running it (with Python-2.4 through to Python2.7 installed)::
|
||||
. $ py.test -q multipython.py
|
||||
collecting ... collected 75 items
|
||||
....s....s....s....ssssss....s....s....s....ssssss....s....s....s....ssssss
|
||||
48 passed, 27 skipped in 2.09 seconds
|
||||
48 passed, 27 skipped in 2.04 seconds
|
||||
|
||||
@@ -42,11 +42,16 @@ in functions and classes. For example, if we have::
|
||||
then the test collection looks like this::
|
||||
|
||||
$ py.test --collectonly
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 2 items
|
||||
<Module 'check_myapp.py'>
|
||||
<Class 'CheckMyApp'>
|
||||
<Instance '()'>
|
||||
<Function 'check_simple'>
|
||||
<Function 'check_complex'>
|
||||
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
interpret cmdline arguments as Python packages
|
||||
-----------------------------------------------------
|
||||
@@ -76,9 +81,14 @@ finding out what is collected
|
||||
You can always peek at the collection tree without running tests like this::
|
||||
|
||||
. $ py.test --collectonly pythoncollection.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 3 items
|
||||
<Module 'pythoncollection.py'>
|
||||
<Function 'test_function'>
|
||||
<Class 'TestClass'>
|
||||
<Instance '()'>
|
||||
<Function 'test_method'>
|
||||
<Function 'test_anothermethod'>
|
||||
|
||||
============================= in 0.01 seconds =============================
|
||||
|
||||
@@ -13,7 +13,7 @@ get on the terminal - we are working on that):
|
||||
|
||||
assertion $ py.test failure_demo.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 39 items
|
||||
|
||||
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
|
||||
@@ -30,7 +30,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:15: AssertionError
|
||||
_________________________ TestFailing.test_simple __________________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x1b42950>
|
||||
self = <failure_demo.TestFailing object at 0x14b9890>
|
||||
|
||||
def test_simple(self):
|
||||
def f():
|
||||
@@ -40,13 +40,13 @@ get on the terminal - we are working on that):
|
||||
|
||||
> assert f() == g()
|
||||
E assert 42 == 43
|
||||
E + where 42 = <function f at 0x1b33de8>()
|
||||
E + and 43 = <function g at 0x1b47140>()
|
||||
E + where 42 = <function f at 0x14a5e60>()
|
||||
E + and 43 = <function g at 0x14bc1b8>()
|
||||
|
||||
failure_demo.py:28: AssertionError
|
||||
____________________ TestFailing.test_simple_multiline _____________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x1b42c50>
|
||||
self = <failure_demo.TestFailing object at 0x14b9b50>
|
||||
|
||||
def test_simple_multiline(self):
|
||||
otherfunc_multi(
|
||||
@@ -66,19 +66,19 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:12: AssertionError
|
||||
___________________________ TestFailing.test_not ___________________________
|
||||
|
||||
self = <failure_demo.TestFailing object at 0x1b42190>
|
||||
self = <failure_demo.TestFailing object at 0x14b9790>
|
||||
|
||||
def test_not(self):
|
||||
def f():
|
||||
return 42
|
||||
> assert not f()
|
||||
E assert not 42
|
||||
E + where 42 = <function f at 0x1b47320>()
|
||||
E + where 42 = <function f at 0x14bc398>()
|
||||
|
||||
failure_demo.py:38: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_text _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1b42150>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x14aa810>
|
||||
|
||||
def test_eq_text(self):
|
||||
> assert 'spam' == 'eggs'
|
||||
@@ -89,7 +89,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:42: AssertionError
|
||||
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1b48610>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1576190>
|
||||
|
||||
def test_eq_similar_text(self):
|
||||
> assert 'foo 1 bar' == 'foo 2 bar'
|
||||
@@ -102,7 +102,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:45: AssertionError
|
||||
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1b38f90>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x14a7450>
|
||||
|
||||
def test_eq_multiline_text(self):
|
||||
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
|
||||
@@ -115,7 +115,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:48: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_long_text _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1b42cd0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x14b9350>
|
||||
|
||||
def test_eq_long_text(self):
|
||||
a = '1'*100 + 'a' + '2'*100
|
||||
@@ -132,7 +132,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:53: AssertionError
|
||||
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1ba6a90>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x15764d0>
|
||||
|
||||
def test_eq_long_text_multiline(self):
|
||||
a = '1\n'*100 + 'a' + '2\n'*100
|
||||
@@ -156,7 +156,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:58: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_list _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1ba6bd0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1576350>
|
||||
|
||||
def test_eq_list(self):
|
||||
> assert [0, 1, 2] == [0, 1, 3]
|
||||
@@ -166,7 +166,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:61: AssertionError
|
||||
______________ TestSpecialisedExplanations.test_eq_list_long _______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1b42910>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1576f10>
|
||||
|
||||
def test_eq_list_long(self):
|
||||
a = [0]*100 + [1] + [3]*100
|
||||
@@ -178,7 +178,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:66: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_dict _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1ba6f90>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1576390>
|
||||
|
||||
def test_eq_dict(self):
|
||||
> assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
|
||||
@@ -191,7 +191,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:69: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_eq_set __________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1b485d0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x14bd790>
|
||||
|
||||
def test_eq_set(self):
|
||||
> assert set([0, 10, 11, 12]) == set([0, 20, 21])
|
||||
@@ -207,7 +207,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:72: AssertionError
|
||||
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1ba2850>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x157a7d0>
|
||||
|
||||
def test_eq_longer_list(self):
|
||||
> assert [1,2] == [1,2,3]
|
||||
@@ -217,7 +217,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:75: AssertionError
|
||||
_________________ TestSpecialisedExplanations.test_in_list _________________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1ba2f10>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x157ab50>
|
||||
|
||||
def test_in_list(self):
|
||||
> assert 1 in [0, 2, 3, 4, 5]
|
||||
@@ -226,7 +226,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:78: AssertionError
|
||||
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1ba2990>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x157a090>
|
||||
|
||||
def test_not_in_text_multiline(self):
|
||||
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
|
||||
@@ -244,7 +244,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:82: AssertionError
|
||||
___________ TestSpecialisedExplanations.test_not_in_text_single ____________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1b42110>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x14aaa50>
|
||||
|
||||
def test_not_in_text_single(self):
|
||||
text = 'single foo line'
|
||||
@@ -257,7 +257,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:86: AssertionError
|
||||
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1ba65d0>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x157ab90>
|
||||
|
||||
def test_not_in_text_single_long(self):
|
||||
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
|
||||
@@ -270,7 +270,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:90: AssertionError
|
||||
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
|
||||
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1ba2c50>
|
||||
self = <failure_demo.TestSpecialisedExplanations object at 0x1576ed0>
|
||||
|
||||
def test_not_in_text_single_long_term(self):
|
||||
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
|
||||
@@ -289,7 +289,7 @@ get on the terminal - we are working on that):
|
||||
i = Foo()
|
||||
> assert i.b == 2
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x1ba2ad0>.b
|
||||
E + where 1 = <failure_demo.Foo object at 0x157a910>.b
|
||||
|
||||
failure_demo.py:101: AssertionError
|
||||
_________________________ test_attribute_instance __________________________
|
||||
@@ -299,8 +299,8 @@ get on the terminal - we are working on that):
|
||||
b = 1
|
||||
> assert Foo().b == 2
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x1ba2110>.b
|
||||
E + where <failure_demo.Foo object at 0x1ba2110> = <class 'failure_demo.Foo'>()
|
||||
E + where 1 = <failure_demo.Foo object at 0x1584610>.b
|
||||
E + where <failure_demo.Foo object at 0x1584610> = <class 'failure_demo.Foo'>()
|
||||
|
||||
failure_demo.py:107: AssertionError
|
||||
__________________________ test_attribute_failure __________________________
|
||||
@@ -316,7 +316,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:116:
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
|
||||
self = <failure_demo.Foo object at 0x1ba2a90>
|
||||
self = <failure_demo.Foo object at 0x157a3d0>
|
||||
|
||||
def _get_b(self):
|
||||
> raise Exception('Failed to get attrib')
|
||||
@@ -332,15 +332,15 @@ get on the terminal - we are working on that):
|
||||
b = 2
|
||||
> assert Foo().b == Bar().b
|
||||
E assert 1 == 2
|
||||
E + where 1 = <failure_demo.Foo object at 0x1ba2950>.b
|
||||
E + where <failure_demo.Foo object at 0x1ba2950> = <class 'failure_demo.Foo'>()
|
||||
E + and 2 = <failure_demo.Bar object at 0x1ba2390>.b
|
||||
E + where <failure_demo.Bar object at 0x1ba2390> = <class 'failure_demo.Bar'>()
|
||||
E + where 1 = <failure_demo.Foo object at 0x157a1d0>.b
|
||||
E + where <failure_demo.Foo object at 0x157a1d0> = <class 'failure_demo.Foo'>()
|
||||
E + and 2 = <failure_demo.Bar object at 0x157a9d0>.b
|
||||
E + where <failure_demo.Bar object at 0x157a9d0> = <class 'failure_demo.Bar'>()
|
||||
|
||||
failure_demo.py:124: AssertionError
|
||||
__________________________ TestRaises.test_raises __________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1bb3488>
|
||||
self = <failure_demo.TestRaises instance at 0x157d7e8>
|
||||
|
||||
def test_raises(self):
|
||||
s = 'qwe'
|
||||
@@ -352,10 +352,10 @@ get on the terminal - we are working on that):
|
||||
> int(s)
|
||||
E ValueError: invalid literal for int() with base 10: 'qwe'
|
||||
|
||||
<0-codegen /home/hpk/p/pytest/_pytest/python.py:822>:1: ValueError
|
||||
<0-codegen /home/hpk/p/pytest/_pytest/python.py:831>:1: ValueError
|
||||
______________________ TestRaises.test_raises_doesnt _______________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1bb3098>
|
||||
self = <failure_demo.TestRaises instance at 0x158ae60>
|
||||
|
||||
def test_raises_doesnt(self):
|
||||
> raises(IOError, "int('3')")
|
||||
@@ -364,7 +364,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:136: Failed
|
||||
__________________________ TestRaises.test_raise ___________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1ba7d40>
|
||||
self = <failure_demo.TestRaises instance at 0x158bb90>
|
||||
|
||||
def test_raise(self):
|
||||
> raise ValueError("demo error")
|
||||
@@ -373,7 +373,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:139: ValueError
|
||||
________________________ TestRaises.test_tupleerror ________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1b5cc68>
|
||||
self = <failure_demo.TestRaises instance at 0x157cd40>
|
||||
|
||||
def test_tupleerror(self):
|
||||
> a,b = [1]
|
||||
@@ -382,7 +382,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:142: ValueError
|
||||
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1bb1488>
|
||||
self = <failure_demo.TestRaises instance at 0x157d488>
|
||||
|
||||
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
|
||||
l = [1,2,3]
|
||||
@@ -395,7 +395,7 @@ get on the terminal - we are working on that):
|
||||
l is [1, 2, 3]
|
||||
________________________ TestRaises.test_some_error ________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1bb9128>
|
||||
self = <failure_demo.TestRaises instance at 0x158a7e8>
|
||||
|
||||
def test_some_error(self):
|
||||
> if namenotexi:
|
||||
@@ -423,7 +423,7 @@ get on the terminal - we are working on that):
|
||||
<2-codegen 'abc-123' /home/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError
|
||||
____________________ TestMoreErrors.test_complex_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1bb8f80>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x158f8c0>
|
||||
|
||||
def test_complex_error(self):
|
||||
def f():
|
||||
@@ -452,7 +452,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:5: AssertionError
|
||||
___________________ TestMoreErrors.test_z1_unpack_error ____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1bab200>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x158c998>
|
||||
|
||||
def test_z1_unpack_error(self):
|
||||
l = []
|
||||
@@ -462,7 +462,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:179: ValueError
|
||||
____________________ TestMoreErrors.test_z2_type_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1bb36c8>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x15854d0>
|
||||
|
||||
def test_z2_type_error(self):
|
||||
l = 3
|
||||
@@ -472,20 +472,20 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:183: TypeError
|
||||
______________________ TestMoreErrors.test_startswith ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1bbce60>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x14b65a8>
|
||||
|
||||
def test_startswith(self):
|
||||
s = "123"
|
||||
g = "456"
|
||||
> assert s.startswith(g)
|
||||
E assert False
|
||||
E + where False = <built-in method startswith of str object at 0x1ad6bd0>('456')
|
||||
E + where <built-in method startswith of str object at 0x1ad6bd0> = '123'.startswith
|
||||
E + where False = <built-in method startswith of str object at 0x14902a0>('456')
|
||||
E + where <built-in method startswith of str object at 0x14902a0> = '123'.startswith
|
||||
|
||||
failure_demo.py:188: AssertionError
|
||||
__________________ TestMoreErrors.test_startswith_nested ___________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1bbeb48>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x158d518>
|
||||
|
||||
def test_startswith_nested(self):
|
||||
def f():
|
||||
@@ -494,15 +494,15 @@ get on the terminal - we are working on that):
|
||||
return "456"
|
||||
> assert f().startswith(g())
|
||||
E assert False
|
||||
E + where False = <built-in method startswith of str object at 0x1ad6bd0>('456')
|
||||
E + where <built-in method startswith of str object at 0x1ad6bd0> = '123'.startswith
|
||||
E + where '123' = <function f at 0x1baade8>()
|
||||
E + and '456' = <function g at 0x1baad70>()
|
||||
E + where False = <built-in method startswith of str object at 0x14902a0>('456')
|
||||
E + where <built-in method startswith of str object at 0x14902a0> = '123'.startswith
|
||||
E + where '123' = <function f at 0x15806e0>()
|
||||
E + and '456' = <function g at 0x1580aa0>()
|
||||
|
||||
failure_demo.py:195: AssertionError
|
||||
_____________________ TestMoreErrors.test_global_func ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1bbe098>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1593440>
|
||||
|
||||
def test_global_func(self):
|
||||
> assert isinstance(globf(42), float)
|
||||
@@ -513,19 +513,19 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:198: AssertionError
|
||||
_______________________ TestMoreErrors.test_instance _______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1ba7bd8>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x15952d8>
|
||||
|
||||
def test_instance(self):
|
||||
self.x = 6*7
|
||||
> assert self.x != 42
|
||||
E assert 42 != 42
|
||||
E + where 42 = 42
|
||||
E + where 42 = <failure_demo.TestMoreErrors instance at 0x1ba7bd8>.x
|
||||
E + where 42 = <failure_demo.TestMoreErrors instance at 0x15952d8>.x
|
||||
|
||||
failure_demo.py:202: AssertionError
|
||||
_______________________ TestMoreErrors.test_compare ________________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1bbca28>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1593758>
|
||||
|
||||
def test_compare(self):
|
||||
> assert globf(10) < 5
|
||||
@@ -535,7 +535,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:205: AssertionError
|
||||
_____________________ TestMoreErrors.test_try_finally ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1bc0908>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x157cd88>
|
||||
|
||||
def test_try_finally(self):
|
||||
x = 1
|
||||
@@ -544,4 +544,4 @@ get on the terminal - we are working on that):
|
||||
E assert 1 == 0
|
||||
|
||||
failure_demo.py:210: AssertionError
|
||||
======================== 39 failed in 0.22 seconds =========================
|
||||
======================== 39 failed in 0.23 seconds =========================
|
||||
|
||||
@@ -53,7 +53,7 @@ Let's run this without supplying our new command line option::
|
||||
test_sample.py:6: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
first
|
||||
1 failed in 0.02 seconds
|
||||
1 failed in 0.03 seconds
|
||||
|
||||
And now with supplying a command line option::
|
||||
|
||||
@@ -84,64 +84,6 @@ rather pass in different or more complex objects. See the
|
||||
next example or refer to :ref:`mysetup` for more information
|
||||
on real-life examples.
|
||||
|
||||
generating parameters combinations, depending on command line
|
||||
----------------------------------------------------------------------------
|
||||
|
||||
.. regendoc:wipe
|
||||
|
||||
Let's say we want to execute a test with different parameters
|
||||
and the parameter range shall be determined by a command
|
||||
line argument. Let's first write a simple computation test::
|
||||
|
||||
# content of test_compute.py
|
||||
|
||||
def test_compute(param1):
|
||||
assert param1 < 4
|
||||
|
||||
Now we add a test configuration like this::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption("--all", action="store_true",
|
||||
help="run all combinations")
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
if 'param1' in metafunc.funcargnames:
|
||||
if metafunc.config.option.all:
|
||||
end = 5
|
||||
else:
|
||||
end = 2
|
||||
for i in range(end):
|
||||
metafunc.addcall(funcargs={'param1': i})
|
||||
|
||||
This means that we only run 2 tests if we do not pass ``--all``::
|
||||
|
||||
$ py.test -q test_compute.py
|
||||
collecting ... collected 2 items
|
||||
..
|
||||
2 passed in 0.01 seconds
|
||||
|
||||
We run only two computations, so we see two dots.
|
||||
let's run the full monty::
|
||||
|
||||
$ py.test -q --all
|
||||
collecting ... collected 5 items
|
||||
....F
|
||||
================================= FAILURES =================================
|
||||
_____________________________ test_compute[4] ______________________________
|
||||
|
||||
param1 = 4
|
||||
|
||||
def test_compute(param1):
|
||||
> assert param1 < 4
|
||||
E assert 4 < 4
|
||||
|
||||
test_compute.py:3: AssertionError
|
||||
1 failed, 4 passed in 0.03 seconds
|
||||
|
||||
As expected when running the full range of ``param1`` values
|
||||
we'll get an error on the last one.
|
||||
|
||||
dynamically adding command line options
|
||||
--------------------------------------------------------------
|
||||
@@ -167,15 +109,15 @@ directory with the above conftest.py::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
gw0 I / gw1 I / gw2 I / gw3 I
|
||||
gw0 [0] / gw1 [0] / gw2 [0] / gw3 [0]
|
||||
|
||||
scheduling tests via LoadScheduling
|
||||
|
||||
============================= in 0.29 seconds =============================
|
||||
============================= in 0.52 seconds =============================
|
||||
|
||||
.. _`retrieved by hooks as item keywords`:
|
||||
.. _`excontrolskip`:
|
||||
|
||||
control skipping of tests according to command line option
|
||||
--------------------------------------------------------------
|
||||
@@ -214,20 +156,20 @@ and when running it will see a skipped "slow" test::
|
||||
|
||||
$ py.test -rs # "-rs" means report details on the little 's'
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_module.py .s
|
||||
========================= short test summary info ==========================
|
||||
SKIP [1] /tmp/doc-exec-171/conftest.py:9: need --runslow option to run
|
||||
SKIP [1] /tmp/doc-exec-42/conftest.py:9: need --runslow option to run
|
||||
|
||||
=================== 1 passed, 1 skipped in 0.02 seconds ====================
|
||||
=================== 1 passed, 1 skipped in 0.01 seconds ====================
|
||||
|
||||
Or run it including the ``slow`` marked test::
|
||||
|
||||
$ py.test --runslow
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 2 items
|
||||
|
||||
test_module.py ..
|
||||
@@ -319,7 +261,7 @@ which will add the string to the test header accordingly::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
project deps: mylib-1.1
|
||||
collecting ... collected 0 items
|
||||
|
||||
@@ -342,7 +284,7 @@ which will add info only when run with "--v"::
|
||||
|
||||
$ py.test -v
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1 -- /home/hpk/venv/0/bin/python
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3 -- /home/hpk/venv/0/bin/python
|
||||
info1: did you know that ...
|
||||
did you?
|
||||
collecting ... collected 0 items
|
||||
@@ -353,7 +295,7 @@ and nothing when run plainly::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
|
||||
@@ -17,5 +17,9 @@ def test_hello3():
|
||||
def test_hello4():
|
||||
assert 0
|
||||
|
||||
@xfail('pytest.__version__[0] != "17"')
|
||||
def test_hello5():
|
||||
assert 0
|
||||
|
||||
def test_hello6():
|
||||
pytest.xfail("reason")
|
||||
|
||||
32
doc/extracol
32
doc/extracol
@@ -1,32 +0,0 @@
|
||||
changing Python test discovery patterns
|
||||
--------------------------------------------------
|
||||
|
||||
You can influence python test file, function and class prefixes through
|
||||
the :confval:`python_patterns` configuration valueto determine which
|
||||
files are checked and which test functions are found. Example for using
|
||||
a scheme that builds on ``check`` rather than on ``test`` prefixes::
|
||||
|
||||
|
||||
# content of setup.cfg
|
||||
[pytest]
|
||||
python_patterns =
|
||||
files: check_*.py
|
||||
functions: check_
|
||||
classes: Check
|
||||
|
||||
See
|
||||
:confval:`python_funcprefixes` and :confval:`python_classprefixes`
|
||||
|
||||
|
||||
changing test file discovery
|
||||
-----------------------------------------------------
|
||||
|
||||
You can specify patterns where python tests are found::
|
||||
|
||||
python_testfilepatterns =
|
||||
testing/**/{purebasename}.py
|
||||
testing/*.py
|
||||
|
||||
.. note::
|
||||
|
||||
conftest.py files are never considered for test discovery
|
||||
65
doc/faq.txt
65
doc/faq.txt
@@ -12,25 +12,26 @@ On naming, nosetests, licensing and magic
|
||||
Why a ``py.test`` instead of a ``pytest`` command?
|
||||
++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
Some historic, some practical reasons: ``py.test`` used to be part of
|
||||
the ``py`` package which provided several developer utitilities,
|
||||
all starting with ``py.<TAB>``, providing nice TAB-completion. If
|
||||
Some of the reasons are historic, others are practical. ``py.test``
|
||||
used to be part of the ``py`` package which provided several developer
|
||||
utilities, all starting with ``py.<TAB>``, thus providing nice
|
||||
TAB-completion. If
|
||||
you install ``pip install pycmd`` you get these tools from a separate
|
||||
package. These days the command line tool could be called ``pytest``
|
||||
but then again many people have gotten used to the old name and there
|
||||
is another tool named "pytest" so we just decided to stick with
|
||||
since many people have gotten used to the old name and there
|
||||
is another tool named "pytest" we just decided to stick with
|
||||
``py.test``.
|
||||
|
||||
What's the relation to nose and unittest?
|
||||
How does py.test relate to nose and unittest?
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
py.test and nose_ share basic philosophy when it comes
|
||||
to running Python tests. In fact, you can run many tests
|
||||
written nose with py.test. nose_ was originally created
|
||||
to running and writing Python tests. In fact, you can run many tests
|
||||
written for nose with py.test. nose_ was originally created
|
||||
as a clone of ``py.test`` when py.test was in the ``0.8`` release
|
||||
cycle. As of version 2.0 support for running unittest test
|
||||
suites is majorly improved and you should be able to run
|
||||
many Django and Twisted test suites.
|
||||
cycle. Note that starting with pytest-2.0 support for running unittest
|
||||
test suites is majorly improved and you should be able to run
|
||||
many Django and Twisted test suites without modification.
|
||||
|
||||
.. _features: test/features.html
|
||||
|
||||
@@ -39,22 +40,20 @@ What's this "magic" with py.test?
|
||||
++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
Around 2007 (version ``0.8``) some people claimed that py.test
|
||||
was using too much "magic". It has been refactored a lot. Thrown
|
||||
out old code. Deprecated unused approaches and code. And it is today
|
||||
probably one of the smallest, most universally runnable and most
|
||||
customizable testing frameworks for Python. It's true that
|
||||
``py.test`` uses metaprogramming techniques, i.e. it views
|
||||
test code similar to how compilers view programs, using a
|
||||
somewhat abstract internal model.
|
||||
was using too much "magic". Partly this has been fixed by removing
|
||||
unused, deprecated or complicated code. It is today probably one
|
||||
of the smallest, most universally runnable and most
|
||||
customizable testing frameworks for Python. However,
|
||||
``py.test`` still uses many metaprogramming techniques and
|
||||
reading its source is thus likely not something for Python beginners.
|
||||
|
||||
It's also true that the no-boilerplate testing is implemented by making
|
||||
use of the Python assert statement through "re-interpretation":
|
||||
A second "magic" issue arguably the assert statement re-intepreation:
|
||||
When an ``assert`` statement fails, py.test re-interprets the expression
|
||||
to show intermediate values if a test fails. If your expression
|
||||
has side effects the intermediate values may not be the same, obfuscating
|
||||
the initial error (this is also explained at the command line if it happens).
|
||||
``py.test --no-assert`` turns off assert re-intepretation.
|
||||
Sidenote: it is good practise to avoid asserts with side effects.
|
||||
has side effects (better to avoid them anyway!) the intermediate values
|
||||
may not be the same, obfuscating the initial error (this is also
|
||||
explained at the command line if it happens).
|
||||
``py.test --no-assert`` turns off assert re-interpretation.
|
||||
|
||||
.. _`py namespaces`: index.html
|
||||
.. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py
|
||||
@@ -69,7 +68,7 @@ Is using funcarg- versus xUnit setup a style question?
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
For simple applications and for people experienced with nose_ or
|
||||
unittest-style test setup using `xUnit style setup`_ often
|
||||
unittest-style test setup using `xUnit style setup`_ probably
|
||||
feels natural. For larger test suites, parametrized testing
|
||||
or setup of complex test resources using funcargs_ may feel more natural.
|
||||
Moreover, funcargs are ideal for writing advanced test support
|
||||
@@ -86,13 +85,11 @@ in a managed class/module/function scope.
|
||||
Why the ``pytest_funcarg__*`` name for funcarg factories?
|
||||
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
We alternatively implemented an explicit registration mechanism for
|
||||
function argument factories. But lacking a good use case for this
|
||||
indirection and flexibility we decided to go for `Convention over
|
||||
Configuration`_ and rather have factories specified by convention.
|
||||
Besides removing the need for an registration indirection it allows to
|
||||
"grep" for ``pytest_funcarg__MYARG`` and will safely find all factory
|
||||
functions for the ``MYARG`` function argument.
|
||||
We like `Convention over Configuration`_ and didn't see much point
|
||||
in allowing a more flexible or abstract mechanism. Moreover,
|
||||
is is nice to be able to search for ``pytest_funcarg__MYARG`` in
|
||||
a source code and safely find all factory functions for
|
||||
the ``MYARG`` function argument.
|
||||
|
||||
.. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration
|
||||
|
||||
@@ -125,8 +122,8 @@ Issues with py.test, multiprocess and setuptools?
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
On windows the multiprocess package will instantiate sub processes
|
||||
by pickling and thus implicitely re-import a lot of local modules.
|
||||
Unfortuantely, setuptools-0.6.11 does not ``if __name__=='__main__'``
|
||||
by pickling and thus implicitly re-import a lot of local modules.
|
||||
Unfortunately, setuptools-0.6.11 does not ``if __name__=='__main__'``
|
||||
protect its generated command line script. This leads to infinite
|
||||
recursion when running a test that instantiates Processes.
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
==============================================================
|
||||
creating and managing test function arguments
|
||||
Injecting objects into test functions (funcargs)
|
||||
==============================================================
|
||||
|
||||
.. currentmodule:: _pytest.python
|
||||
@@ -11,16 +11,26 @@ creating and managing test function arguments
|
||||
Dependency injection through function arguments
|
||||
=================================================
|
||||
|
||||
py.test allows to inject values into test functions through the *funcarg
|
||||
mechanism*: For each argument name in a test function signature a factory is
|
||||
looked up and called to create the value. The factory can live in the
|
||||
same test class, test module, in a per-directory ``conftest.py`` file or
|
||||
in an external plugin. It has full access to the requesting test
|
||||
function, can register finalizers and invoke lifecycle-caching
|
||||
helpers. As can be expected from a systematic dependency
|
||||
injection mechanism, this allows full de-coupling of resource and
|
||||
fixture setup from test code, enabling more maintainable and
|
||||
easy-to-modify test suites.
|
||||
py.test lets you inject objects into test functions and precisely
|
||||
control their life cycle in relation to the test execution. It is
|
||||
also possible to run a test function multiple times with different objects.
|
||||
|
||||
The basic mechanism for injecting objects is also called the
|
||||
*funcarg mechanism* because objects are ultimatly injected
|
||||
by calling a test function with it as an argument. Unlike the
|
||||
classical xUnit approach *funcargs* relate more to `Dependency Injection`_
|
||||
because they help to de-couple test code from objects required for
|
||||
them to execute.
|
||||
|
||||
.. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection
|
||||
|
||||
To create a value with which to call a test function a factory function
|
||||
is called which gets full access to the test function context and can
|
||||
register finalizers or invoke lifecycle-caching helpers. The factory
|
||||
can be implemented in same test class or test module, or in a
|
||||
per-directory ``conftest.py`` file or even in an external plugin. This
|
||||
allows full de-coupling of test code and objects needed for test
|
||||
execution.
|
||||
|
||||
A test function may be invoked multiple times in which case we
|
||||
speak of :ref:`parametrized testing <parametrizing-tests>`. This can be
|
||||
@@ -28,11 +38,13 @@ very useful if you want to test e.g. against different database backends
|
||||
or with multiple numerical arguments sets and want to reuse the same set
|
||||
of test functions.
|
||||
|
||||
Basic funcarg example
|
||||
-----------------------
|
||||
|
||||
Let's look at a simple self-contained example that you can put
|
||||
into a test module::
|
||||
.. _funcarg:
|
||||
|
||||
Basic injection example
|
||||
--------------------------------
|
||||
|
||||
Let's look at a simple self-contained test module::
|
||||
|
||||
# content of ./test_simplefactory.py
|
||||
def pytest_funcarg__myfuncarg(request):
|
||||
@@ -41,11 +53,15 @@ into a test module::
|
||||
def test_function(myfuncarg):
|
||||
assert myfuncarg == 17
|
||||
|
||||
This test function needs an injected object named ``myfuncarg``.
|
||||
py.test will discover and call the factory named
|
||||
``pytest_funcarg__myfuncarg`` within the same module in this case.
|
||||
|
||||
Running the test looks like this::
|
||||
|
||||
$ py.test test_simplefactory.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_simplefactory.py F
|
||||
@@ -62,8 +78,8 @@ Running the test looks like this::
|
||||
test_simplefactory.py:5: AssertionError
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
|
||||
This means that the test function was called with a ``myfuncarg`` value
|
||||
of ``42`` and the assert fails accordingly. Here is how py.test
|
||||
This means that indeed the test function was called with a ``myfuncarg``
|
||||
argument value of ``42`` and the assert fails. Here is how py.test
|
||||
comes to call the test function this way:
|
||||
|
||||
1. py.test :ref:`finds <test discovery>` the ``test_function`` because
|
||||
@@ -74,14 +90,15 @@ comes to call the test function this way:
|
||||
2. ``pytest_funcarg__myfuncarg(request)`` is called and
|
||||
returns the value for ``myfuncarg``.
|
||||
|
||||
3. the test function can now be called: ``test_function(42)``
|
||||
and results in the above exception because of the assertion
|
||||
3. the test function can now be called: ``test_function(42)``.
|
||||
This results in the above exception because of the assertion
|
||||
mismatch.
|
||||
|
||||
Note that if you misspell a function argument or want
|
||||
to use one that isn't available, you'll see an error
|
||||
with a list of available function arguments. You can
|
||||
also issue::
|
||||
with a list of available function arguments.
|
||||
|
||||
You can always issue::
|
||||
|
||||
py.test --funcargs test_simplefactory.py
|
||||
|
||||
@@ -150,7 +167,7 @@ Running this::
|
||||
|
||||
$ py.test test_example.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 10 items
|
||||
|
||||
test_example.py .........F
|
||||
@@ -172,6 +189,9 @@ the test collection phase which is separate from the actual test running.
|
||||
Let's just look at what is collected::
|
||||
|
||||
$ py.test --collectonly test_example.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 10 items
|
||||
<Module 'test_example.py'>
|
||||
<Function 'test_func[0]'>
|
||||
<Function 'test_func[1]'>
|
||||
@@ -183,12 +203,14 @@ Let's just look at what is collected::
|
||||
<Function 'test_func[7]'>
|
||||
<Function 'test_func[8]'>
|
||||
<Function 'test_func[9]'>
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
|
||||
If you want to select only the run with the value ``7`` you could do::
|
||||
|
||||
$ py.test -v -k 7 test_example.py # or -k test_func[7]
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1 -- /home/hpk/venv/0/bin/python
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3 -- /home/hpk/venv/0/bin/python
|
||||
collecting ... collected 10 items
|
||||
|
||||
test_example.py:6: test_func[7] PASSED
|
||||
@@ -196,6 +218,8 @@ If you want to select only the run with the value ``7`` you could do::
|
||||
======================== 9 tests deselected by '7' =========================
|
||||
================== 1 passed, 9 deselected in 0.01 seconds ==================
|
||||
|
||||
You might want to look at :ref:`more parametrization examples <paramexamples>`.
|
||||
|
||||
.. _`metafunc object`:
|
||||
|
||||
The **metafunc** object
|
||||
|
||||
@@ -16,10 +16,10 @@ Installation options::
|
||||
To check your installation has installed the correct version::
|
||||
|
||||
$ py.test --version
|
||||
This is py.test version 2.0.1, imported from /home/hpk/p/pytest/pytest.py
|
||||
This is py.test version 2.0.3, imported from /home/hpk/p/pytest/pytest.pyc
|
||||
setuptools registered plugins:
|
||||
pytest-xdist-1.6.dev2 at /home/hpk/p/pytest-xdist/xdist/plugin.pyc
|
||||
pytest-pep8-0.7 at /home/hpk/p/pytest-pep8/pytest_pep8.pyc
|
||||
pytest-xdist-1.6.dev3 at /home/hpk/p/pytest-xdist/xdist/plugin.pyc
|
||||
pytest-incremental-0.1.0 at /home/hpk/venv/0/lib/python2.6/site-packages/pytest_incremental.pyc
|
||||
|
||||
If you get an error checkout :ref:`installation issues`.
|
||||
|
||||
@@ -41,7 +41,7 @@ That's it. You can execute the test function now::
|
||||
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_sample.py F
|
||||
@@ -57,7 +57,7 @@ That's it. You can execute the test function now::
|
||||
test_sample.py:5: AssertionError
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
|
||||
py.test found the ``test_answer`` function by following :ref:`standard test discovery rules <test discovery>`, basically detecting the ``test_`` prefixes. We got a failure report because our little ``func(3)`` call did not return ``5``. The report is formatted using the :ref:`standard traceback reporting`.
|
||||
py.test found the ``test_answer`` function by following :ref:`standard test discovery rules <test discovery>`, basically detecting the ``test_`` prefixes. We got a failure report because our little ``func(3)`` call did not return ``5``.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -80,10 +80,10 @@ py.test found the ``test_answer`` function by following :ref:`standard test disc
|
||||
|
||||
.. _`assert statement`: http://docs.python.org/reference/simple_stmts.html#the-assert-statement
|
||||
|
||||
Asserting a certain exception is raised
|
||||
Asserting that a certain exception is raised
|
||||
--------------------------------------------------------------
|
||||
|
||||
If you want to assert some code raises an exception you can
|
||||
If you want to assert that some code raises an exception you can
|
||||
use the ``raises`` helper::
|
||||
|
||||
# content of test_sysexit.py
|
||||
@@ -107,9 +107,9 @@ Running it with, this time in "quiet" reporting mode::
|
||||
Grouping multiple tests in a class
|
||||
--------------------------------------------------------------
|
||||
|
||||
If you start to have more than a few tests it often makes sense
|
||||
to group tests logically, in classes and modules. Let's put two
|
||||
tests in a class like this::
|
||||
Once you start to have more than a few tests it often makes sense
|
||||
to group tests logically, in classes and modules. Let's write a class
|
||||
containing two tests::
|
||||
|
||||
# content of test_class.py
|
||||
class TestClass:
|
||||
@@ -131,7 +131,7 @@ run the module by passing its filename::
|
||||
================================= FAILURES =================================
|
||||
____________________________ TestClass.test_two ____________________________
|
||||
|
||||
self = <test_class.TestClass instance at 0x178b2d8>
|
||||
self = <test_class.TestClass instance at 0x142c320>
|
||||
|
||||
def test_two(self):
|
||||
x = "hello"
|
||||
@@ -140,7 +140,7 @@ run the module by passing its filename::
|
||||
E + where False = hasattr('hello', 'check')
|
||||
|
||||
test_class.py:8: AssertionError
|
||||
1 failed, 1 passed in 0.02 seconds
|
||||
1 failed, 1 passed in 0.03 seconds
|
||||
|
||||
The first test passed, the second failed. Again we can easily see
|
||||
the intermediate values used in the assertion, helping us to
|
||||
@@ -169,7 +169,7 @@ before performing the test function call. Let's just run it::
|
||||
================================= FAILURES =================================
|
||||
_____________________________ test_needsfiles ______________________________
|
||||
|
||||
tmpdir = local('/tmp/pytest-101/test_needsfiles0')
|
||||
tmpdir = local('/tmp/pytest-10/test_needsfiles0')
|
||||
|
||||
def test_needsfiles(tmpdir):
|
||||
print tmpdir
|
||||
@@ -178,8 +178,8 @@ before performing the test function call. Let's just run it::
|
||||
|
||||
test_tmpdir.py:3: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
/tmp/pytest-101/test_needsfiles0
|
||||
1 failed in 0.03 seconds
|
||||
/tmp/pytest-10/test_needsfiles0
|
||||
1 failed in 0.13 seconds
|
||||
|
||||
Before the test runs, a unique-per-test-invocation temporary directory
|
||||
was created. More info at :ref:`tmpdir handling`.
|
||||
@@ -194,7 +194,7 @@ where to go next
|
||||
Here are a few suggestions where to go next:
|
||||
|
||||
* :ref:`cmdline` for command line invocation examples
|
||||
* :ref:`good practises` for virtualenv, test layout, genscript support
|
||||
* :ref:`good practises <goodpractises>` for virtualenv, test layout, genscript support
|
||||
* :ref:`apiref` for documentation and examples on using py.test
|
||||
* :ref:`plugins` managing and writing plugins
|
||||
|
||||
@@ -228,7 +228,7 @@ py.test not found on Windows despite installation?
|
||||
- **Jython2.5.1 on Windows XP**: `Jython does not create command line launchers`_
|
||||
so ``py.test`` will not work correctly. You may install py.test on
|
||||
CPython and type ``py.test --genscript=mytest`` and then use
|
||||
``jython mytest`` to run py.test for your tests to run in Jython.
|
||||
``jython mytest`` to run py.test for your tests to run with Jython.
|
||||
|
||||
:ref:`examples` for more complex examples
|
||||
|
||||
|
||||
@@ -8,26 +8,24 @@ Good Integration Practises
|
||||
Work with virtual environments
|
||||
-----------------------------------------------------------
|
||||
|
||||
We recommend to work with virtualenv_ environments and use easy_install_
|
||||
We recommend to use virtualenv_ environments and use easy_install_
|
||||
(or pip_) for installing your application dependencies as well as
|
||||
the ``pytest`` package itself. This way you get a much more reproducible
|
||||
the ``pytest`` package itself. This way you will get a much more reproducible
|
||||
environment. A good tool to help you automate test runs against multiple
|
||||
dependency configurations or Python interpreters is `tox`_,
|
||||
independently created by the main py.test author. The latter
|
||||
is also useful for integration with the continous integration
|
||||
server Hudson_.
|
||||
dependency configurations or Python interpreters is `tox`_.
|
||||
|
||||
.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
|
||||
.. _`buildout`: http://www.buildout.org/
|
||||
.. _pip: http://pypi.python.org/pypi/pip
|
||||
|
||||
Use tox and Continous Integration servers
|
||||
Use tox and Continuous Integration servers
|
||||
-------------------------------------------------
|
||||
|
||||
If you are (often) releasing code to the public you
|
||||
If you frequently release code to the public you
|
||||
may want to look into `tox`_, the virtualenv test automation
|
||||
tool and its `pytest support <http://codespeak.net/tox/example/pytest.html>`_.
|
||||
The basic idea is to generate a JUnitXML file through the ``--junitxml=PATH`` option and have a continous integration server like Hudson_ pick it up.
|
||||
The basic idea is to generate a JUnitXML file through the ``--junitxml=PATH`` option and have a continuous integration server like Jenkins_ pick it up
|
||||
and generate reports.
|
||||
|
||||
.. _standalone:
|
||||
.. _`genscript method`:
|
||||
@@ -90,7 +88,7 @@ If you now type::
|
||||
this will execute your tests using ``runtest.py``. As this is a
|
||||
standalone version of ``py.test`` no prior installation whatsoever is
|
||||
required for calling the test command. You can also pass additional
|
||||
arguments to the subprocess-calls like your test directory or other
|
||||
arguments to the subprocess-calls such as your test directory or other
|
||||
options.
|
||||
|
||||
.. _`test discovery`:
|
||||
@@ -101,14 +99,14 @@ Conventions for Python test discovery
|
||||
|
||||
``py.test`` implements the following standard test discovery:
|
||||
|
||||
* collection starts from initial command line arguments
|
||||
* collection starts from the initial command line arguments
|
||||
which may be directories, filenames or test ids.
|
||||
* recurse into directories, unless they match :confval:`norecursedirs`
|
||||
* ``test_*.py`` or ``*_test.py`` files, imported by their `package name`_.
|
||||
* ``Test`` prefixed test classes (without an ``__init__`` method)
|
||||
* ``test_`` prefixed test functions or methods are test items
|
||||
|
||||
For changing and customization example, see :doc:`example/pythoncollection`.
|
||||
For examples of how to customize your test discovery see :doc:`example/pythoncollection`.
|
||||
|
||||
py.test additionally discovers tests using the standard
|
||||
:ref:`unittest.TestCase <unittest.TestCase>` subclassing technique.
|
||||
@@ -154,8 +152,8 @@ You can always run your tests by pointing to it::
|
||||
|
||||
Test modules are imported under their fully qualified name as follows:
|
||||
|
||||
* find ``basedir`` -- this is the first "upward" directory not
|
||||
containing an ``__init__.py``
|
||||
* find ``basedir`` -- this is the first "upward" (towards the root)
|
||||
directory not containing an ``__init__.py``
|
||||
|
||||
* perform ``sys.path.insert(0, basedir)`` to make the fully
|
||||
qualified test module path importable.
|
||||
|
||||
@@ -4,30 +4,32 @@ Welcome to ``py.test``!
|
||||
=============================================
|
||||
|
||||
|
||||
- **a mature fully featured testing tool**
|
||||
- **a mature full-featured testing tool**
|
||||
|
||||
- runs on Posix/Windows, Python 2.4-3.2, PyPy and Jython
|
||||
- continously `tested on many Python interpreters <http://hudson.testrun.org/view/pytest/job/pytest/>`_
|
||||
- used in :ref:`many projects and organisations <projects>`, ranging from 10 to 10000 tests
|
||||
- continuously `tested on many Python interpreters <http://hudson.testrun.org/view/pytest/job/pytest/>`_
|
||||
- used in :ref:`many projects and organisations <projects>`, in test
|
||||
suites ranging from 10 to 10s of thousands of tests
|
||||
- has :ref:`comprehensive documentation <toc>`
|
||||
- comes with :ref:`tested examples <examples>`
|
||||
- supports :ref:`good integration practises <goodpractises>`
|
||||
|
||||
- **provides no-boilerplate testing**
|
||||
|
||||
- makes it :ref:`easy to get started <getstarted>`, refined :ref:`usage options <usage>`
|
||||
- makes it :ref:`easy to get started <getstarted>`,
|
||||
- refined :ref:`usage options <usage>`
|
||||
- :ref:`assert with the assert statement`
|
||||
- helpful :ref:`traceback and failing assertion reporting <tbreportdemo>`
|
||||
- allows :ref:`print debugging <printdebugging>` and :ref:`generic output
|
||||
capturing <captures>`
|
||||
- supports :pep:`8` compliant coding style in tests
|
||||
- allows :ref:`print debugging <printdebugging>` and :ref:`the
|
||||
capturing of standard output during test execution <captures>`
|
||||
- supports :pep:`8` compliant coding styles in tests
|
||||
|
||||
- **supports functional testing and complex test setups**
|
||||
|
||||
- advanced :ref:`skip and xfail`
|
||||
- generic :ref:`marking and test selection <mark>`
|
||||
- can :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
|
||||
- can :ref:`continously re-run failing tests <looponfailing>`
|
||||
- can :ref:`continuously re-run failing tests <looponfailing>`
|
||||
- many :ref:`builtin helpers <pytest helpers>`
|
||||
- flexible :ref:`Python test discovery`
|
||||
- unique :ref:`dependency injection through funcargs <funcargs>`
|
||||
@@ -39,8 +41,8 @@ Welcome to ``py.test``!
|
||||
tests, including running testcases made for Django and trial
|
||||
- supports extended :ref:`xUnit style setup <xunitsetup>`
|
||||
- supports domain-specific :ref:`non-python tests`
|
||||
- supports generating testing coverage reports
|
||||
- `Javasript unit- and functional testing`_
|
||||
- supports the generation of testing coverage reports
|
||||
- `Javascript unit- and functional testing`_
|
||||
|
||||
- **extensive plugin and customization system**
|
||||
|
||||
@@ -48,7 +50,7 @@ Welcome to ``py.test``!
|
||||
- customizations can be per-directory, per-project or per PyPI released plugins
|
||||
- it is easy to add command line options or do other kind of add-ons and customizations.
|
||||
|
||||
.. _`Javasript unit- and functional testing`: http://pypi.python.org/pypi/oejskit
|
||||
.. _`Javascript unit- and functional testing`: http://pypi.python.org/pypi/oejskit
|
||||
|
||||
.. _`easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
|
||||
|
||||
|
||||
@@ -16,4 +16,5 @@
|
||||
.. _`pip`: http://pypi.python.org/pypi/pip
|
||||
.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
|
||||
.. _hudson: http://hudson-ci.org/
|
||||
.. _jenkins: http://jenkins-ci.org/
|
||||
.. _tox: http://codespeak.net/tox
|
||||
|
||||
16
doc/mark.txt
16
doc/mark.txt
@@ -7,12 +7,12 @@ mark test functions with attributes
|
||||
.. currentmodule:: _pytest.mark
|
||||
|
||||
By using the ``pytest.mark`` helper you can instantiate
|
||||
decorators that will set named meta data on test functions.
|
||||
decorators that will set named metadata on test functions.
|
||||
|
||||
Marking a single function
|
||||
----------------------------------------------------
|
||||
|
||||
You can "mark" a test function with meta data like this::
|
||||
You can "mark" a test function with metadata like this::
|
||||
|
||||
import pytest
|
||||
@pytest.mark.webtest
|
||||
@@ -20,7 +20,7 @@ You can "mark" a test function with meta data like this::
|
||||
...
|
||||
|
||||
This will set the function attribute ``webtest`` to a :py:class:`MarkInfo`
|
||||
instance. You can also specify parametrized meta data like this::
|
||||
instance. You can also specify parametrized metadata like this::
|
||||
|
||||
# content of test_mark.py
|
||||
|
||||
@@ -44,7 +44,7 @@ Marking whole classes or modules
|
||||
----------------------------------------------------
|
||||
|
||||
If you are programming with Python2.6 you may use ``pytest.mark`` decorators
|
||||
with classes to apply markers to all its test methods::
|
||||
with classes to apply markers to all of its test methods::
|
||||
|
||||
# content of test_mark_classlevel.py
|
||||
import pytest
|
||||
@@ -88,19 +88,19 @@ You can use the ``-k`` command line option to select tests::
|
||||
|
||||
$ py.test -k webtest # running with the above defined examples yields
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_mark.py ..
|
||||
test_mark_classlevel.py ..
|
||||
|
||||
========================= 4 passed in 0.02 seconds =========================
|
||||
========================= 4 passed in 0.01 seconds =========================
|
||||
|
||||
And you can also run all tests except the ones that match the keyword::
|
||||
|
||||
$ py.test -k-webtest
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 4 items
|
||||
|
||||
===================== 4 tests deselected by '-webtest' =====================
|
||||
@@ -110,7 +110,7 @@ Or to only select the class::
|
||||
|
||||
$ py.test -kTestClass
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_mark_classlevel.py ..
|
||||
|
||||
@@ -9,8 +9,8 @@ on global settings or which invokes code which cannot be easily
|
||||
tested such as network access. The ``monkeypatch`` function argument
|
||||
helps you to safely set/delete an attribute, dictionary item or
|
||||
environment variable or to modify ``sys.path`` for importing.
|
||||
See the `monkeypatch blog post`_ one some introduction material
|
||||
and motivation.
|
||||
See the `monkeypatch blog post`_ for some introduction material
|
||||
and a discussion of its motivation.
|
||||
|
||||
.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
|
||||
|
||||
@@ -18,20 +18,20 @@ and motivation.
|
||||
Simple example: patching ``os.path.expanduser``
|
||||
---------------------------------------------------
|
||||
|
||||
If you e.g. want to pretend that ``os.expanduser`` returns a certain
|
||||
If, for instance, you want to pretend that ``os.expanduser`` returns a certain
|
||||
directory, you can use the :py:meth:`monkeypatch.setattr` method to
|
||||
patch this function before calling into a function which uses it::
|
||||
|
||||
import os.path
|
||||
def getssh(): # pseudo application code
|
||||
return os.path.join(os.expanduser("~admin"), '.ssh')
|
||||
return os.path.join(os.path.expanduser("~admin"), '.ssh')
|
||||
|
||||
def test_mytest(monkeypatch):
|
||||
def mockreturn(path):
|
||||
return '/abc'
|
||||
monkeypatch.setattr(os.path, 'expanduser', mockreturn)
|
||||
x = getssh()
|
||||
assert x == '/abc'
|
||||
assert x == '/abc/.ssh'
|
||||
|
||||
After the test function finishes the ``os.path.expanduser`` modification
|
||||
will be undone.
|
||||
@@ -39,7 +39,7 @@ will be undone.
|
||||
.. background check:
|
||||
$ py.test
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 0 items
|
||||
|
||||
============================= in 0.00 seconds =============================
|
||||
|
||||
@@ -16,5 +16,5 @@ these renaming rules::
|
||||
py.test.cmdline.main -> pytest.main
|
||||
|
||||
The old ``py.test.*`` ways to access functionality remain
|
||||
valid but you are encouraged to do global renames according
|
||||
valid but you are encouraged to do global renaming according
|
||||
to the above rules in your test code.
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
.. _plugins:
|
||||
|
||||
Working with plugins and conftest files
|
||||
=============================================
|
||||
|
||||
.. _`local plugin`:
|
||||
|
||||
py.test implements all aspects of configuration, collection, running and reporting by calling `well specified hooks`_. Virtually any Python module can be registered as a plugin. It can implement any number of hook functions (usually two or three) which all have a ``pytest_`` prefix, making hook functions easy to distinguish and find. There are three basic locations types:
|
||||
|
||||
* `builtin plugins`_: loaded from py.test's own ``pytest/plugin`` directory.
|
||||
@@ -12,14 +12,17 @@ py.test implements all aspects of configuration, collection, running and reporti
|
||||
.. _`pytest/plugin`: http://bitbucket.org/hpk42/pytest/src/tip/pytest/plugin/
|
||||
.. _`conftest.py plugins`:
|
||||
.. _`conftest.py`:
|
||||
.. _`localplugin`:
|
||||
.. _`conftest`:
|
||||
|
||||
conftest.py: local per-directory plugins
|
||||
--------------------------------------------------------------
|
||||
|
||||
local ``conftest.py`` plugins contain directory-specific hook
|
||||
implementations. Session and test running activities will
|
||||
invoke all hooks defined in "higher up" ``conftest.py`` files.
|
||||
Example: Assume the following layout and content of files::
|
||||
invoke all hooks defined in ``conftest.py`` files closer to the
|
||||
root of the filesystem. Example: Assume the following layout
|
||||
and content of files::
|
||||
|
||||
a/conftest.py:
|
||||
def pytest_runtest_setup(item):
|
||||
@@ -39,15 +42,10 @@ Here is how you might run it::
|
||||
py.test test_flat.py # will not show "setting up"
|
||||
py.test a/test_sub.py # will show "setting up"
|
||||
|
||||
A note on ordering: ``py.test`` loads all ``conftest.py`` files upwards
|
||||
from the command line file arguments. It usually performs look up
|
||||
right-to-left, i.e. the hooks in "closer" conftest files will be called
|
||||
earlier than further away ones.
|
||||
|
||||
.. Note::
|
||||
If you have ``conftest.py`` files which do not reside in a
|
||||
python package directory (i.e. one containing an ``__init__.py``) then
|
||||
"import conftest" can be ambigous because there might be other
|
||||
"import conftest" can be ambiguous because there might be other
|
||||
``conftest.py`` files as well on your PYTHONPATH or ``sys.path``.
|
||||
It is thus good practise for projects to either put ``conftest.py``
|
||||
under a package scope or to never import anything from a
|
||||
@@ -79,7 +77,7 @@ there is no need to activate it. Here is a list of known plugins:
|
||||
coverage reporting, compatible with distributed testing
|
||||
|
||||
* `pytest-pep8 <http://pypi.python.org/pypi/pytest-pep8>`_:
|
||||
a ``--pep8`` option to enable PEP8 compliancy checking.
|
||||
a ``--pep8`` option to enable PEP8 compliance checking.
|
||||
|
||||
* `oejskit <http://pypi.python.org/pypi/oejskit>`_:
|
||||
a plugin to run javascript unittests in life browsers
|
||||
@@ -112,12 +110,12 @@ Making your plugin installable by others
|
||||
-----------------------------------------------
|
||||
|
||||
If you want to make your plugin externally available, you
|
||||
may define a so called entry point for your distribution so
|
||||
may define a so-called entry point for your distribution so
|
||||
that ``py.test`` finds your plugin module. Entry points are
|
||||
a feature that is provided by `setuptools`_ or `Distribute`_.
|
||||
The concrete entry point is ``pytest11``. To make your plugin
|
||||
available you can insert the following lines in your
|
||||
setuptools/distribute-based setup-invocation:
|
||||
py.test looks up the ``pytest11`` entrypoint to discover its
|
||||
plugins and you can thus make your plugin available by defining
|
||||
it in your setuptools/distribute-based setup-invocation:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
@@ -137,8 +135,8 @@ setuptools/distribute-based setup-invocation:
|
||||
)
|
||||
|
||||
If a package is installed this way, py.test will load
|
||||
``myproject.pluginmodule`` and accordingly call functions
|
||||
if they match the `well specified hooks`_.
|
||||
``myproject.pluginmodule`` as a plugin which can define
|
||||
`well specified hooks`_.
|
||||
|
||||
Plugin discovery order at tool startup
|
||||
--------------------------------------------
|
||||
@@ -260,11 +258,11 @@ hook specification and validation
|
||||
|
||||
py.test calls hook functions to implement initialization, running,
|
||||
test execution and reporting. When py.test loads a plugin it validates
|
||||
that all hook functions conform to their respective hook specification.
|
||||
that each hook function conforms to its respective hook specification.
|
||||
Each hook function name and its argument names need to match a hook
|
||||
specification exactly but it is allowed for a hook function to accept
|
||||
*less* parameters than specified. If you mistype argument names or the
|
||||
hook name itself you get useful errors.
|
||||
specification. However, a hook function may accept *fewer* parameters
|
||||
by simply not specifying them. If you mistype argument names or the
|
||||
hook name itself you get an error showing the available arguments.
|
||||
|
||||
initialisation, command line and configuration hooks
|
||||
--------------------------------------------------------------------
|
||||
@@ -292,8 +290,9 @@ All all runtest related hooks receive a :py:class:`pytest.Item` object.
|
||||
|
||||
For deeper understanding you may look at the default implementation of
|
||||
these hooks in :py:mod:`_pytest.runner` and maybe also
|
||||
in :py:mod:`_pytest.pdb` which intercepts creation
|
||||
of reports in order to drop to interactive debugging.
|
||||
in :py:mod:`_pytest.pdb` which interacts with :py:mod:`_pytest.capture`
|
||||
and its input/output capturing in order to immediately drop
|
||||
into interactive debugging when a test failure occurs.
|
||||
|
||||
The :py:mod:`_pytest.terminal` reported specifically uses
|
||||
the reporting hook to print information about a test run.
|
||||
|
||||
@@ -46,7 +46,7 @@ Some organisations using py.test
|
||||
* `Shootq <http://web.shootq.com/>`_
|
||||
* `Stups department of Heinrich Heine University Düsseldorf <http://www.stups.uni-duesseldorf.de/projects.php>`_
|
||||
* `cellzome <http://www.cellzome.com/>`_
|
||||
* `Open End, Gotenborg <http://www.openend.se>`_
|
||||
* `Open End, Gothenborg <http://www.openend.se>`_
|
||||
* `Laboraratory of Bioinformatics, Warsaw <http://genesilico.pl/>`_
|
||||
* `merlinux, Germany <http://merlinux.eu>`_
|
||||
* many more ... (please be so kind to send a note via :ref:`contact`)
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
[pytest]
|
||||
# just defined to prevent the root level tox.ini to kick in
|
||||
# just defined to prevent the root level tox.ini from kicking in
|
||||
|
||||
128
doc/skipping.txt
128
doc/skipping.txt
@@ -1,49 +1,60 @@
|
||||
|
||||
.. _`skip and xfail`:
|
||||
|
||||
skip and xfail mechanisms
|
||||
skip and xfail: dealing with tests that can not succeed
|
||||
=====================================================================
|
||||
|
||||
You can skip or "xfail" test functions, either by marking functions
|
||||
through a decorator or by calling the ``pytest.skip|xfail`` helpers.
|
||||
A *skip* means that you expect your test to pass unless a certain configuration or condition (e.g. wrong Python interpreter, missing dependency) prevents it to run. And *xfail* means that you expect your test to fail because there is an
|
||||
implementation problem. py.test counts and lists *xfailing* tests separately
|
||||
and you can provide info such as a bug number or a URL to provide a
|
||||
human readable problem context.
|
||||
If you have test functions that cannot be run on certain platforms
|
||||
or that you expect to fail you can mark them accordingly or you
|
||||
may call helper functions during execution of setup or test functions.
|
||||
|
||||
Usually detailed information about skipped/xfailed tests is not shown
|
||||
to avoid cluttering the output. You can use the ``-r`` option to
|
||||
see details corresponding to the "short" letters shown in the
|
||||
test progress::
|
||||
A *skip* means that you expect your test to pass unless a certain
|
||||
configuration or condition (e.g. wrong Python interpreter, missing
|
||||
dependency) prevents it from running. And *xfail* means that your test
|
||||
can run but you expect it to fail because there is an implementation problem.
|
||||
|
||||
py.test -rxs # show extra info on skips and xfail tests
|
||||
py.test counts and lists *skip* and *xfail* tests separately. However,
|
||||
detailed information about skipped/xfailed tests is not shown by default
|
||||
to avoid cluttering the output. You can use the ``-r`` option to see
|
||||
details corresponding to the "short" letters shown in the test
|
||||
progress::
|
||||
|
||||
py.test -rxs # show extra info on skips and xfails
|
||||
|
||||
(See :ref:`how to change command line options defaults`)
|
||||
|
||||
.. _skipif:
|
||||
|
||||
Skipping a single function
|
||||
Marking a test function to be skipped
|
||||
-------------------------------------------
|
||||
|
||||
Here is an example for marking a test function to be skipped
|
||||
Here is an example of marking a test function to be skipped
|
||||
when run on a Python3 interpreter::
|
||||
|
||||
import sys
|
||||
@pytest.mark.skipif("sys.version_info >= (3,0)")
|
||||
def test_function():
|
||||
...
|
||||
|
||||
During test function setup the skipif condition is
|
||||
evaluated by calling ``eval(expr, namespace)``. The namespace
|
||||
contains the ``sys`` and ``os`` modules and the test
|
||||
``config`` object. The latter allows you to skip based
|
||||
on a test configuration value e.g. like this::
|
||||
evaluated by calling ``eval('sys.version_info >= (3,0)', namespace)``.
|
||||
(*New in version 2.0.2*) The namespace contains all the module globals of the test function so that
|
||||
you can for example check for versions of a module you are using::
|
||||
|
||||
@pytest.mark.skipif("not config.getvalue('db')")
|
||||
def test_function(...):
|
||||
import mymodule
|
||||
|
||||
@pytest.mark.skipif("mymodule.__version__ < '1.2'")
|
||||
def test_function():
|
||||
...
|
||||
|
||||
The test function will not be run ("skipped") if
|
||||
``mymodule`` is below the specified version. The reason
|
||||
for specifying the condition as a string is mainly that
|
||||
py.test can report a summary of skip conditions.
|
||||
For information on the construction of the ``namespace``
|
||||
see `evaluation of skipif/xfail conditions`_.
|
||||
|
||||
Create a shortcut for your conditional skip decorator
|
||||
at module level like this::
|
||||
You can of course create a shortcut for your conditional skip
|
||||
decorator at module level like this::
|
||||
|
||||
win32only = pytest.mark.skipif("sys.platform != 'win32'")
|
||||
|
||||
@@ -51,13 +62,12 @@ at module level like this::
|
||||
def test_function():
|
||||
...
|
||||
|
||||
|
||||
skip test functions of a class
|
||||
skip all test functions of a class
|
||||
--------------------------------------
|
||||
|
||||
As with all function :ref:`marking` you can do it at
|
||||
As with all function :ref:`marking <mark>` you can skip test functions at the
|
||||
`whole class- or module level`_. Here is an example
|
||||
for skipping all methods of a test class based on platform::
|
||||
for skipping all methods of a test class based on the platform::
|
||||
|
||||
class TestPosixCalls:
|
||||
pytestmark = pytest.mark.skipif("sys.platform == 'win32'")
|
||||
@@ -65,9 +75,10 @@ for skipping all methods of a test class based on platform::
|
||||
def test_function(self):
|
||||
"will not be setup or run under 'win32' platform"
|
||||
|
||||
The ``pytestmark`` decorator will be applied to each test function.
|
||||
If your code targets python2.6 or above you can equivalently use
|
||||
the skipif decorator on classes::
|
||||
The ``pytestmark`` special name tells py.test to apply it to each test
|
||||
function in the class. If your code targets python2.6 or above you can
|
||||
more naturally use the skipif decorator (and any other marker) on
|
||||
classes::
|
||||
|
||||
@pytest.mark.skipif("sys.platform == 'win32'")
|
||||
class TestPosixCalls:
|
||||
@@ -75,9 +86,7 @@ the skipif decorator on classes::
|
||||
def test_function(self):
|
||||
"will not be setup or run under 'win32' platform"
|
||||
|
||||
It is fine in general to apply multiple "skipif" decorators
|
||||
on a single function - this means that if any of the conditions
|
||||
apply the function will be skipped.
|
||||
Using multiple "skipif" decorators on a single function is generally fine - it means that if any of the conditions apply the function execution will be skipped.
|
||||
|
||||
.. _`whole class- or module level`: mark.html#scoped-marking
|
||||
|
||||
@@ -104,16 +113,16 @@ By specifying on the commandline::
|
||||
you can force the running and reporting of an ``xfail`` marked test
|
||||
as if it weren't marked at all.
|
||||
|
||||
Same as with skipif_ you can also selectively expect a failure
|
||||
depending on platform::
|
||||
As with skipif_ you can also mark your expectation of a failure
|
||||
on a particular platform::
|
||||
|
||||
@pytest.mark.xfail("sys.version_info >= (3,0)")
|
||||
def test_function():
|
||||
...
|
||||
|
||||
You can also avoid running an "xfail" test at all or
|
||||
You can furthermore prevent the running of an "xfail" test or
|
||||
specify a reason such as a bug ID or similar. Here is
|
||||
a simple test file with usages:
|
||||
a simple test file with the several usages:
|
||||
|
||||
.. literalinclude:: example/xfail_demo.py
|
||||
|
||||
@@ -121,10 +130,10 @@ Running it with the report-on-xfail option gives this output::
|
||||
|
||||
example $ py.test -rx xfail_demo.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
collecting ... collected 5 items
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 6 items
|
||||
|
||||
xfail_demo.py xxxxx
|
||||
xfail_demo.py xxxxxx
|
||||
========================= short test summary info ==========================
|
||||
XFAIL xfail_demo.py::test_hello
|
||||
XFAIL xfail_demo.py::test_hello2
|
||||
@@ -134,9 +143,36 @@ Running it with the report-on-xfail option gives this output::
|
||||
XFAIL xfail_demo.py::test_hello4
|
||||
bug 110
|
||||
XFAIL xfail_demo.py::test_hello5
|
||||
condition: pytest.__version__[0] != "17"
|
||||
XFAIL xfail_demo.py::test_hello6
|
||||
reason: reason
|
||||
|
||||
======================== 5 xfailed in 0.04 seconds =========================
|
||||
======================== 6 xfailed in 0.05 seconds =========================
|
||||
|
||||
.. _`evaluation of skipif/xfail conditions`:
|
||||
|
||||
evaluation of skipif/xfail expressions
|
||||
----------------------------------------------------
|
||||
|
||||
.. versionadded:: 2.0.2
|
||||
|
||||
The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)``
|
||||
or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace
|
||||
dictionary which is constructed as follows:
|
||||
|
||||
* the namespace is initialized by putting the ``sys`` and ``os`` modules
|
||||
and the pytest ``config`` object into it.
|
||||
|
||||
* updated with the module globals of the test function for which the
|
||||
expression is applied.
|
||||
|
||||
The pytest ``config`` object allows you to skip based on a test configuration value
|
||||
which you might have added::
|
||||
|
||||
@pytest.mark.skipif("not config.getvalue('db')")
|
||||
def test_function(...):
|
||||
...
|
||||
|
||||
|
||||
imperative xfail from within a test or setup function
|
||||
------------------------------------------------------
|
||||
@@ -147,7 +183,7 @@ within test or setup code. Example::
|
||||
|
||||
def test_function():
|
||||
if not valid_config():
|
||||
pytest.xfail("unsuppored configuration")
|
||||
pytest.xfail("unsupported configuration")
|
||||
|
||||
|
||||
skipping on a missing import dependency
|
||||
@@ -159,8 +195,8 @@ or within a test or test setup function::
|
||||
docutils = pytest.importorskip("docutils")
|
||||
|
||||
If ``docutils`` cannot be imported here, this will lead to a
|
||||
skip outcome of the test. You can also skip dependeing if
|
||||
if a library does not come with a high enough version::
|
||||
skip outcome of the test. You can also skip based on the
|
||||
version number of a library::
|
||||
|
||||
docutils = pytest.importorskip("docutils", minversion="0.3")
|
||||
|
||||
@@ -170,10 +206,10 @@ imperative skip from within a test or setup function
|
||||
------------------------------------------------------
|
||||
|
||||
If for some reason you cannot declare skip-conditions
|
||||
you can also imperatively produce a Skip-outcome from
|
||||
you can also imperatively produce a skip-outcome from
|
||||
within test or setup code. Example::
|
||||
|
||||
def test_function():
|
||||
if not valid_config():
|
||||
pytest.skip("unsuppored configuration")
|
||||
pytest.skip("unsupported configuration")
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ test parametrization:
|
||||
|
||||
distributed testing:
|
||||
|
||||
- `simultanously test your code on all platforms`_ (blog entry)
|
||||
- `simultaneously test your code on all platforms`_ (blog entry)
|
||||
|
||||
plugin specific examples:
|
||||
|
||||
@@ -42,7 +42,7 @@ plugin specific examples:
|
||||
.. _`many examples in the docs for plugins`: plugin/index.html
|
||||
.. _`monkeypatch plugin`: plugin/monkeypatch.html
|
||||
.. _`application setup in test functions with funcargs`: funcargs.html#appsetup
|
||||
.. _`simultanously test your code on all platforms`: http://tetamap.wordpress.com/2009/03/23/new-simultanously-test-your-code-on-all-platforms/
|
||||
.. _`simultaneously test your code on all platforms`: http://tetamap.wordpress.com/2009/03/23/new-simultanously-test-your-code-on-all-platforms/
|
||||
.. _`monkey patching done right`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
|
||||
.. _`putting test-hooks into local or global plugins`: http://tetamap.wordpress.com/2009/05/14/putting-test-hooks-into-local-and-global-plugins/
|
||||
.. _`parametrizing tests, generalized`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/
|
||||
|
||||
@@ -17,7 +17,7 @@ customize_: configuration, customization, extensions
|
||||
|
||||
changelog_: history of changes covering last releases
|
||||
|
||||
**Continous Integration of py.test's own tests and plugins with Hudson**:
|
||||
**Continuous Integration of py.test's own tests and plugins with Hudson**:
|
||||
|
||||
`http://hudson.testrun.org/view/pytest`_
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ Mission
|
||||
py.test strives to make testing a fun and no-boilerplate effort.
|
||||
|
||||
The tool is distributed as part of the `py` package which contains supporting APIs that
|
||||
are also useable independently. The project independent ``py.test`` command line tool helps you to:
|
||||
are also usable independently. The project independent ``py.test`` command line tool helps you to:
|
||||
|
||||
* rapidly collect and run tests
|
||||
* run unit- or doctests, functional or integration tests
|
||||
|
||||
@@ -202,7 +202,7 @@ do normal site initialisation so that the environment variables can be detected
|
||||
started.
|
||||
|
||||
|
||||
Acknowledgements
|
||||
Acknowledgments
|
||||
----------------
|
||||
|
||||
Holger Krekel for pytest with its distributed testing support.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
pytest_oejskit plugin (EXTERNAL)
|
||||
==========================================
|
||||
|
||||
The `oejskit`_ offers a py.test plugin for running Javascript tests in life browers. Running inside the browsers comes with some speed cost, on the other hand it means for example the code is tested against the real-word DOM implementations.
|
||||
The `oejskit`_ offers a py.test plugin for running Javascript tests in life browsers. Running inside the browsers comes with some speed cost, on the other hand it means for example the code is tested against the real-word DOM implementations.
|
||||
The approach enables to write integration tests such that the JavaScript code is tested against server-side Python code mocked as necessary. Any server-side framework that can already be exposed through WSGI (or for which a subset of WSGI can be written to accommodate the jskit own needs) can play along.
|
||||
|
||||
For more info and download please visit the `oejskit PyPI`_ page.
|
||||
|
||||
@@ -129,7 +129,7 @@ put options values in a ``conftest.py`` file like this::
|
||||
option_tx = ['ssh=myhost//python=python2.5', 'popen//python=python2.5']
|
||||
option_dist = True
|
||||
|
||||
Any commandline ``--tx`` specifictions will add to the list of
|
||||
Any commandline ``--tx`` specifications will add to the list of
|
||||
available execution environments.
|
||||
|
||||
Specifying "rsync" dirs in a conftest.py
|
||||
|
||||
@@ -28,7 +28,7 @@ Running this would result in a passed test except for the last
|
||||
|
||||
$ py.test test_tmpdir.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_tmpdir.py F
|
||||
@@ -36,7 +36,7 @@ Running this would result in a passed test except for the last
|
||||
================================= FAILURES =================================
|
||||
_____________________________ test_create_file _____________________________
|
||||
|
||||
tmpdir = local('/tmp/pytest-102/test_create_file0')
|
||||
tmpdir = local('/tmp/pytest-11/test_create_file0')
|
||||
|
||||
def test_create_file(tmpdir):
|
||||
p = tmpdir.mkdir("sub").join("hello.txt")
|
||||
@@ -47,16 +47,16 @@ Running this would result in a passed test except for the last
|
||||
E assert 0
|
||||
|
||||
test_tmpdir.py:7: AssertionError
|
||||
========================= 1 failed in 0.03 seconds =========================
|
||||
========================= 1 failed in 0.06 seconds =========================
|
||||
|
||||
.. _`base temporary directory`:
|
||||
|
||||
the default base temporary directory
|
||||
-----------------------------------------------
|
||||
|
||||
Temporary directories are by default created as sub directories of
|
||||
Temporary directories are by default created as sub-directories of
|
||||
the system temporary directory. The base name will be ``pytest-NUM`` where
|
||||
``NUM`` will be incremenated with each test run. Moreover, entries older
|
||||
``NUM`` will be incremented with each test run. Moreover, entries older
|
||||
than 3 temporary directories will be removed.
|
||||
|
||||
You can override the default temporary directory setting like this::
|
||||
|
||||
@@ -8,7 +8,7 @@ py.test has limited support for running Python `unittest.py style`_ tests.
|
||||
It will automatically collect ``unittest.TestCase`` subclasses
|
||||
and their ``test`` methods in test files. It will invoke
|
||||
``setUp/tearDown`` methods but also perform py.test's standard ways
|
||||
of treating tests like e.g. IO capturing::
|
||||
of treating tests such as IO capturing::
|
||||
|
||||
# content of test_unittest.py
|
||||
|
||||
@@ -24,7 +24,7 @@ Running it yields::
|
||||
|
||||
$ py.test test_unittest.py
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.1
|
||||
platform linux2 -- Python 2.6.6 -- pytest-2.0.3
|
||||
collecting ... collected 1 items
|
||||
|
||||
test_unittest.py F
|
||||
@@ -37,26 +37,12 @@ Running it yields::
|
||||
def test_method(self):
|
||||
x = 1
|
||||
> self.assertEquals(x, 3)
|
||||
E AssertionError: 1 != 3
|
||||
|
||||
test_unittest.py:8:
|
||||
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
|
||||
self = <test_unittest.MyTest testMethod=test_method>, first = 1, second = 3
|
||||
msg = None
|
||||
|
||||
def failUnlessEqual(self, first, second, msg=None):
|
||||
"""Fail if the two objects are unequal as determined by the '=='
|
||||
operator.
|
||||
"""
|
||||
if not first == second:
|
||||
raise self.failureException, \
|
||||
> (msg or '%r != %r' % (first, second))
|
||||
E AssertionError: 1 != 3
|
||||
|
||||
/usr/lib/python2.6/unittest.py:350: AssertionError
|
||||
test_unittest.py:8: AssertionError
|
||||
----------------------------- Captured stdout ------------------------------
|
||||
hello
|
||||
========================= 1 failed in 0.03 seconds =========================
|
||||
========================= 1 failed in 0.02 seconds =========================
|
||||
|
||||
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ calling pytest through ``python -m pytest``
|
||||
|
||||
.. versionadded:: 2.0
|
||||
|
||||
If you use Python-2.5 or above you can invoke testing through the
|
||||
If you use Python-2.5 or later you can invoke testing through the
|
||||
Python interpreter from the command line::
|
||||
|
||||
python -m pytest [...]
|
||||
@@ -20,8 +20,8 @@ Python interpreter from the command line::
|
||||
This is equivalent to invoking the command line script ``py.test [...]``
|
||||
directly.
|
||||
|
||||
Getting help on version, option names, environment vars
|
||||
-----------------------------------------------------------
|
||||
Getting help on version, option names, environment variables
|
||||
--------------------------------------------------------------
|
||||
|
||||
::
|
||||
|
||||
@@ -96,12 +96,12 @@ can use a helper::
|
||||
.. versionadded: 2.0.0
|
||||
|
||||
In previous versions you could only enter PDB tracing if
|
||||
you :ref:`disable capturing`.
|
||||
you disable capturing on the command line via ``py.test -s``.
|
||||
|
||||
creating JUnitXML format files
|
||||
----------------------------------------------------
|
||||
|
||||
To create result files which can be read by Hudson_ or other Continous
|
||||
To create result files which can be read by Hudson_ or other Continuous
|
||||
integration servers, use this invocation::
|
||||
|
||||
py.test --junitxml=path
|
||||
|
||||
@@ -7,13 +7,13 @@ xdist: pytest distributed testing plugin
|
||||
The `pytest-xdist`_ plugin extends py.test with some unique
|
||||
test execution modes:
|
||||
|
||||
* Looponfail: run your tests repeatedly in a subprocess. After each run py.test
|
||||
waits until a file in your project changes and then re-runs the previously
|
||||
failing tests. This is repeated until all tests pass after which again
|
||||
a full run is performed.
|
||||
* Looponfail: run your tests repeatedly in a subprocess. After each
|
||||
run, py.test waits until a file in your project changes and then
|
||||
re-runs the previously failing tests. This is repeated until all
|
||||
tests pass. At this point a full run is again performed.
|
||||
|
||||
* multiprocess Load-balancing: if you have multiple CPUs or hosts you can use
|
||||
those for a combined test run. This allows to speed up
|
||||
them for a combined test run. This allows to speed up
|
||||
development or to use special resources of remote machines.
|
||||
|
||||
* Multi-Platform coverage: you can specify different Python interpreters
|
||||
@@ -25,8 +25,8 @@ are reported back and displayed to your local terminal.
|
||||
You may specify different Python versions and interpreters.
|
||||
|
||||
|
||||
Installation
|
||||
-----------------------
|
||||
Installation of xdist plugin
|
||||
------------------------------
|
||||
|
||||
Install the plugin with::
|
||||
|
||||
@@ -36,7 +36,7 @@ Install the plugin with::
|
||||
|
||||
pip install pytest-xdist
|
||||
|
||||
or use the package in develope/in-place mode with
|
||||
or use the package in develop/in-place mode with
|
||||
a checkout of the `pytest-xdist repository`_ ::
|
||||
|
||||
python setup.py develop
|
||||
@@ -55,13 +55,13 @@ To send tests to multiple CPUs, type::
|
||||
py.test -n NUM
|
||||
|
||||
Especially for longer running tests or tests requiring
|
||||
a lot of IO this can lead to considerable speed ups.
|
||||
a lot of I/O this can lead to considerable speed ups.
|
||||
|
||||
|
||||
Running tests in a Python subprocess
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
To instantiate a python2.4 sub process and send tests to it, you may type::
|
||||
To instantiate a Python-2.4 subprocess and send tests to it, you may type::
|
||||
|
||||
py.test -d --tx popen//python=python2.4
|
||||
|
||||
@@ -70,10 +70,10 @@ Python interpreter, found in your system binary lookup path.
|
||||
|
||||
If you prefix the --tx option value like this::
|
||||
|
||||
--tx 3*popen//python=python2.4
|
||||
py.test -d --tx 3*popen//python=python2.4
|
||||
|
||||
then three subprocesses would be created and tests
|
||||
will be load-balanced across these three processes.
|
||||
then three subprocesses would be created and the tests
|
||||
will be distributed to three subprocesses and run simultanously.
|
||||
|
||||
.. _looponfailing:
|
||||
|
||||
@@ -82,11 +82,13 @@ Running tests in looponfailing mode
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
For refactoring a project with a medium or large test suite
|
||||
you can use the looponfailing mode, simply add the ``--f`` option::
|
||||
you can use the looponfailing mode. Simply add the ``--f`` option::
|
||||
|
||||
py.test -f
|
||||
|
||||
and py.test will run your tests, then wait for file changes and re-run the failing test set. Of course you can pass in more options to select tests or test files. File changes are detected by looking at the root directory - you can override this automatic default by an ini-file setting::
|
||||
and py.test will run your tests. Assuming you have failures it will then
|
||||
wait for file changes and re-run the failing test set. File changes are detected by looking at ``looponfailingroots`` root directories and all of their contents (recursively). If the default for this value does not work for you you
|
||||
can change it in your project by setting a configuration option::
|
||||
|
||||
# content of a pytest.ini, setup.cfg or tox.ini file
|
||||
[pytest]
|
||||
@@ -98,26 +100,28 @@ Sending tests to remote SSH accounts
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
Suppose you have a package ``mypkg`` which contains some
|
||||
tests that you can successfully run locally. And you
|
||||
tests that you can successfully run locally. And you also
|
||||
have a ssh-reachable machine ``myhost``. Then
|
||||
you can ad-hoc distribute your tests by typing::
|
||||
|
||||
py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg
|
||||
|
||||
This will synchronize your ``mypkg`` package directory
|
||||
to an remote ssh account and then locally collect tests
|
||||
and send them to remote places for execution.
|
||||
with a remote ssh account and then collect and run your
|
||||
tests at the remote side.
|
||||
|
||||
You can specify multiple ``--rsyncdir`` directories
|
||||
to be sent to the remote side.
|
||||
|
||||
**NOTE:** For py.test to collect and send tests correctly
|
||||
you not only need to make sure all code and tests
|
||||
directories are rsynced, but that any test (sub) directory
|
||||
also has an ``__init__.py`` file because internally
|
||||
py.test references tests as a fully qualified python
|
||||
module path. **You will otherwise get strange errors**
|
||||
during setup of the remote side.
|
||||
.. XXX CHECK
|
||||
|
||||
**NOTE:** For py.test to collect and send tests correctly
|
||||
you not only need to make sure all code and tests
|
||||
directories are rsynced, but that any test (sub) directory
|
||||
also has an ``__init__.py`` file because internally
|
||||
py.test references tests as a fully qualified python
|
||||
module path. **You will otherwise get strange errors**
|
||||
during setup of the remote side.
|
||||
|
||||
Sending tests to remote Socket Servers
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
@@ -157,9 +161,8 @@ at once. The specifications strings use the `xspec syntax`_.
|
||||
Specifying test exec environments in an ini file
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
pytest (since version 2.0) supports ini-style cofiguration.
|
||||
You can for example make running with three subprocesses
|
||||
your default like this::
|
||||
pytest (since version 2.0) supports ini-style configuration.
|
||||
For example, you could make running with three subprocesses your default::
|
||||
|
||||
[pytest]
|
||||
addopts = -n3
|
||||
|
||||
@@ -39,11 +39,13 @@ class level setup/teardown
|
||||
Similarly, the following methods are called at class level before
|
||||
and after all test methods of the class are called::
|
||||
|
||||
@classmethod
|
||||
def setup_class(cls):
|
||||
""" setup up any state specific to the execution
|
||||
of the given class (which usually contains tests).
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def teardown_class(cls):
|
||||
""" teardown any state that was previously setup
|
||||
with a call to setup_class.
|
||||
@@ -65,7 +67,7 @@ Similarly, the following methods are called around each method invocation::
|
||||
with a setup_method call.
|
||||
"""
|
||||
|
||||
If you rather define test functions directly at module level
|
||||
If you would rather define test functions directly at module level
|
||||
you can also use the following functions to implement fixtures::
|
||||
|
||||
def setup_function(function):
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
"""
|
||||
unit and functional testing with Python.
|
||||
"""
|
||||
__version__ = '2.0.1'
|
||||
__all__ = ['main']
|
||||
|
||||
from _pytest.core import main, UsageError, _preloadplugins
|
||||
from _pytest import core as cmdline
|
||||
from _pytest import __version__
|
||||
|
||||
if __name__ == '__main__': # if run as a script or by 'python -m pytest'
|
||||
raise SystemExit(main())
|
||||
|
||||
6
setup.py
6
setup.py
@@ -22,14 +22,14 @@ def main():
|
||||
name='pytest',
|
||||
description='py.test: simple powerful testing with Python',
|
||||
long_description = long_description,
|
||||
version='2.0.1',
|
||||
version='2.0.3',
|
||||
url='http://pytest.org',
|
||||
license='MIT license',
|
||||
platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
|
||||
author='holger krekel, Guido Wesdorp, Carl Friedrich Bolz, Armin Rigo, Maciej Fijalkowski & others',
|
||||
author_email='holger at merlinux.eu',
|
||||
entry_points= make_entry_points(),
|
||||
install_requires=['py>1.4.0'],
|
||||
install_requires=['py>1.4.1'],
|
||||
classifiers=['Development Status :: 5 - Production/Stable',
|
||||
'Intended Audience :: Developers',
|
||||
'License :: OSI Approved :: MIT License',
|
||||
@@ -67,4 +67,4 @@ def make_entry_points():
|
||||
return {'console_scripts': l}
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
main()
|
||||
@@ -13,6 +13,39 @@ class TestGeneralUsage:
|
||||
'*ERROR: hello'
|
||||
])
|
||||
|
||||
def test_early_hook_error_issue38_1(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_sessionstart():
|
||||
0 / 0
|
||||
""")
|
||||
result = testdir.runpytest(testdir.tmpdir)
|
||||
assert result.ret != 0
|
||||
# tracestyle is native by default for hook failures
|
||||
result.stdout.fnmatch_lines([
|
||||
'*INTERNALERROR*File*conftest.py*line 2*',
|
||||
'*0 / 0*',
|
||||
])
|
||||
result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
|
||||
assert result.ret != 0
|
||||
# tracestyle is native by default for hook failures
|
||||
result.stdout.fnmatch_lines([
|
||||
'*INTERNALERROR*def pytest_sessionstart():*',
|
||||
'*INTERNALERROR*0 / 0*',
|
||||
])
|
||||
|
||||
def test_early_hook_configure_error_issue38(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_configure():
|
||||
0 / 0
|
||||
""")
|
||||
result = testdir.runpytest(testdir.tmpdir)
|
||||
assert result.ret != 0
|
||||
# here we get it on stderr
|
||||
result.stderr.fnmatch_lines([
|
||||
'*INTERNALERROR*File*conftest.py*line 2*',
|
||||
'*0 / 0*',
|
||||
])
|
||||
|
||||
def test_file_not_found(self, testdir):
|
||||
result = testdir.runpytest("asd")
|
||||
assert result.ret != 0
|
||||
@@ -89,9 +122,11 @@ class TestGeneralUsage:
|
||||
import pytest
|
||||
class MyFile(pytest.File):
|
||||
def collect(self):
|
||||
return
|
||||
return [MyItem("hello", parent=self)]
|
||||
def pytest_collect_file(path, parent):
|
||||
return MyFile(path, parent)
|
||||
class MyItem(pytest.Item):
|
||||
pass
|
||||
""")
|
||||
p = testdir.makepyfile("def test_hello(): pass")
|
||||
result = testdir.runpytest(p, "--collectonly")
|
||||
|
||||
@@ -44,7 +44,8 @@ class TestBinReprIntegration:
|
||||
config = testdir.parseconfig()
|
||||
plugin.pytest_configure(config)
|
||||
assert hook != py.code._reprcompare
|
||||
plugin.pytest_unconfigure(config)
|
||||
from _pytest.config import pytest_unconfigure
|
||||
pytest_unconfigure(config)
|
||||
assert hook == py.code._reprcompare
|
||||
|
||||
def callequal(left, right):
|
||||
@@ -114,6 +115,10 @@ class TestAssert_reprcompare:
|
||||
expl = callequal(A(), '')
|
||||
assert not expl
|
||||
|
||||
def test_repr_no_exc(self):
|
||||
expl = ' '.join(callequal('foo', 'bar'))
|
||||
assert 'raised in repr()' not in expl
|
||||
|
||||
def test_reprcompare_notin():
|
||||
detail = plugin.pytest_assertrepr_compare('not in', 'foo', 'aaafoobbb')[1:]
|
||||
assert detail == ["'foo' is contained here:", ' aaafoobbb', '? +++']
|
||||
|
||||
@@ -15,15 +15,10 @@ class TestCollector:
|
||||
""")
|
||||
recwarn.clear()
|
||||
assert modcol.Module == pytest.Module
|
||||
recwarn.pop(DeprecationWarning)
|
||||
assert modcol.Class == pytest.Class
|
||||
recwarn.pop(DeprecationWarning)
|
||||
assert modcol.Item == pytest.Item
|
||||
recwarn.pop(DeprecationWarning)
|
||||
assert modcol.File == pytest.File
|
||||
recwarn.pop(DeprecationWarning)
|
||||
assert modcol.Function == pytest.Function
|
||||
recwarn.pop(DeprecationWarning)
|
||||
|
||||
def test_check_equality(self, testdir):
|
||||
modcol = testdir.getmodulecol("""
|
||||
@@ -99,6 +94,8 @@ class TestCollectFS:
|
||||
tmpdir.ensure(".whatever", 'test_notfound.py')
|
||||
tmpdir.ensure(".bzr", 'test_notfound.py')
|
||||
tmpdir.ensure("normal", 'test_found.py')
|
||||
for x in tmpdir.visit("test_*.py"):
|
||||
x.write("def test_hello(): pass")
|
||||
|
||||
result = testdir.runpytest("--collectonly")
|
||||
s = result.stdout.str()
|
||||
|
||||
@@ -433,6 +433,9 @@ class TestPytestPluginInteractions:
|
||||
pluginmanager.register(p3)
|
||||
methods = pluginmanager.listattr('m')
|
||||
assert methods == [p2.m, p3.m, p1.m]
|
||||
# listattr keeps a cache and deleting
|
||||
# a function attribute requires clearing it
|
||||
pluginmanager._listattrcache.clear()
|
||||
del P1.m.__dict__['tryfirst']
|
||||
|
||||
pytest.mark.trylast(getattr(P2.m, 'im_func', P2.m))
|
||||
|
||||
@@ -58,6 +58,26 @@ class TestPython:
|
||||
assert_attr(fnode, message="test setup failure")
|
||||
assert "ValueError" in fnode.toxml()
|
||||
|
||||
def test_skip_contains_name_reason(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
def test_skip():
|
||||
pytest.skip("hello23")
|
||||
""")
|
||||
result, dom = runandparse(testdir)
|
||||
assert result.ret == 0
|
||||
node = dom.getElementsByTagName("testsuite")[0]
|
||||
assert_attr(node, skips=1)
|
||||
tnode = node.getElementsByTagName("testcase")[0]
|
||||
assert_attr(tnode,
|
||||
classname="test_skip_contains_name_reason",
|
||||
name="test_skip")
|
||||
snode = tnode.getElementsByTagName("skipped")[0]
|
||||
assert_attr(snode,
|
||||
type="pytest.skip",
|
||||
message="hello23",
|
||||
)
|
||||
|
||||
def test_classname_instance(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
class TestClass:
|
||||
@@ -263,3 +283,71 @@ class TestNonPython:
|
||||
assert_attr(fnode, message="test failure")
|
||||
assert "custom item runtest failed" in fnode.toxml()
|
||||
|
||||
|
||||
def test_nullbyte(testdir):
|
||||
# A null byte can not occur in XML (see section 2.2 of the spec)
|
||||
testdir.makepyfile("""
|
||||
import sys
|
||||
def test_print_nullbyte():
|
||||
sys.stdout.write('Here the null -->' + chr(0) + '<--')
|
||||
sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
|
||||
assert False
|
||||
""")
|
||||
xmlf = testdir.tmpdir.join('junit.xml')
|
||||
result = testdir.runpytest('--junitxml=%s' % xmlf)
|
||||
text = xmlf.read()
|
||||
assert '\x00' not in text
|
||||
assert '#x00' in text
|
||||
|
||||
|
||||
def test_nullbyte_replace(testdir):
|
||||
# Check if the null byte gets replaced
|
||||
testdir.makepyfile("""
|
||||
import sys
|
||||
def test_print_nullbyte():
|
||||
sys.stdout.write('Here the null -->' + chr(0) + '<--')
|
||||
sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
|
||||
assert False
|
||||
""")
|
||||
xmlf = testdir.tmpdir.join('junit.xml')
|
||||
result = testdir.runpytest('--junitxml=%s' % xmlf)
|
||||
text = xmlf.read()
|
||||
assert '#x0' in text
|
||||
|
||||
|
||||
def test_invalid_xml_escape(testdir):
|
||||
# Test some more invalid xml chars, the full range should be
|
||||
# tested really but let's just thest the edges of the ranges
|
||||
# intead.
|
||||
# XXX This only tests low unicode character points for now as
|
||||
# there are some issues with the testing infrastructure for
|
||||
# the higher ones.
|
||||
# XXX Testing 0xD (\r) is tricky as it overwrites the just written
|
||||
# line in the output, so we skip it too.
|
||||
global unichr
|
||||
try:
|
||||
unichr(65)
|
||||
except NameError:
|
||||
unichr = chr
|
||||
u = py.builtin._totext
|
||||
invalid = (0x1, 0xB, 0xC, 0xE, 0x19,)
|
||||
# 0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) #, 0x110000)
|
||||
valid = (0x9, 0xA, 0x20,) # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)
|
||||
all = invalid + valid
|
||||
prints = [u(" sys.stdout.write('''0x%X-->%s<--''')") % (i, unichr(i))
|
||||
for i in all]
|
||||
testdir.makepyfile(u("# -*- coding: UTF-8 -*-"),
|
||||
u("import sys"),
|
||||
u("def test_print_bytes():"),
|
||||
u("\n").join(prints),
|
||||
u(" assert False"))
|
||||
xmlf = testdir.tmpdir.join('junit.xml')
|
||||
result = testdir.runpytest('--junitxml=%s' % xmlf)
|
||||
text = xmlf.read()
|
||||
for i in invalid:
|
||||
if i <= 0xFF:
|
||||
assert '#x%02X' % i in text
|
||||
else:
|
||||
assert '#x%04X' % i in text
|
||||
for i in valid:
|
||||
assert chr(i) in text
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import py
|
||||
import pytest
|
||||
import os, sys
|
||||
from _pytest.pytester import LineMatcher, LineComp, HookRecorder
|
||||
@@ -113,3 +114,12 @@ def test_functional(testdir, linecomp):
|
||||
assert res == [42]
|
||||
""")
|
||||
reprec.assertoutcome(passed=1)
|
||||
|
||||
|
||||
def test_makepyfile_unicode(testdir):
|
||||
global unichr
|
||||
try:
|
||||
unichr(65)
|
||||
except NameError:
|
||||
unichr = chr
|
||||
testdir.makepyfile(unichr(0xfffd))
|
||||
|
||||
@@ -46,6 +46,16 @@ class TestClass:
|
||||
l = modcol.collect()
|
||||
assert len(l) == 0
|
||||
|
||||
def test_class_subclassobject(self, testdir):
|
||||
testdir.getmodulecol("""
|
||||
class test(object):
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines([
|
||||
"*collected 0*",
|
||||
])
|
||||
|
||||
class TestGenerator:
|
||||
def test_generative_functions(self, testdir):
|
||||
modcol = testdir.getmodulecol("""
|
||||
@@ -359,8 +369,8 @@ class TestConftestCustomization:
|
||||
if path.basename == "test_xyz.py":
|
||||
return MyModule(path, parent)
|
||||
""")
|
||||
testdir.makepyfile("def some(): pass")
|
||||
testdir.makepyfile(test_xyz="")
|
||||
testdir.makepyfile("def test_some(): pass")
|
||||
testdir.makepyfile(test_xyz="def test_func(): pass")
|
||||
result = testdir.runpytest("--collectonly")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*<Module*test_pytest*",
|
||||
@@ -517,6 +527,10 @@ def test_callspec_repr():
|
||||
repr(cs)
|
||||
|
||||
class TestFillFuncArgs:
|
||||
def test_fillfuncargs_exposed(self):
|
||||
# used by oejskit
|
||||
assert pytest._fillfuncargs == funcargs.fillfuncargs
|
||||
|
||||
def test_funcarg_lookupfails(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_funcarg__xyzsomething(request):
|
||||
@@ -588,7 +602,8 @@ class TestFillFuncArgs:
|
||||
item.config.pluginmanager.register(Provider())
|
||||
if hasattr(item, '_args'):
|
||||
del item._args
|
||||
pytest._fillfuncargs(item)
|
||||
from _pytest.python import fillfuncargs
|
||||
fillfuncargs(item)
|
||||
assert len(item.funcargs) == 1
|
||||
|
||||
class TestRequest:
|
||||
@@ -915,11 +930,12 @@ class TestMetafunc:
|
||||
assert metafunc._calls[2].param == 1
|
||||
|
||||
def test_addcall_funcargs(self):
|
||||
def func(arg1): pass
|
||||
def func(x): pass
|
||||
metafunc = funcargs.Metafunc(func)
|
||||
class obj: pass
|
||||
metafunc.addcall(funcargs={"x": 2})
|
||||
metafunc.addcall(funcargs={"x": 3})
|
||||
pytest.raises(pytest.fail.Exception, "metafunc.addcall({'xyz': 0})")
|
||||
assert len(metafunc._calls) == 2
|
||||
assert metafunc._calls[0].funcargs == {'x': 2}
|
||||
assert metafunc._calls[1].funcargs == {'x': 3}
|
||||
@@ -1003,6 +1019,21 @@ class TestGenfuncFunctional:
|
||||
"*1 failed, 3 passed*"
|
||||
])
|
||||
|
||||
def test_noself_in_method(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
def pytest_generate_tests(metafunc):
|
||||
assert 'xyz' not in metafunc.funcargnames
|
||||
|
||||
class TestHello:
|
||||
def test_hello(xyz):
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest(p)
|
||||
result.stdout.fnmatch_lines([
|
||||
"*1 pass*",
|
||||
])
|
||||
|
||||
|
||||
def test_generate_plugin_and_module(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_generate_tests(metafunc):
|
||||
@@ -1062,6 +1093,21 @@ class TestGenfuncFunctional:
|
||||
"*2 pass*",
|
||||
])
|
||||
|
||||
def test_issue28_setup_method_in_generate_tests(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
def pytest_generate_tests(metafunc):
|
||||
metafunc.addcall({'arg1': 1})
|
||||
|
||||
class TestClass:
|
||||
def test_method(self, arg1):
|
||||
assert arg1 == self.val
|
||||
def setup_method(self, func):
|
||||
self.val = 1
|
||||
""")
|
||||
result = testdir.runpytest(p)
|
||||
result.stdout.fnmatch_lines([
|
||||
"*1 pass*",
|
||||
])
|
||||
|
||||
def test_conftest_funcargs_only_available_in_subdir(testdir):
|
||||
sub1 = testdir.mkpydir("sub1")
|
||||
@@ -1324,5 +1370,3 @@ def test_customize_through_attributes(testdir):
|
||||
"*MyInstance*",
|
||||
"*MyFunction*test_hello*",
|
||||
])
|
||||
|
||||
|
||||
|
||||
@@ -470,3 +470,62 @@ def test_reportchars(testdir):
|
||||
"XPASS*test_3*",
|
||||
"SKIP*four*",
|
||||
])
|
||||
|
||||
@pytest.mark.xfail("hasattr(sys, 'pypy_version_info')")
|
||||
def test_errors_in_xfail_skip_expressions(testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.mark.skipif("asd")
|
||||
def test_nameerror():
|
||||
pass
|
||||
@pytest.mark.xfail("syntax error")
|
||||
def test_syntax():
|
||||
pass
|
||||
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines([
|
||||
"*ERROR*test_nameerror*",
|
||||
"*evaluating*skipif*expression*",
|
||||
"*asd*",
|
||||
"*ERROR*test_syntax*",
|
||||
"*evaluating*xfail*expression*",
|
||||
" syntax error",
|
||||
" ^",
|
||||
"SyntaxError: invalid syntax",
|
||||
"*1 pass*2 error*",
|
||||
])
|
||||
|
||||
def test_xfail_skipif_with_globals(testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
x = 3
|
||||
@pytest.mark.skipif("x == 3")
|
||||
def test_skip1():
|
||||
pass
|
||||
@pytest.mark.xfail("x == 3")
|
||||
def test_boolean():
|
||||
assert 0
|
||||
""")
|
||||
result = testdir.runpytest("-rsx")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*SKIP*x == 3*",
|
||||
"*XFAIL*test_boolean*",
|
||||
"*x == 3*",
|
||||
])
|
||||
|
||||
def test_direct_gives_error(testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.mark.skipif(True)
|
||||
def test_skip1():
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines([
|
||||
"*1 error*",
|
||||
])
|
||||
|
||||
|
||||
|
||||
@@ -4,8 +4,7 @@ terminal reporting of the full testing process.
|
||||
import pytest,py
|
||||
import sys
|
||||
|
||||
from _pytest.terminal import TerminalReporter, \
|
||||
CollectonlyReporter, repr_pythonversion, getreportopt
|
||||
from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt
|
||||
from _pytest import runner
|
||||
|
||||
def basic_run_report(item):
|
||||
@@ -131,6 +130,20 @@ class TestTerminal:
|
||||
"*test_p2.py <- *test_p1.py:2: TestMore.test_p1*",
|
||||
])
|
||||
|
||||
def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
|
||||
a = testdir.mkpydir("a")
|
||||
a.join("test_hello.py").write(py.code.Source("""
|
||||
class TestClass:
|
||||
def test_method(self):
|
||||
pass
|
||||
"""))
|
||||
result = testdir.runpytest("-v")
|
||||
assert result.ret == 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*a/test_hello.py*PASS*",
|
||||
])
|
||||
assert " <- " not in result.stdout.str()
|
||||
|
||||
def test_keyboard_interrupt(self, testdir, option):
|
||||
p = testdir.makepyfile("""
|
||||
def test_foobar():
|
||||
@@ -157,53 +170,35 @@ class TestTerminal:
|
||||
|
||||
|
||||
class TestCollectonly:
|
||||
def test_collectonly_basic(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
def test_collectonly_basic(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
indent = rep.indent
|
||||
rep.config.hook.pytest_collectstart(collector=modcol)
|
||||
linecomp.assert_contains_lines([
|
||||
"<Module 'test_collectonly_basic.py'>"
|
||||
])
|
||||
item = modcol.collect()[0]
|
||||
rep.config.hook.pytest_itemcollected(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
result = testdir.runpytest("--collectonly",)
|
||||
result.stdout.fnmatch_lines([
|
||||
"<Module 'test_collectonly_basic.py'>",
|
||||
" <Function 'test_func'>",
|
||||
])
|
||||
report = rep.config.hook.pytest_make_collect_report(collector=modcol)
|
||||
rep.config.hook.pytest_collectreport(report=report)
|
||||
assert rep.indent == indent
|
||||
|
||||
def test_collectonly_skipped_module(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
def test_collectonly_skipped_module(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
pytest.skip("nomod")
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
cols = list(testdir.genitems([modcol]))
|
||||
assert len(cols) == 0
|
||||
linecomp.assert_contains_lines("""
|
||||
<Module 'test_collectonly_skipped_module.py'>
|
||||
!!! Skipped: nomod !!!
|
||||
pytest.skip("hello")
|
||||
""")
|
||||
result = testdir.runpytest("--collectonly", "-rs")
|
||||
result.stdout.fnmatch_lines([
|
||||
"SKIP*hello*",
|
||||
"*1 skip*",
|
||||
])
|
||||
|
||||
def test_collectonly_failed_module(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
raise ValueError(0)
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
cols = list(testdir.genitems([modcol]))
|
||||
assert len(cols) == 0
|
||||
linecomp.assert_contains_lines("""
|
||||
<Module 'test_collectonly_failed_module.py'>
|
||||
!!! ValueError: 0 !!!
|
||||
""")
|
||||
def test_collectonly_failed_module(self, testdir):
|
||||
testdir.makepyfile("""raise ValueError(0)""")
|
||||
result = testdir.runpytest("--collectonly")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*raise ValueError*",
|
||||
"*1 error*",
|
||||
])
|
||||
|
||||
def test_collectonly_fatal(self, testdir):
|
||||
p1 = testdir.makeconftest("""
|
||||
@@ -228,11 +223,11 @@ class TestCollectonly:
|
||||
stderr = result.stderr.str().strip()
|
||||
#assert stderr.startswith("inserting into sys.path")
|
||||
assert result.ret == 0
|
||||
extra = result.stdout.fnmatch_lines([
|
||||
result.stdout.fnmatch_lines([
|
||||
"*<Module '*.py'>",
|
||||
"* <Function 'test_func1'*>",
|
||||
"* <Class 'TestClass'>",
|
||||
"* <Instance '()'>",
|
||||
#"* <Instance '()'>",
|
||||
"* <Function 'test_method'*>",
|
||||
])
|
||||
|
||||
@@ -241,11 +236,11 @@ class TestCollectonly:
|
||||
result = testdir.runpytest("--collectonly", p)
|
||||
stderr = result.stderr.str().strip()
|
||||
assert result.ret == 1
|
||||
extra = result.stdout.fnmatch_lines(py.code.Source("""
|
||||
*<Module '*.py'>
|
||||
*ImportError*
|
||||
*!!!*failures*!!!
|
||||
*test_collectonly_error.py:1*
|
||||
result.stdout.fnmatch_lines(py.code.Source("""
|
||||
*ERROR*
|
||||
*import Errlk*
|
||||
*ImportError*
|
||||
*1 error*
|
||||
""").strip())
|
||||
|
||||
|
||||
@@ -418,6 +413,7 @@ class TestTerminalFunctional:
|
||||
"*test_verbose_reporting.py:10: test_gen*FAIL*",
|
||||
])
|
||||
assert result.ret == 1
|
||||
|
||||
pytestconfig.pluginmanager.skipifmissing("xdist")
|
||||
result = testdir.runpytest(p1, '-v', '-n 1')
|
||||
result.stdout.fnmatch_lines([
|
||||
@@ -526,7 +522,7 @@ def test_PYTEST_DEBUG(testdir, monkeypatch):
|
||||
result.stderr.fnmatch_lines([
|
||||
"*registered*PluginManager*"
|
||||
])
|
||||
|
||||
|
||||
|
||||
class TestGenericReporting:
|
||||
""" this test class can be subclassed with a different option
|
||||
|
||||
@@ -396,3 +396,15 @@ def test_djangolike_testcase(testdir):
|
||||
"*tearDown()*",
|
||||
"*_post_teardown()*",
|
||||
])
|
||||
|
||||
|
||||
def test_unittest_not_shown_in_traceback(testdir):
|
||||
testdir.makepyfile("""
|
||||
import unittest
|
||||
class t(unittest.TestCase):
|
||||
def test_hello(self):
|
||||
x = 3
|
||||
self.assertEquals(x, 4)
|
||||
""")
|
||||
res = testdir.runpytest()
|
||||
assert "failUnlessEqual" not in res.stdout.str()
|
||||
|
||||
Reference in New Issue
Block a user