Compare commits
54 Commits
| SHA1 |
|---|
| 48e6823c7a |
| 6b4e6eee09 |
| f7648e11d8 |
| 7bb7d1205c |
| a1d41c6811 |
| 58e0301f87 |
| a5e7b2760d |
| efe438d3e8 |
| ec0565fac5 |
| 48a6a504b6 |
| 8f55425898 |
| a51e52aee3 |
| 69dfc75572 |
| 9d3e51af9f |
| f7c1b9087a |
| 36c42b5c15 |
| bc8ee95e72 |
| 979dfd20f2 |
| 67fbd24ebf |
| 7f7589afa9 |
| 4f01cda2a7 |
| bd296c796f |
| 7144cec580 |
| 99a1188287 |
| 0b18b6094e |
| ae53d04780 |
| a324826dfd |
| 29bf205f3a |
| 3b9fd3abd8 |
| 974e4e3a9d |
| 369b7709f7 |
| 78438db752 |
| a2f4a11301 |
| 077c468589 |
| d4fe273b2f |
| 761a95e542 |
| 5ae04397bd |
| 2c230f910d |
| ae54151467 |
| 05af53d160 |
| 448f1c0d9c |
| 346da57a8a |
| 9d92b19ed1 |
| e2201fe3a9 |
| 45b98d6e70 |
| 29b4082b00 |
| 6ac638ba87 |
| f2512017ea |
| 3bd3ba133f |
| be249dcfe5 |
| 45afb1b7d1 |
| 922a283f99 |
| 7e857e9068 |
| ac9192e4f8 |
**.hgtags** (2 additions)

```diff
@@ -42,3 +42,5 @@ c777dcad166548b7499564cb49ae5c8b4b07f935 2.0.3
 49f11dbff725acdcc5fe3657cbcdf9ae04e25bbc 2.0.3
 363e5a5a59c803e6bc176a6f9cc4bf1a1ca2dab0 2.0.3
 e5e1746a197f0398356a43fbe2eebac9690f795d 2.1.0
+5864412c6f3c903384243bd315639d101d7ebc67 2.1.2
+12a05d59249f80276e25fd8b96e8e545b1332b7a 2.1.3
```
**AUTHORS** (1 addition)

```diff
@@ -22,3 +22,4 @@ Jan Balster
 Grig Gheorghiu
 Bob Ippolito
 Christian Tismer
+Daniel Nuri
```
**CHANGELOG** (41 additions)

```diff
@@ -1,3 +1,44 @@
+Changes between 2.1.3 and 2.2.0
+----------------------------------------
+
+- fix issue90: introduce eager tearing down of test items so that
+  teardown functions are called earlier.
+- add an all-powerful metafunc.parametrize function which allows to
+  parametrize test function arguments in multiple steps and therefore
+  from independent plugins and places.
+- add a @pytest.mark.parametrize helper which allows to easily
+  call a test function with different argument values
+- Add examples to the "parametrize" example page, including a quick port
+  of Test scenarios and the new parametrize function and decorator.
+- introduce registration for "pytest.mark.*" helpers via ini-files
+  or through plugin hooks. Also introduce a "--strict" option which
+  will treat unregistered markers as errors,
+  allowing to avoid typos and maintain a well described set of markers
+  for your test suite. See examples at http://pytest.org/latest/mark.html
+  and its links.
+- issue50: introduce "-m marker" option to select tests based on markers
+  (this is a stricter and more predictable version of '-k' in that "-m"
+  only matches complete markers and has more obvious rules for and/or
+  semantics).
+- new feature to help optimizing the speed of your tests:
+  --durations=N option for displaying N slowest test calls
+  and setup/teardown methods.
+- fix issue87: --pastebin now works with python3
+- fix issue89: --pdb with unexpected exceptions in doctest work more sensibly
+- fix and cleanup pytest's own test suite to not leak FDs
+- fix issue83: link to generated funcarg list
+- fix issue74: pyarg module names are now checked against imp.find_module false positives
+- fix compatibility with twisted/trial-11.1.0 use cases
+
+Changes between 2.1.2 and 2.1.3
+----------------------------------------
+
+- fix issue79: assertion rewriting failed on some comparisons in boolops
+- correctly handle zero length arguments (a la pytest '')
+- fix issue67 / junitxml now contains correct test durations, thanks ronny
+- fix issue75 / skipping test failure on jython
+- fix issue77 / Allow assertrepr_compare hook to apply to a subset of tests
+
 Changes between 2.1.1 and 2.1.2
 ----------------------------------------
```
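The two parametrization entry points listed above are easiest to see side by side. A minimal sketch against the 2.2.0 API shown later in this diff (note that `argnames` is given here as a tuple of names; the values are plain Python objects):

```python
import pytest

# Runs the test once per value set below, i.e. three collected test items.
@pytest.mark.parametrize(("input", "expected"), [
    ("3+5", 8),
    ("2+4", 6),
    ("6*9", 54),
])
def test_eval(input, expected):
    assert eval(input) == expected
```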
```diff
@@ -7,6 +7,13 @@ tags: bug 2.4 core xdist
   the protocol now - setup/teardown is called at module level.
   consider making calling of setup/teardown configurable
 
+optimizations
+---------------------------------------------------------------
+tags: 2.4 core
+
+- look at ihook optimization such that all lookups for
+  hooks relating to the same fspath are cached.
+
 fix start/finish partial finailization problem
 ---------------------------------------------------------------
 tags: bug core
```
```diff
@@ -1,2 +1,2 @@
 #
-__version__ = '2.1.2'
+__version__ = '2.2.0'
```
```diff
@@ -43,23 +43,10 @@ def pytest_configure(config):
             mode = "reinterp"
     if mode != "plain":
         _load_modules(mode)
-        def callbinrepr(op, left, right):
-            hook_result = config.hook.pytest_assertrepr_compare(
-                config=config, op=op, left=left, right=right)
-            for new_expl in hook_result:
-                if new_expl:
-                    res = '\n~'.join(new_expl)
-                    if mode == "rewrite":
-                        # The result will be fed back a python % formatting
-                        # operation, which will fail if there are extraneous
-                        # '%'s in the string. Escape them here.
-                        res = res.replace("%", "%%")
-                    return res
         m = monkeypatch()
         config._cleanup.append(m.undo)
         m.setattr(py.builtin.builtins, 'AssertionError',
                   reinterpret.AssertionError)
-        m.setattr(util, '_reprcompare', callbinrepr)
     hook = None
     if mode == "rewrite":
         hook = rewrite.AssertionRewritingHook()
```
```diff
@@ -82,6 +69,24 @@ def pytest_collection(session):
     if hook is not None:
         hook.set_session(session)
 
+def pytest_runtest_setup(item):
+    def callbinrepr(op, left, right):
+        hook_result = item.ihook.pytest_assertrepr_compare(
+            config=item.config, op=op, left=left, right=right)
+        for new_expl in hook_result:
+            if new_expl:
+                res = '\n~'.join(new_expl)
+                if item.config.getvalue("assertmode") == "rewrite":
+                    # The result will be fed back a python % formatting
+                    # operation, which will fail if there are extraneous
+                    # '%'s in the string. Escape them here.
+                    res = res.replace("%", "%%")
+                return res
+    util._reprcompare = callbinrepr
+
+def pytest_runtest_teardown(item):
+    util._reprcompare = None
+
 def pytest_sessionfinish(session):
     hook = session.config._assertstate.hook
     if hook is not None:
```
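The `callbinrepr` helper above feeds the public `pytest_assertrepr_compare` hook, which (per the 2.1.3 notes later on this page) can now apply to a subset of tests. A hedged conftest.py sketch; the `Point` class and the messages are invented for illustration:

```python
# conftest.py
class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)

def pytest_assertrepr_compare(op, left, right):
    # Return custom explanation lines for failing Point == Point asserts.
    if isinstance(left, Point) and isinstance(right, Point) and op == "==":
        return ["comparing Point instances:",
                "   (%r, %r) != (%r, %r)" % (left.x, left.y, right.x, right.y)]
```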
```diff
@@ -484,20 +484,20 @@ class AssertionRewriter(ast.NodeVisitor):
         res_var = self.variable()
         expl_list = self.assign(ast.List([], ast.Load()))
         app = ast.Attribute(expl_list, "append", ast.Load())
-        is_or = isinstance(boolop.op, ast.Or)
+        is_or = int(isinstance(boolop.op, ast.Or))
         body = save = self.statements
         fail_save = self.on_failure
         levels = len(boolop.values) - 1
         self.push_format_context()
         # Process each operand, short-circuting if needed.
         for i, v in enumerate(boolop.values):
-            self.push_format_context()
-            res, expl = self.visit(v)
-            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
             if i:
                 fail_inner = []
                 self.on_failure.append(ast.If(cond, fail_inner, []))
                 self.on_failure = fail_inner
+            self.push_format_context()
+            res, expl = self.visit(v)
+            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
             expl_format = self.pop_format_context(ast.Str(expl))
             call = ast.Call(app, [expl_format], [], None, None)
             self.on_failure.append(ast.Expr(call))
```
```diff
@@ -11,20 +11,23 @@ def pytest_addoption(parser):
     group._addoption('-s', action="store_const", const="no", dest="capture",
         help="shortcut for --capture=no.")
 
+@pytest.mark.tryfirst
+def pytest_cmdline_parse(pluginmanager, args):
+    # we want to perform capturing already for plugin/conftest loading
+    if '-s' in args or "--capture=no" in args:
+        method = "no"
+    elif hasattr(os, 'dup') and '--capture=sys' not in args:
+        method = "fd"
+    else:
+        method = "sys"
+    capman = CaptureManager(method)
+    pluginmanager.register(capman, "capturemanager")
+
 def addouterr(rep, outerr):
     for secname, content in zip(["out", "err"], outerr):
         if content:
             rep.sections.append(("Captured std%s" % secname, content))
 
 def pytest_unconfigure(config):
     # registered in config.py during early conftest.py loading
     capman = config.pluginmanager.getplugin('capturemanager')
     while capman._method2capture:
         name, cap = capman._method2capture.popitem()
         # XXX logging module may wants to close it itself on process exit
         # otherwise we could do finalization here and call "reset()".
         cap.suspend()
 
 class NoCapture:
     def startall(self):
         pass
@@ -36,8 +39,9 @@ class NoCapture:
         return "", ""
 
 class CaptureManager:
-    def __init__(self):
+    def __init__(self, defaultmethod=None):
         self._method2capture = {}
+        self._defaultmethod = defaultmethod
 
     def _maketempfile(self):
         f = py.std.tempfile.TemporaryFile()
@@ -62,14 +66,6 @@ class CaptureManager:
         else:
             raise ValueError("unknown capturing method: %r" % method)
 
-    def _getmethod_preoptionparse(self, args):
-        if '-s' in args or "--capture=no" in args:
-            return "no"
-        elif hasattr(os, 'dup') and '--capture=sys' not in args:
-            return "fd"
-        else:
-            return "sys"
-
     def _getmethod(self, config, fspath):
         if config.option.capture:
             method = config.option.capture
@@ -82,16 +78,22 @@ class CaptureManager:
             method = "sys"
         return method
 
+    def reset_capturings(self):
+        for name, cap in self._method2capture.items():
+            cap.reset()
+
     def resumecapture_item(self, item):
         method = self._getmethod(item.config, item.fspath)
         if not hasattr(item, 'outerr'):
             item.outerr = ('', '') # we accumulate outerr on the item
         return self.resumecapture(method)
 
-    def resumecapture(self, method):
+    def resumecapture(self, method=None):
         if hasattr(self, '_capturing'):
             raise ValueError("cannot resume, already capturing with %r" %
                 (self._capturing,))
+        if method is None:
+            method = self._defaultmethod
         cap = self._method2capture.get(method)
         self._capturing = method
         if cap is None:
@@ -161,17 +163,6 @@ class CaptureManager:
     def pytest_runtest_teardown(self, item):
         self.resumecapture_item(item)
 
-    def pytest__teardown_final(self, __multicall__, session):
-        method = self._getmethod(session.config, None)
-        self.resumecapture(method)
-        try:
-            rep = __multicall__.execute()
-        finally:
-            outerr = self.suspendcapture()
-        if rep:
-            addouterr(rep, outerr)
-        return rep
-
     def pytest_keyboard_interrupt(self, excinfo):
         if hasattr(self, '_capturing'):
             self.suspendcapture()
```
```diff
@@ -11,8 +11,12 @@ def pytest_cmdline_parse(pluginmanager, args):
     return config
 
 def pytest_unconfigure(config):
-    for func in config._cleanup:
-        func()
+    while 1:
+        try:
+            fin = config._cleanup.pop()
+        except IndexError:
+            break
+        fin()
 
 class Parser:
     """ Parser for command line arguments. """
```
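Plugins schedule teardown work by appending zero-argument callables to `config._cleanup`; the pop-loop above also runs finalizers that get registered while earlier finalizers execute. A minimal sketch (the log file is a hypothetical resource):

```python
# conftest.py
def pytest_configure(config):
    log = open("pytest-run.log", "w")  # hypothetical resource
    config._cleanup.append(log.close)  # popped and called at unconfigure time
```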
```diff
@@ -79,6 +83,7 @@ class Parser:
         self._inidict[name] = (help, type, default)
         self._ininames.append(name)
 
+
 class OptionGroup:
     def __init__(self, name, description="", parser=None):
         self.name = name
@@ -254,11 +259,14 @@ class Config(object):
         self.hook = self.pluginmanager.hook
         self._inicache = {}
         self._cleanup = []
 
     @classmethod
     def fromdictargs(cls, option_dict, args):
         """ constructor useable for subprocesses. """
         config = cls()
+        # XXX slightly crude way to initialize capturing
+        import _pytest.capture
+        _pytest.capture.pytest_cmdline_parse(config.pluginmanager, args)
         config._preparse(args, addopts=False)
         config.option.__dict__.update(option_dict)
         for x in config.option.plugins:
@@ -283,11 +291,10 @@ class Config(object):
 
     def _setinitialconftest(self, args):
         # capture output during conftest init (#issue93)
-        from _pytest.capture import CaptureManager
-        capman = CaptureManager()
-        self.pluginmanager.register(capman, 'capturemanager')
-        # will be unregistered in capture.py's unconfigure()
-        capman.resumecapture(capman._getmethod_preoptionparse(args))
+        # XXX introduce load_conftest hook to avoid needing to know
+        # about capturing plugin here
+        capman = self.pluginmanager.getplugin("capturemanager")
+        capman.resumecapture()
         try:
             try:
                 self._conftest.setinitial(args)
```
```diff
@@ -340,6 +347,14 @@ class Config(object):
             args.append(py.std.os.getcwd())
         self.args = args
 
+    def addinivalue_line(self, name, line):
+        """ add a line to an ini-file option. The option must have been
+        declared but might not yet be set in which case the line becomes
+        the first line in its value. """
+        x = self.getini(name)
+        assert isinstance(x, list)
+        x.append(line) # modifies the cached list inline
+
     def getini(self, name):
         """ return configuration value from an ini file. If the
         specified name hasn't been registered through a prior ``parse.addini``
```
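This is the method the builtin plugins in this changeset use to register marker descriptions; a third-party plugin or conftest can do the same (the marker name below is invented):

```python
# conftest.py
def pytest_configure(config):
    # Shows up in `py.test --markers` and counts as registered under --strict.
    config.addinivalue_line("markers",
        "slowtest: mark a test as slow-running")
```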
```diff
@@ -421,7 +436,7 @@ class Config(object):
 
 def getcfg(args, inibasenames):
-    args = [x for x in args if str(x)[0] != "-"]
+    args = [x for x in args if not str(x).startswith("-")]
     if not args:
         args = [py.path.local()]
     for arg in args:
```
```diff
@@ -211,6 +211,14 @@ class PluginManager(object):
             self.register(mod, modname)
             self.consider_module(mod)
 
+    def pytest_configure(self, config):
+        config.addinivalue_line("markers",
+            "tryfirst: mark a hook implementation function such that the "
+            "plugin machinery will try to call it first/as early as possible.")
+        config.addinivalue_line("markers",
+            "trylast: mark a hook implementation function such that the "
+            "plugin machinery will try to call it last/as late as possible.")
+
     def pytest_plugin_registered(self, plugin):
         import pytest
         dic = self.call_plugin(plugin, "pytest_namespace", {}) or {}
@@ -431,10 +439,7 @@ _preinit = []
 
 def _preloadplugins():
     _preinit.append(PluginManager(load=True))
 
-def main(args=None, plugins=None):
-    """ returned exit code integer, after an in-process testing run
-    with the given command line arguments, preloading an optional list
-    of passed in plugin objects. """
+def _prepareconfig(args=None, plugins=None):
     if args is None:
         args = sys.argv[1:]
     elif isinstance(args, py.path.local):
```
```diff
@@ -448,13 +453,19 @@ def main(args=None, plugins=None):
     else: # subsequent calls to main will create a fresh instance
         _pluginmanager = PluginManager(load=True)
     hook = _pluginmanager.hook
+    if plugins:
+        for plugin in plugins:
+            _pluginmanager.register(plugin)
+    return hook.pytest_cmdline_parse(
+            pluginmanager=_pluginmanager, args=args)
+
+def main(args=None, plugins=None):
+    """ returned exit code integer, after an in-process testing run
+    with the given command line arguments, preloading an optional list
+    of passed in plugin objects. """
     try:
-        if plugins:
-            for plugin in plugins:
-                _pluginmanager.register(plugin)
-        config = hook.pytest_cmdline_parse(
-                pluginmanager=_pluginmanager, args=args)
-        exitstatus = hook.pytest_cmdline_main(config=config)
+        config = _prepareconfig(args, plugins)
+        exitstatus = config.hook.pytest_cmdline_main(config=config)
     except UsageError:
         e = sys.exc_info()[1]
         sys.stderr.write("ERROR: %s\n" %(e.args[0],))
```
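The public entry point keeps its contract through this refactoring; `_prepareconfig` is internal. A hedged usage sketch (the test directory name is invented):

```python
import pytest

# In-process run, equivalent to the command line; returns the exit status.
exitstatus = pytest.main(["-q", "testing/"])
```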
```diff
@@ -56,6 +56,7 @@ def pytest_cmdline_main(config):
     elif config.option.help:
         config.pluginmanager.do_configure(config)
         showhelp(config)
+        config.pluginmanager.do_unconfigure(config)
         return 0
 
 def showhelp(config):
@@ -113,7 +114,7 @@ def pytest_report_header(config):
         verinfo = getpluginversioninfo(config)
         if verinfo:
             lines.extend(verinfo)
 
     if config.option.traceconfig:
         lines.append("active plugins:")
         plugins = []
```
```diff
@@ -149,7 +149,8 @@ def pytest_runtest_makereport(item, call):
 pytest_runtest_makereport.firstresult = True
 
 def pytest_runtest_logreport(report):
-    """ process item test report. """
+    """ process a test setup/call/teardown report relating to
+    the respective phase of executing a test. """
 
 # special handling for final teardown - somewhat internal for now
 def pytest__teardown_final(session):
```
```diff
@@ -71,13 +71,12 @@ class LogXML(object):
         self.test_logs = []
         self.passed = self.skipped = 0
         self.failed = self.errors = 0
-        self._durations = {}
 
     def _opentestcase(self, report):
         names = report.nodeid.split("::")
         names[0] = names[0].replace("/", '.')
         names = tuple(names)
-        d = {'time': self._durations.pop(report.nodeid, "0")}
+        d = {'time': getattr(report, 'duration', 0)}
         names = [x.replace(".py", "") for x in names if x != "()"]
         classnames = names[:-1]
         if self.prefix:
@@ -167,7 +166,8 @@ class LogXML(object):
 
     def pytest_runtest_logreport(self, report):
         if report.passed:
-            self.append_pass(report)
+            if report.when == "call": # ignore setup/teardown
+                self.append_pass(report)
         elif report.failed:
             if report.when != "call":
                 self.append_error(report)
@@ -176,13 +176,6 @@ class LogXML(object):
         elif report.skipped:
             self.append_skipped(report)
 
-    def pytest_runtest_call(self, item, __multicall__):
-        start = time.time()
-        try:
-            return __multicall__.execute()
-        finally:
-            self._durations[item.nodeid] = time.time() - start
-
     def pytest_collectreport(self, report):
         if not report.passed:
             if report.failed:
```
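junitxml now reads timings from the report object instead of measuring them itself, so any plugin can consume the same attribute. A hedged conftest sketch (the one-second threshold is arbitrary):

```python
# conftest.py
def pytest_runtest_logreport(report):
    if report.when == "call" and report.duration > 1.0:
        print("slow: %s took %.2fs" % (report.nodeid, report.duration))
```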
```diff
@@ -11,6 +11,8 @@ EXIT_TESTSFAILED = 1
 EXIT_INTERRUPTED = 2
 EXIT_INTERNALERROR = 3
 
+name_re = py.std.re.compile("^[a-zA-Z_]\w*$")
+
 def pytest_addoption(parser):
     parser.addini("norecursedirs", "directory patterns to avoid for recursion",
         type="args", default=('.*', 'CVS', '_darcs', '{arch}'))
@@ -27,6 +29,9 @@ def pytest_addoption(parser):
                action="store", type="int", dest="maxfail", default=0,
                help="exit after first num failures or errors.")
 
+    group._addoption('--strict', action="store_true",
+               help="run pytest in strict mode, warnings become errors.")
+
     group = parser.getgroup("collect", "collection")
     group.addoption('--collectonly',
         action="store_true", dest="collectonly",
```
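Combined with the ini-file marker registration introduced in this release, `--strict` turns marker typos into errors. A hedged sketch of a project ini file (the marker names are invented):

```ini
# pytest.ini -- with `py.test --strict`, an unregistered @pytest.mark.<name>
# raises an error instead of silently creating a new marker.
[pytest]
markers =
    slowtest: mark a test as slow-running
    webtest: mark a test as exercising the web layer
```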
```diff
@@ -48,7 +53,7 @@ def pytest_addoption(parser):
 def pytest_namespace():
     collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
     return dict(collect=collect)
 
 def pytest_configure(config):
     py.test.config = config # compatibiltiy
     if config.option.exitfirst:
@@ -77,11 +82,11 @@ def wrap_session(config, doit):
             session.exitstatus = EXIT_INTERNALERROR
             if excinfo.errisinstance(SystemExit):
                 sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
-    if not session.exitstatus and session._testsfailed:
-        session.exitstatus = EXIT_TESTSFAILED
     if initstate >= 2:
         config.hook.pytest_sessionfinish(session=session,
-            exitstatus=session.exitstatus)
+            exitstatus=session.exitstatus or (session._testsfailed and 1))
+    if not session.exitstatus and session._testsfailed:
+        session.exitstatus = EXIT_TESTSFAILED
     if initstate >= 1:
         config.pluginmanager.do_unconfigure(config)
     return session.exitstatus
@@ -101,7 +106,7 @@ def pytest_collection(session):
 def pytest_runtestloop(session):
     if session.config.option.collectonly:
         return True
-    for item in session.session.items:
+    for item in session.items:
         item.config.hook.pytest_runtest_protocol(item=item)
         if session.shouldstop:
             raise session.Interrupted(session.shouldstop)
@@ -132,7 +137,7 @@ def compatproperty(name):
         return getattr(pytest, name)
     return property(fget, None, None,
                     "deprecated attribute %r, use pytest.%s" % (name,name))
 
 class Node(object):
     """ base class for all Nodes in the collection tree.
     Collector subclasses have children, Items are terminal nodes."""
@@ -143,13 +148,13 @@ class Node(object):
 
         #: the parent collector node.
         self.parent = parent
 
         #: the test config object
         self.config = config or parent.config
 
         #: the collection this node is part of
         self.session = session or parent.session
 
         #: filesystem path where this node was collected from
         self.fspath = getattr(parent, 'fspath', None)
         self.ihook = self.session.gethookproxy(self.fspath)
@@ -325,6 +330,8 @@ class Item(Node):
     """ a basic test invocation item. Note that for a single function
     there might be multiple test invocation items.
     """
+    nextitem = None
+
     def reportinfo(self):
         return self.fspath, None, ""
@@ -472,6 +479,13 @@ class Session(FSCollector):
         mod = None
         path = [os.path.abspath('.')] + sys.path
         for name in x.split('.'):
+            # ignore anything that's not a proper name here
+            # else something like --pyargs will mess up '.'
+            # since imp.find_module will actually sometimes work for it
+            # but it's supposed to be considered a filesystem path
+            # not a package
+            if name_re.match(name) is None:
+                return x
             try:
                 fd, mod, type_ = imp.find_module(name, path)
             except ImportError:
@@ -479,7 +493,7 @@ class Session(FSCollector):
             else:
                 if fd is not None:
                     fd.close()
 
             if type_[2] != imp.PKG_DIRECTORY:
                 path = [os.path.dirname(mod)]
             else:
@@ -502,7 +516,7 @@ class Session(FSCollector):
                 raise pytest.UsageError(msg + arg)
             parts[0] = path
         return parts
 
     def matchnodes(self, matching, names):
         self.trace("matchnodes", matching, names)
         self.trace.root.indent += 1
```
```diff
@@ -14,12 +14,37 @@ def pytest_addoption(parser):
                "Terminate expression with ':' to make the first match match "
                "all subsequent tests (usually file-order). ")
 
+    group._addoption("-m",
+        action="store", dest="markexpr", default="", metavar="MARKEXPR",
+        help="only run tests matching given mark expression. "
+             "example: -m 'mark1 and not mark2'."
+        )
+
+    group.addoption("--markers", action="store_true", help=
+        "show markers (builtin, plugin and per-project ones).")
+
+    parser.addini("markers", "markers for test functions", 'linelist')
+
+def pytest_cmdline_main(config):
+    if config.option.markers:
+        config.pluginmanager.do_configure(config)
+        tw = py.io.TerminalWriter()
+        for line in config.getini("markers"):
+            name, rest = line.split(":", 1)
+            tw.write("@pytest.mark.%s:" % name, bold=True)
+            tw.line(rest)
+            tw.line()
+        config.pluginmanager.do_unconfigure(config)
+        return 0
+pytest_cmdline_main.tryfirst = True
+
 def pytest_collection_modifyitems(items, config):
     keywordexpr = config.option.keyword
-    if not keywordexpr:
+    matchexpr = config.option.markexpr
+    if not keywordexpr and not matchexpr:
         return
     selectuntil = False
-    if keywordexpr[-1] == ":":
+    if keywordexpr[-1:] == ":":
         selectuntil = True
         keywordexpr = keywordexpr[:-1]
@@ -29,21 +54,38 @@ def pytest_collection_modifyitems(items, config):
         if keywordexpr and skipbykeyword(colitem, keywordexpr):
             deselected.append(colitem)
         else:
-            remaining.append(colitem)
             if selectuntil:
                 keywordexpr = None
+            if matchexpr:
+                if not matchmark(colitem, matchexpr):
+                    deselected.append(colitem)
+                    continue
+            remaining.append(colitem)
 
     if deselected:
         config.hook.pytest_deselected(items=deselected)
         items[:] = remaining
 
+class BoolDict:
+    def __init__(self, mydict):
+        self._mydict = mydict
+    def __getitem__(self, name):
+        return name in self._mydict
+
+def matchmark(colitem, matchexpr):
+    return eval(matchexpr, {}, BoolDict(colitem.obj.__dict__))
+
+def pytest_configure(config):
+    if config.option.strict:
+        pytest.mark._config = config
+
 def skipbykeyword(colitem, keywordexpr):
     """ return True if they given keyword expression means to
     skip this collector/item.
     """
     if not keywordexpr:
         return
 
     itemkeywords = getkeywords(colitem)
     for key in filter(None, keywordexpr.split()):
         eor = key[:1] == '-'
```
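What `matchmark` enables in practice: marker names act as booleans inside the `-m` expression. A hedged sketch (the marker name is invented and should be registered as described above):

```python
# test_web.py
# run with: py.test -m webtest          -> selects only test_send_http
#      or:  py.test -m 'not webtest'    -> deselects it
import pytest

@pytest.mark.webtest
def test_send_http():
    pass

def test_something_quick():
    pass
```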
```diff
@@ -77,15 +119,31 @@ class MarkGenerator:
          @py.test.mark.slowtest
          def test_function():
             pass
 
     will set a 'slowtest' :class:`MarkInfo` object
     on the ``test_function`` object. """
 
     def __getattr__(self, name):
         if name[0] == "_":
             raise AttributeError(name)
+        if hasattr(self, '_config'):
+            self._check(name)
         return MarkDecorator(name)
 
+    def _check(self, name):
+        try:
+            if name in self._markers:
+                return
+        except AttributeError:
+            pass
+        self._markers = l = set()
+        for line in self._config.getini("markers"):
+            beginning = line.split(":", 1)
+            x = beginning[0].split("(", 1)[0]
+            l.add(x)
+        if name not in self._markers:
+            raise AttributeError("%r not a registered marker" % (name,))
+
 class MarkDecorator:
     """ A decorator for test functions and test classes. When applied
     it will create :class:`MarkInfo` objects which may be
```
```diff
@@ -38,7 +38,11 @@ def pytest_unconfigure(config):
         del tr._tw.__dict__['write']
 
 def getproxy():
-    return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
+    if sys.version_info < (3, 0):
+        from xmlrpclib import ServerProxy
+    else:
+        from xmlrpc.client import ServerProxy
+    return ServerProxy(url.xmlrpc).pastes
 
 def pytest_terminal_summary(terminalreporter):
     if terminalreporter.config.option.pastebin != "failed":
```
```diff
@@ -70,7 +70,13 @@ class PdbInvoke:
         tw.sep(">", "traceback")
         rep.toterminal(tw)
         tw.sep(">", "entering PDB")
-        post_mortem(call.excinfo._excinfo[2])
+        # A doctest.UnexpectedException is not useful for post_mortem.
+        # Use the underlying exception instead:
+        if isinstance(call.excinfo.value, py.std.doctest.UnexpectedException):
+            tb = call.excinfo.value.exc_info[2]
+        else:
+            tb = call.excinfo._excinfo[2]
+        post_mortem(tb)
         rep._pdbshown = True
         return rep
```
```diff
@@ -25,6 +25,7 @@ def pytest_configure(config):
         _pytest_fullpath
     except NameError:
         _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
 
 def pytest_funcarg___pytest(request):
     return PytestArg(request)
```
```diff
@@ -313,16 +314,6 @@ class TmpTestdir:
             result.extend(session.genitems(colitem))
         return result
 
-    def inline_genitems(self, *args):
-        #config = self.parseconfig(*args)
-        config = self.parseconfigure(*args)
-        rec = self.getreportrecorder(config)
-        session = Session(config)
-        config.hook.pytest_sessionstart(session=session)
-        session.perform_collect()
-        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
-        return session.items, rec
-
     def runitem(self, source):
         # used from runner functional tests
         item = self.getitem(source)
```
```diff
@@ -343,64 +334,57 @@ class TmpTestdir:
         l = list(args) + [p]
         reprec = self.inline_run(*l)
         reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 1, reports
-        return reports[0]
+        assert len(reports) == 3, reports # setup/call/teardown
+        return reports[1]
 
+    def inline_genitems(self, *args):
+        return self.inprocess_run(list(args) + ['--collectonly'])
+
     def inline_run(self, *args):
-        args = ("-s", ) + args # otherwise FD leakage
-        config = self.parseconfig(*args)
-        reprec = self.getreportrecorder(config)
-        #config.pluginmanager.do_configure(config)
-        config.hook.pytest_cmdline_main(config=config)
-        #config.pluginmanager.do_unconfigure(config)
-        return reprec
+        items, rec = self.inprocess_run(args)
+        return rec
 
-    def config_preparse(self):
-        config = self.Config()
-        for plugin in self.plugins:
-            if isinstance(plugin, str):
-                config.pluginmanager.import_plugin(plugin)
-            else:
-                if isinstance(plugin, dict):
-                    plugin = PseudoPlugin(plugin)
-                if not config.pluginmanager.isregistered(plugin):
-                    config.pluginmanager.register(plugin)
-        return config
+    def inprocess_run(self, args, plugins=None):
+        rec = []
+        items = []
+        class Collect:
+            def pytest_configure(x, config):
+                rec.append(self.getreportrecorder(config))
+            def pytest_itemcollected(self, item):
+                items.append(item)
+        if not plugins:
+            plugins = []
+        plugins.append(Collect())
+        ret = self.pytestmain(list(args), plugins=[Collect()])
+        reprec = rec[0]
+        reprec.ret = ret
+        assert len(rec) == 1
+        return items, reprec
 
     def parseconfig(self, *args):
         if not args:
             args = (self.tmpdir,)
-        config = self.config_preparse()
-        args = list(args)
+        args = [str(x) for x in args]
         for x in args:
             if str(x).startswith('--basetemp'):
                 break
         else:
             args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
-        config.parse(args)
+        import _pytest.core
+        config = _pytest.core._prepareconfig(args, self.plugins)
+        # the in-process pytest invocation needs to avoid leaking FDs
+        # so we register a "reset_capturings" callmon the capturing manager
+        # and make sure it gets called
+        config._cleanup.append(
+            config.pluginmanager.getplugin("capturemanager").reset_capturings)
+        import _pytest.config
+        self.request.addfinalizer(
+            lambda: _pytest.config.pytest_unconfigure(config))
         return config
 
     def reparseconfig(self, args=None):
         """ this is used from tests that want to re-invoke parse(). """
         if not args:
             args = [self.tmpdir]
         oldconfig = getattr(py.test, 'config', None)
         try:
             c = py.test.config = self.Config()
             c.basetemp = py.path.local.make_numbered_dir(prefix="reparse",
                 keep=0, rootdir=self.tmpdir, lock_timeout=None)
             c.parse(args)
             c.pluginmanager.do_configure(c)
             self.request.addfinalizer(lambda: c.pluginmanager.do_unconfigure(c))
             return c
         finally:
             py.test.config = oldconfig
 
     def parseconfigure(self, *args):
         config = self.parseconfig(*args)
         config.pluginmanager.do_configure(config)
         self.request.addfinalizer(lambda:
-            config.pluginmanager.do_unconfigure(config))
+                config.pluginmanager.do_unconfigure(config))
         return config
 
     def getitem(self, source, funcname="test_func"):
```
```diff
@@ -420,7 +404,6 @@ class TmpTestdir:
         self.makepyfile(__init__ = "#")
         self.config = config = self.parseconfigure(path, *configargs)
         node = self.getnode(config, path)
-        #config.pluginmanager.do_unconfigure(config)
         return node
 
     def collect_by_name(self, modcol, name):
```
```diff
@@ -437,9 +420,16 @@ class TmpTestdir:
         return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
 
     def pytestmain(self, *args, **kwargs):
-        ret = pytest.main(*args, **kwargs)
-        if ret == 2:
-            raise KeyboardInterrupt()
+        class ResetCapturing:
+            @pytest.mark.trylast
+            def pytest_unconfigure(self, config):
+                capman = config.pluginmanager.getplugin("capturemanager")
+                capman.reset_capturings()
+        plugins = kwargs.setdefault("plugins", [])
+        rc = ResetCapturing()
+        plugins.append(rc)
+        return pytest.main(*args, **kwargs)
 
     def run(self, *cmdargs):
         return self._run(*cmdargs)
```
```diff
@@ -528,6 +518,8 @@ class TmpTestdir:
         pexpect = py.test.importorskip("pexpect", "2.4")
+        if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine():
+            pytest.skip("pypy-64 bit not supported")
         if sys.platform == "darwin":
             pytest.xfail("pexpect does not work reliably on darwin?!")
         logfile = self.tmpdir.join("spawn.out")
         child = pexpect.spawn(cmd, logfile=logfile.open("w"))
         child.timeout = expect_timeout
```
```diff
@@ -540,10 +532,6 @@ def getdecoded(out):
         return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
                 py.io.saferepr(out),)
 
-class PseudoPlugin:
-    def __init__(self, vars):
-        self.__dict__.update(vars)
-
 class ReportRecorder(object):
     def __init__(self, hook):
         self.hook = hook
```
```diff
@@ -565,10 +553,17 @@ class ReportRecorder(object):
     def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
         return [x.report for x in self.getcalls(names)]
 
-    def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None):
+    def matchreport(self, inamepart="",
+        names="pytest_runtest_logreport pytest_collectreport", when=None):
         """ return a testreport whose dotted import path matches """
         l = []
         for rep in self.getreports(names=names):
+            try:
+                if not when and rep.when != "call" and rep.passed:
+                    # setup/teardown passing reports - let's ignore those
+                    continue
+            except AttributeError:
+                pass
             if when and getattr(rep, 'when', None) != when:
                 continue
             if not inamepart or inamepart in rep.nodeid.split("::"):
```
```diff
@@ -4,6 +4,7 @@ import inspect
 import sys
 import pytest
 from py._code.code import TerminalRepr
+from _pytest.monkeypatch import monkeypatch
 
 import _pytest
 cutdir = py.path.local(_pytest.__file__).dirpath()
@@ -26,6 +27,23 @@ def pytest_cmdline_main(config):
         showfuncargs(config)
         return 0
 
+def pytest_generate_tests(metafunc):
+    try:
+        param = metafunc.function.parametrize
+    except AttributeError:
+        return
+    metafunc.parametrize(*param.args, **param.kwargs)
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "parametrize(argnames, argvalues): call a test function multiple "
+        "times passing in multiple different argument value sets. Example: "
+        "@parametrize('arg1', [1,2]) would lead to two calls of the decorated "
+        "test function, one with arg1=1 and another with arg1=2."
+    )
+
 @pytest.mark.trylast
 def pytest_namespace():
     raises.Exception = pytest.fail.Exception
@@ -369,6 +387,7 @@ class FuncargLookupErrorRepr(TerminalRepr):
         tw.line()
         tw.line("%s:%d" % (self.filename, self.firstlineno+1))
 
+
 class Generator(FunctionMixin, PyCollectorMixin, pytest.Collector):
     def collect(self):
         # test generators are seen as collectors but they also
@@ -425,6 +444,7 @@ class Function(FunctionMixin, pytest.Item):
                 "yielded functions (deprecated) cannot have funcargs")
         else:
             if callspec is not None:
                 self.callspec = callspec
+                self.funcargs = callspec.funcargs or {}
                 self._genid = callspec.id
                 if hasattr(callspec, "param"):
```
```diff
@@ -501,15 +521,59 @@ def fillfuncargs(function):
         request._fillfuncargs()
 
 _notexists = object()
-class CallSpec:
-    def __init__(self, funcargs, id, param):
-        self.funcargs = funcargs
-        self.id = id
+
+class CallSpec2(object):
+    def __init__(self, metafunc):
+        self.metafunc = metafunc
+        self.funcargs = {}
+        self._idlist = []
+        self.params = {}
+        self._globalid = _notexists
+        self._globalid_args = set()
+        self._globalparam = _notexists
+
+    def copy(self, metafunc):
+        cs = CallSpec2(self.metafunc)
+        cs.funcargs.update(self.funcargs)
+        cs.params.update(self.params)
+        cs._idlist = list(self._idlist)
+        cs._globalid = self._globalid
+        cs._globalid_args = self._globalid_args
+        cs._globalparam = self._globalparam
+        return cs
+
+    def _checkargnotcontained(self, arg):
+        if arg in self.params or arg in self.funcargs:
+            raise ValueError("duplicate %r" %(arg,))
+
+    def getparam(self, name):
+        try:
+            return self.params[name]
+        except KeyError:
+            if self._globalparam is _notexists:
+                raise ValueError(name)
+            return self._globalparam
+
+    @property
+    def id(self):
+        return "-".join(filter(None, self._idlist))
+
+    def setmulti(self, valtype, argnames, valset, id):
+        for arg,val in zip(argnames, valset):
+            self._checkargnotcontained(arg)
+            getattr(self, valtype)[arg] = val
+        self._idlist.append(id)
+
+    def setall(self, funcargs, id, param):
+        for x in funcargs:
+            self._checkargnotcontained(x)
+        self.funcargs.update(funcargs)
+        if id is not _notexists:
+            self._idlist.append(id)
         if param is not _notexists:
-            self.param = param
-    def __repr__(self):
-        return "<CallSpec id=%r param=%r funcargs=%r>" %(
-            self.id, getattr(self, 'param', '?'), self.funcargs)
+            assert self._globalparam is _notexists
+            self._globalparam = param
 
 class Metafunc:
     def __init__(self, function, config=None, cls=None, module=None):
```
```diff
@@ -523,31 +587,69 @@ class Metafunc:
         self._calls = []
         self._ids = py.builtin.set()
 
+    def parametrize(self, argnames, argvalues, indirect=False, ids=None):
+        """ parametrize calls to the underlying test function during
+        the collection phase of a test run. parametrize may be called
+        multiple times for disjunct argnames sets.
+
+        :arg argnames: an argument name or a list of argument names
+
+        :arg argvalues: a list of values for a single argument if argnames
+            specified a single argument only or a list of tuples which specify
+            values for the multiple argument names.
+
+        :arg indirect: if True each argvalue corresponding to an argument will be
+            passed as request.param to the respective funcarg factory so that
+            it can perform more expensive setups during the setup phase of
+            a test rather than at collection time (which is the default).
+
+        :arg ids: list of string ids corresponding to the (list of) argvalues
+            so that they are part of the test id. If no ids are provided
+            they will be generated automatically from the argvalues.
+        """
+        if not isinstance(argnames, (tuple, list)):
+            argnames = (argnames,)
+            argvalues = [(val,) for val in argvalues]
+        for arg in argnames:
+            if arg not in self.funcargnames:
+                raise ValueError("%r has no argument %r" %(self.function, arg))
+        valtype = indirect and "params" or "funcargs"
+        if not ids:
+            idmaker = IDMaker()
+            ids = list(map(idmaker, argvalues))
+        newcalls = []
+        for callspec in self._calls or [CallSpec2(self)]:
+            for i, valset in enumerate(argvalues):
+                assert len(valset) == len(argnames)
+                newcallspec = callspec.copy(self)
+                newcallspec.setmulti(valtype, argnames, valset, ids[i])
+                newcalls.append(newcallspec)
+        self._calls = newcalls
+
     def addcall(self, funcargs=None, id=_notexists, param=_notexists):
-        """ add a new call to the underlying test function during the
-        collection phase of a test run. Note that request.addcall() is
+        """ (deprecated, use parametrize) add a new call to the underlying
+        test function during
+        the collection phase of a test run. Note that request.addcall() is
         called during the test collection phase prior and independently
-        to actual test execution. Therefore you should perform setup
-        of resources in a funcarg factory which can be instrumented
-        with the ``param``.
+        to actual test execution. You should only use addcall()
+        if you need to specify multiple arguments of a test function
 
         :arg funcargs: argument keyword dictionary used when invoking
             the test function.
 
         :arg id: used for reporting and identification purposes. If you
-            don't supply an `id` the length of the currently
-            list of calls to the test function will be used.
+            don't supply an `id` an automatic unique id will be generated.
 
-        :arg param: will be exposed to a later funcarg factory invocation
-            through the ``request.param`` attribute. It allows to
-            defer test fixture setup activities to when an actual
-            test is run.
+        :arg param: a parameter which will be exposed to a later funcarg factory
+            invocation through the ``request.param`` attribute.
         """
         assert funcargs is None or isinstance(funcargs, dict)
         if funcargs is not None:
             for name in funcargs:
                 if name not in self.funcargnames:
                     pytest.fail("funcarg %r not used in this function." % name)
+        else:
+            funcargs = {}
         if id is None:
             raise ValueError("id=None not allowed")
         if id is _notexists:
```
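The `indirect` flag in the docstring above hands each value to the funcarg factory as `request.param` instead of passing it straight into the test. A hedged sketch using the 2.2-era `pytest_funcarg__` factory convention; `make_connection` is a hypothetical helper:

```python
# conftest.py
def pytest_funcarg__db(request):
    # request.param carries the parametrized value ("sqlite" or "mysql")
    return make_connection(request.param)  # make_connection is hypothetical

# test_db.py
import pytest

@pytest.mark.parametrize("db", ["sqlite", "mysql"], indirect=True)
def test_roundtrip(db):
    assert db is not None
```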
```diff
@@ -556,11 +658,26 @@ class Metafunc:
         if id in self._ids:
             raise ValueError("duplicate id %r" % id)
         self._ids.add(id)
-        self._calls.append(CallSpec(funcargs, id, param))
+
+        cs = CallSpec2(self)
+        cs.setall(funcargs, id, param)
+        self._calls.append(cs)
+
+class IDMaker:
+    def __init__(self):
+        self.counter = 0
+    def __call__(self, valset):
+        l = []
+        for val in valset:
+            if not isinstance(val, (int, str)):
+                val = "."+str(self.counter)
+            self.counter += 1
+            l.append(str(val))
+        return "-".join(l)
 
 class FuncargRequest:
     """ A request for function arguments from a test function.
 
     Note that there is an optional ``param`` attribute in case
     there was an invocation to metafunc.addcall(param=...).
     If no such call was done in a ``pytest_generate_tests``
```
```diff
@@ -693,11 +810,18 @@ class FuncargRequest:
             self._raiselookupfailed(argname)
         funcargfactory = self._name2factory[argname].pop()
-        oldarg = self._currentarg
-        self._currentarg = argname
+        mp = monkeypatch()
+        mp.setattr(self, '_currentarg', argname)
+        try:
+            param = self._pyfuncitem.callspec.getparam(argname)
+        except (AttributeError, ValueError):
+            pass
+        else:
+            mp.setattr(self, 'param', param, raising=False)
         try:
             self._funcargs[argname] = res = funcargfactory(request=self)
         finally:
-            self._currentarg = oldarg
+            mp.undo()
         return res
 
     def _getscopeitem(self, scope):
```
||||
@@ -63,6 +63,8 @@ class ResultLog(object):
|
||||
self.write_log_entry(testpath, lettercode, longrepr)
|
||||
|
||||
def pytest_runtest_logreport(self, report):
|
||||
if report.when != "call" and report.passed:
|
||||
return
|
||||
res = self.config.hook.pytest_report_teststatus(report=report)
|
||||
code = res[1]
|
||||
if code == 'x':
|
||||
|
||||
```diff
@@ -1,6 +1,6 @@
 """ basic collect and runtest protocol implementations """
 
-import py, sys
+import py, sys, time
 from py._code.code import TerminalRepr
 
 def pytest_namespace():
```
```diff
@@ -14,6 +14,37 @@ def pytest_namespace():
 #
 # pytest plugin hooks
 
+def pytest_addoption(parser):
+    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group.addoption('--durations',
+         action="store", type="int", default=None, metavar="N",
+         help="show N slowest setup/test durations (N=0 for all)."),
+
+def pytest_terminal_summary(terminalreporter):
+    durations = terminalreporter.config.option.durations
+    if durations is None:
+        return
+    tr = terminalreporter
+    dlist = []
+    for replist in tr.stats.values():
+        for rep in replist:
+            if hasattr(rep, 'duration'):
+                dlist.append(rep)
+    if not dlist:
+        return
+    dlist.sort(key=lambda x: x.duration)
+    dlist.reverse()
+    if not durations:
+        tr.write_sep("=", "slowest test durations")
+    else:
+        tr.write_sep("=", "slowest %s test durations" % durations)
+        dlist = dlist[:durations]
+
+    for rep in dlist:
+        nodeid = rep.nodeid.replace("::()::", "::")
+        tr.write_line("%02.2fs %-8s %s" %
+            (rep.duration, rep.when, nodeid))
+
 def pytest_sessionstart(session):
     session._setupstate = SetupState()
```
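A quick way to exercise the new summary (the sleep times are invented): save the file below and run `py.test --durations=2`; the two slowest setup/call/teardown phases are printed at the end of the run.

```python
# test_durations_demo.py
import time

def test_quick():
    time.sleep(0.1)

def test_slow():
    time.sleep(0.5)
```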
```diff
@@ -28,19 +59,33 @@ class NodeInfo:
     def __init__(self, location):
         self.location = location
 
+def perform_pending_teardown(config, nextitem):
+    try:
+        olditem, log = config._pendingteardown
+    except AttributeError:
+        pass
+    else:
+        del config._pendingteardown
+        olditem.nextitem = nextitem
+        call_and_report(olditem, "teardown", log)
+
 def pytest_runtest_protocol(item):
+    perform_pending_teardown(item.config, item)
     item.ihook.pytest_runtest_logstart(
         nodeid=item.nodeid, location=item.location,
     )
-    runtestprotocol(item)
+    runtestprotocol(item, teardowndelayed=True)
     return True
 
-def runtestprotocol(item, log=True):
+def runtestprotocol(item, log=True, teardowndelayed=False):
     rep = call_and_report(item, "setup", log)
     reports = [rep]
     if rep.passed:
         reports.append(call_and_report(item, "call", log))
-    reports.append(call_and_report(item, "teardown", log))
+    if teardowndelayed:
+        item.config._pendingteardown = item, log
+    else:
+        reports.append(call_and_report(item, "teardown", log))
     return reports
 
 def pytest_runtest_setup(item):
@@ -53,12 +98,13 @@ def pytest_runtest_teardown(item):
     item.session._setupstate.teardown_exact(item)
 
 def pytest__teardown_final(session):
-    call = CallInfo(session._setupstate.teardown_all, when="teardown")
-    if call.excinfo:
-        ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir)
-        call.excinfo.traceback = ntraceback.filter()
-        longrepr = call.excinfo.getrepr(funcargs=True)
-        return TeardownErrorReport(longrepr)
+    perform_pending_teardown(session.config, None)
+    #call = CallInfo(session._setupstate.teardown_all, when="teardown")
+    #if call.excinfo:
+    #    ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir)
+    #    call.excinfo.traceback = ntraceback.filter()
+    #    longrepr = call.excinfo.getrepr(funcargs=True)
+    #    return TeardownErrorReport(longrepr)
 
 def pytest_report_teststatus(report):
     if report.when in ("setup", "teardown"):
```
```diff
@@ -78,7 +124,7 @@ def call_and_report(item, when, log=True):
     call = call_runtest_hook(item, when)
     hook = item.ihook
     report = hook.pytest_runtest_makereport(item=item, call=call)
-    if log and (when == "call" or not report.passed):
+    if log:
         hook.pytest_runtest_logreport(report=report)
     return report
@@ -95,12 +141,16 @@ class CallInfo:
         #: context of invocation: one of "setup", "call",
         #: "teardown", "memocollect"
         self.when = when
+        self.start = time.time()
         try:
-            self.result = func()
-        except KeyboardInterrupt:
-            raise
-        except:
-            self.excinfo = py.code.ExceptionInfo()
+            try:
+                self.result = func()
+            except KeyboardInterrupt:
+                raise
+            except:
+                self.excinfo = py.code.ExceptionInfo()
+        finally:
+            self.stop = time.time()
 
     def __repr__(self):
         if self.excinfo:
```
```diff
@@ -139,6 +189,7 @@ class BaseReport(object):
 
 def pytest_runtest_makereport(item, call):
     when = call.when
+    duration = call.stop-call.start
     keywords = dict([(x,1) for x in item.keywords])
     excinfo = call.excinfo
     if not call.excinfo:
@@ -160,14 +211,15 @@ def pytest_runtest_makereport(item, call):
     else: # exception in setup or teardown
         longrepr = item._repr_failure_py(excinfo)
     return TestReport(item.nodeid, item.location,
-                      keywords, outcome, longrepr, when)
+                      keywords, outcome, longrepr, when,
+                      duration=duration)
 
 class TestReport(BaseReport):
     """ Basic test report object (also used for setup and teardown calls if
     they fail).
     """
     def __init__(self, nodeid, location,
-            keywords, outcome, longrepr, when, sections=()):
+            keywords, outcome, longrepr, when, sections=(), duration=0):
         #: normalized collection node id
         self.nodeid = nodeid
@@ -179,13 +231,13 @@ class TestReport(BaseReport):
         #: a name -> value dictionary containing all keywords and
         #: markers associated with a test invocation.
         self.keywords = keywords
 
         #: test outcome, always one of "passed", "failed", "skipped".
         self.outcome = outcome
 
         #: None or a failure representation.
         self.longrepr = longrepr
 
         #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
         self.when = when
@@ -193,6 +245,9 @@ class TestReport(BaseReport):
         #: marshallable
         self.sections = list(sections)
 
+        #: time it took to run just the test
+        self.duration = duration
+
     def __repr__(self):
         return "<TestReport %r when=%r outcome=%r>" % (
             self.nodeid, self.when, self.outcome)
```
```diff
@@ -284,19 +339,22 @@ class SetupState(object):
         assert not self._finalizers
 
     def teardown_exact(self, item):
         if self.stack and item == self.stack[-1]:
+            colitem = item.nextitem
+            needed_collectors = colitem and colitem.listchain() or []
+            self._teardown_towards(needed_collectors)
+
+    def _teardown_towards(self, needed_collectors):
+        while self.stack:
+            if self.stack == needed_collectors[:len(self.stack)]:
+                break
             self._pop_and_teardown()
-        else:
-            self._callfinalizers(item)
 
     def prepare(self, colitem):
         """ setup objects along the collector chain to the test-method
         and teardown previously setup objects."""
         needed_collectors = colitem.listchain()
-        while self.stack:
-            if self.stack == needed_collectors[:len(self.stack)]:
-                break
-            self._pop_and_teardown()
+        self._teardown_towards(needed_collectors)
 
         # check if the last collection node has raised an error
         for col in self.stack:
             if hasattr(col, '_prepare_exc'):
```
```diff
@@ -9,6 +9,21 @@ def pytest_addoption(parser):
                 action="store_true", dest="runxfail", default=False,
                 help="run tests even if they are marked xfail")
 
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "skipif(*conditions): skip the given test function if evaluation "
+        "of all conditions has a True value. Evaluation happens within the "
+        "module global context. Example: skipif('sys.platform == \"win32\"') "
+        "skips the test if we are on the win32 platform. "
+    )
+    config.addinivalue_line("markers",
+        "xfail(*conditions, reason=None, run=True): mark the the test function "
+        "as an expected failure. Optionally specify a reason and run=False "
+        "if you don't even want to execute the test function. Any positional "
+        "condition strings will be evaluated (like with skipif) and if one is "
+        "False the marker will not be applied."
+    )
+
 def pytest_namespace():
     return dict(xfail=xfail)
```
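The two markers documented above, in use. A hedged sketch; note that 2.2-era skip conditions are strings evaluated in the module's global namespace, so `sys` must be imported there:

```python
import sys
import pytest

@pytest.mark.skipif('sys.platform == "win32"')
def test_posix_only():
    assert True

@pytest.mark.xfail(reason="known issue, see tracker")
def test_not_fixed_yet():
    assert 0  # reported as xfail instead of a failure
```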
```diff
@@ -169,21 +184,23 @@ def pytest_terminal_summary(terminalreporter):
         elif char == "X":
             show_xpassed(terminalreporter, lines)
         elif char in "fF":
-            show_failed(terminalreporter, lines)
+            show_simple(terminalreporter, lines, 'failed', "FAIL %s")
         elif char in "sS":
             show_skipped(terminalreporter, lines)
+        elif char == "E":
+            show_simple(terminalreporter, lines, 'error', "ERROR %s")
     if lines:
         tr._tw.sep("=", "short test summary info")
         for line in lines:
             tr._tw.line(line)
 
-def show_failed(terminalreporter, lines):
+def show_simple(terminalreporter, lines, stat, format):
     tw = terminalreporter._tw
-    failed = terminalreporter.stats.get("failed")
+    failed = terminalreporter.stats.get(stat)
     if failed:
         for rep in failed:
             pos = rep.nodeid
-            lines.append("FAIL %s" %(pos, ))
+            lines.append(format %(pos, ))
 
 def show_xfailed(terminalreporter, lines):
     xfailed = terminalreporter.stats.get("xfailed")
```
```diff
@@ -15,7 +15,7 @@ def pytest_addoption(parser):
     group._addoption('-r',
          action="store", dest="reportchars", default=None, metavar="chars",
          help="show extra test summary info as specified by chars (f)ailed, "
-              "(s)skipped, (x)failed, (X)passed.")
+              "(E)error, (s)skipped, (x)failed, (X)passed.")
     group._addoption('-l', '--showlocals',
          action="store_true", dest="showlocals", default=False,
          help="show locals in tracebacks (disabled by default).")
@@ -43,7 +43,8 @@ def pytest_configure(config):
             pass
         else:
             stdout = os.fdopen(newfd, stdout.mode, 1)
-            config._toclose = stdout
+            config._cleanup.append(lambda: stdout.close())
+
     reporter = TerminalReporter(config, stdout)
     config.pluginmanager.register(reporter, 'terminalreporter')
     if config.option.debug or config.option.traceconfig:
@@ -52,11 +53,6 @@ def pytest_configure(config):
             reporter.write_line("[traceconfig] " + msg)
         config.trace.root.setprocessor("pytest:config", mywriter)
 
-def pytest_unconfigure(config):
-    if hasattr(config, '_toclose'):
-        #print "closing", config._toclose, config._toclose.fileno()
-        config._toclose.close()
-
 def getreportopt(config):
     reportopts = ""
     optvalue = config.option.report
@@ -430,9 +426,10 @@ class TerminalReporter:
             keys.append(key)
         parts = []
         for key in keys:
-            val = self.stats.get(key, None)
-            if val:
-                parts.append("%d %s" %(len(val), key))
+            if key: # setup/teardown reports have an empty key, ignore them
+                val = self.stats.get(key, None)
+                if val:
+                    parts.append("%d %s" %(len(val), key))
         line = ", ".join(parts)
         # XXX coloring
         msg = "%s in %.2f seconds" %(line, session_duration)
@@ -443,8 +440,15 @@ class TerminalReporter:
 
     def summary_deselected(self):
         if 'deselected' in self.stats:
+            l = []
+            k = self.config.option.keyword
+            if k:
+                l.append("-k%s" % k)
+            m = self.config.option.markexpr
+            if m:
+                l.append("-m %r" % m)
             self.write_sep("=", "%d tests deselected by %r" %(
-                len(self.stats['deselected']), self.config.option.keyword), bold=True)
+                len(self.stats['deselected']), " ".join(l)), bold=True)
 
 def repr_pythonversion(v=None):
     if v is None:
```
@@ -46,7 +46,7 @@ class TempdirHandler:

    def finish(self):
        self.trace("finish")


def pytest_configure(config):
    mp = monkeypatch()
    t = TempdirHandler(config)
@@ -64,5 +64,5 @@ def pytest_funcarg__tmpdir(request):
    name = request._pyfuncitem.name
    name = py.std.re.sub("[\W]", "_", name)
    x = request.config._tmpdirhandler.mktemp(name, numbered=True)
    return x.realpath()
    return x

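# A quick sketch of what this change means for test code (hypothetical
# example): the tmpdir funcarg now returns the py.path.local object itself,
# so the usual path API is available on it directly.
def test_create_file(tmpdir):
    p = tmpdir.join("hello.txt")
    p.write("content")
    assert p.read() == "content"
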
@@ -120,14 +120,19 @@ def pytest_runtest_protocol(item, __multicall__):
        ut = sys.modules['twisted.python.failure']
        Failure__init__ = ut.Failure.__init__.im_func
        check_testcase_implements_trial_reporter()
        def excstore(self, exc_value=None, exc_type=None, exc_tb=None):
        def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
                     captureVars=None):
            if exc_value is None:
                self._rawexcinfo = sys.exc_info()
            else:
                if exc_type is None:
                    exc_type = type(exc_value)
                self._rawexcinfo = (exc_type, exc_value, exc_tb)
            Failure__init__(self, exc_value, exc_type, exc_tb)
            try:
                Failure__init__(self, exc_value, exc_type, exc_tb,
                                captureVars=captureVars)
            except TypeError:
                Failure__init__(self, exc_value, exc_type, exc_tb)
        ut.Failure.__init__ = excstore
        try:
            return __multicall__.execute()

@@ -40,7 +40,7 @@ clean:
	-rm -rf $(BUILDDIR)/*

install: html
	@rsync -avz _build/html/ pytest.org:/www/pytest.org/latest
	@rsync -avz _build/html/ pytest.org:/www/pytest.org/2.2.0

installpdf: latexpdf
	@scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/latest

@@ -5,6 +5,8 @@ Release announcements
.. toctree::
   :maxdepth: 2

   release-2.2.0
   release-2.1.3
   release-2.1.2
   release-2.1.1
   release-2.1.0

32 doc/announce/release-2.1.3.txt Normal file
@@ -0,0 +1,32 @@
py.test 2.1.3: just some more fixes
===========================================================================

pytest-2.1.3 is a minor backward compatible maintenance release of the
popular py.test testing tool. It is commonly used for unit, functional-
and integration testing. See extensive docs with examples here:

    http://pytest.org/

The release contains another fix to the perfected assertions introduced
with the 2.1 series as well as the new possibility to customize reporting
for assertion expressions on a per-directory level.

If you want to install or upgrade pytest, just type one of::

    pip install -U pytest # or
    easy_install -U pytest

Thanks to the bug reporters and to Ronny Pfannschmidt, Benjamin Peterson
and Floris Bruynooghe who implemented the fixes.

best,
holger krekel

Changes between 2.1.2 and 2.1.3
----------------------------------------

- fix issue79: assertion rewriting failed on some comparisons in boolops,
- correctly handle zero length arguments (a la pytest '')
- fix issue67 / junitxml now contains correct test durations
- fix issue75 / skipping test failure on jython
- fix issue77 / Allow assertrepr_compare hook to apply to a subset of tests
96 doc/announce/release-2.2.0.txt Normal file
@@ -0,0 +1,96 @@
py.test 2.2.0: test marking++, parametrization++ and duration profiling
===========================================================================

pytest-2.2.0 is a test-suite compatible release of the popular
py.test testing tool. Plugins might need upgrades. It comes
with these improvements:

* easier and more powerful parametrization of tests:

  - new @pytest.mark.parametrize decorator to run tests with different arguments
  - new metafunc.parametrize() API for parametrizing arguments independently
  - see examples at http://pytest.org/latest/example/parametrize.html
    and the short sketch below
  - NOTE that parametrize() related APIs are still a bit experimental
    and might change in future releases.

* improved handling of test markers and refined marking mechanism:

  - "-m markexpr" option for selecting tests according to their mark
  - a new "markers" ini-variable for registering test markers for your project
  - the new "--strict" option bails out with an error if unregistered markers are used.
  - see examples at http://pytest.org/latest/example/markers.html

* duration profiling: the new "--durations=N" option shows the N slowest test
  executions or setup/teardown calls. This is most useful if you want to
  find out where your slowest test code is.

* 2.2.0 also performs more eager calling of teardown/finalizer functions,
  resulting in better and more accurate reporting when they fail
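
A minimal sketch of the new decorator (an illustrative test made up for this
summary; see the parametrize examples linked above for the real thing)::

    import pytest

    @pytest.mark.parametrize(("n", "squared"), [(2, 4), (3, 9)])
    def test_square(n, squared):
        assert n * n == squared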

Besides there is the usual set of bug fixes along with a cleanup of
pytest's own test suite allowing it to run on a wider range of environments.

For general information, see extensive docs with examples here:

    http://pytest.org/

If you want to install or upgrade pytest you might just type::

    pip install -U pytest # or
    easy_install -U pytest

Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri,
Alfredo Deza and all who gave feedback or sent bug reports.

best,
holger krekel


notes on incompatibility
------------------------------

While test suites should work unchanged you might need to upgrade plugins:

* You need a new version of the pytest-xdist plugin (1.7) for distributing
  test runs.

* Other plugins might need an upgrade if they implement
  the ``pytest_runtest_logreport`` hook, which is now called unconditionally
  for the setup/teardown fixture phases of a test. You may choose to
  ignore setup/teardown failures by inserting "if rep.when != 'call': return"
  or something similar. Note that most code probably "just" works because
  the hook was already called for failing setup/teardown phases of a test,
  so a plugin should have been ready to grok such reports already.
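
A minimal sketch of the pattern suggested above (hypothetical plugin code;
it only acts on the call phase and ignores the new setup/teardown reports)::

    def pytest_runtest_logreport(report):
        if report.when != 'call':
            return  # skip the setup/teardown phase reports added in 2.2
        if report.failed:
            print("FAILED: %s" % report.nodeid)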


Changes between 2.1.3 and 2.2.0
----------------------------------------

- fix issue90: introduce eager tearing down of test items so that
  teardown functions are called earlier.
- add an all-powerful metafunc.parametrize function which allows
  parametrizing test function arguments in multiple steps and therefore
  from independent plugins and places.
- add a @pytest.mark.parametrize helper which makes it easy to
  call a test function with different argument values
- Add examples to the "parametrize" example page, including a quick port
  of Test scenarios and the new parametrize function and decorator.
- introduce registration for "pytest.mark.*" helpers via ini-files
  or through plugin hooks. Also introduce a "--strict" option which
  will treat unregistered markers as errors,
  allowing you to avoid typos and maintain a well described set of markers
  for your test suite. See examples at http://pytest.org/latest/mark.html
  and its links.
- issue50: introduce "-m marker" option to select tests based on markers
  (this is a stricter and more predictable version of '-k' in that "-m"
  only matches complete markers and has more obvious rules for and/or
  semantics.)
- new feature to help optimize the speed of your tests:
  the --durations=N option displays the N slowest test calls
  and setup/teardown methods.
- fix issue87: --pastebin now works with python3
- fix issue89: --pdb with unexpected exceptions in doctests works more sensibly
- fix and cleanup pytest's own test suite to not leak FDs
- fix issue83: link to generated funcarg list
- fix issue74: pyarg module names are now checked against imp.find_module false positives
- fix compatibility with twisted/trial-11.1.0 use cases
@@ -23,7 +23,7 @@ you will see the return value of the function call::

    $ py.test test_assert1.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 1 items

    test_assert1.py F
@@ -37,7 +37,7 @@ you will see the return value of the function call::
    E + where 3 = f()

    test_assert1.py:5: AssertionError
    ========================= 1 failed in 0.01 seconds =========================
    ========================= 1 failed in 0.02 seconds =========================

py.test has support for showing the values of the most common subexpressions
including calls, attributes, comparisons, and binary and unary
@@ -105,7 +105,7 @@ if you run this module::

    $ py.test test_assert2.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 1 items

    test_assert2.py F
@@ -124,7 +124,7 @@ if you run this module::
    E '5'

    test_assert2.py:5: AssertionError
    ========================= 1 failed in 0.01 seconds =========================
    ========================= 1 failed in 0.02 seconds =========================

Special comparisons are done for a number of cases:

@@ -181,7 +181,7 @@ the conftest file::
    E vals: 1 != 2

    test_foocompare.py:8: AssertionError
    1 failed in 0.01 seconds
    1 failed in 0.02 seconds

.. _assert-details:
.. _`assert introspection`:

@@ -17,6 +17,9 @@ to get an overview on the globally available helpers.
.. automodule:: pytest
   :members:


.. _builtinfuncargs:

Builtin function arguments
-----------------------------------------------------

@@ -25,7 +28,7 @@ You can ask for available builtin or project-custom

    $ py.test --funcargs
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collected 0 items
    pytestconfig
        the pytest config object with access to command line opts.

@@ -64,7 +64,7 @@ of the failing function and hide the other one::

    $ py.test
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 2 items

    test_module.py .F
@@ -78,8 +78,8 @@ of the failing function and hide the other one::

    test_module.py:9: AssertionError
    ----------------------------- Captured stdout ------------------------------
    setting up <function test_func2 at 0x24fa320>
    ==================== 1 failed, 1 passed in 0.01 seconds ====================
    setting up <function test_func2 at 0x101353a28>
    ==================== 1 failed, 1 passed in 0.02 seconds ====================

Accessing captured output from a test function
---------------------------------------------------

@@ -44,9 +44,9 @@ then you can just invoke ``py.test`` without command line options::

    $ py.test
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 1 items

    mymodule.py .

    ========================= 1 passed in 0.02 seconds =========================
    ========================= 1 passed in 0.05 seconds =========================

@@ -18,5 +18,6 @@ need more examples or have questions. Also take a look at the :ref:`comprehensiv
   simple.txt
   mysetup.txt
   parametrize.txt
   markers.txt
   pythoncollection.txt
   nonpython.txt

260 doc/example/markers.txt Normal file
@@ -0,0 +1,260 @@

.. _`mark examples`:

Working with custom markers
=================================================

Here are some examples using the :ref:`mark` mechanism.

marking test functions and selecting them for a run
----------------------------------------------------

You can "mark" a test function with custom metadata like this::

    # content of test_server.py

    import pytest
    @pytest.mark.webtest
    def test_send_http():
        pass # perform some webtest test for your app
    def test_something_quick():
        pass

.. versionadded:: 2.2

You can then restrict a test run to only run tests marked with ``webtest``::

    $ py.test -v -m webtest
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python
    collecting ... collected 2 items

    test_server.py:3: test_send_http PASSED

    =================== 1 tests deselected by "-m 'webtest'" ===================
    ================== 1 passed, 1 deselected in 0.01 seconds ==================

Or the inverse, running all tests except the webtest ones::

    $ py.test -v -m "not webtest"
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python
    collecting ... collected 2 items

    test_server.py:6: test_something_quick PASSED

    ================= 1 tests deselected by "-m 'not webtest'" =================
    ================== 1 passed, 1 deselected in 0.01 seconds ==================

Registering markers
-------------------------------------

.. versionadded:: 2.2

.. ini-syntax for custom markers:

Registering markers for your test suite is simple::

    # content of pytest.ini
    [pytest]
    markers =
        webtest: mark a test as a webtest.

You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` marker::

    $ py.test --markers
    @pytest.mark.webtest: mark a test as a webtest.

    @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform.

    @pytest.mark.xfail(*conditions, reason=None, run=True): mark the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.

    @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in multiple different argument value sets. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.

    @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.

    @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.


For an example on how to add and work with markers from a plugin, see
:ref:`adding a custom marker from a plugin`.

.. note::

    It is recommended to explicitly register markers so that:

    * there is one place in your test suite defining your markers

    * asking for existing markers via ``py.test --markers`` gives good output

    * typos in function markers are treated as an error if you use
      the ``--strict`` option. Later versions of py.test are probably
      going to treat non-registered markers as an error.

.. _`scoped-marking`:

Marking whole classes or modules
----------------------------------------------------

If you are programming with Python2.6 you may use ``pytest.mark`` decorators
on classes to apply markers to all of their test methods::

    # content of test_mark_classlevel.py
    import pytest
    @pytest.mark.webtest
    class TestClass:
        def test_startup(self):
            pass
        def test_startup_and_more(self):
            pass

This is equivalent to directly applying the decorator to the
two test functions.

To remain backward-compatible with Python2.4 you can also set a
``pytestmark`` attribute on a TestClass like this::

    import pytest

    class TestClass:
        pytestmark = pytest.mark.webtest

or if you need to use multiple markers you can use a list::

    import pytest

    class TestClass:
        pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]

You can also set a module level marker::

    import pytest
    pytestmark = pytest.mark.webtest

in which case it will be applied to all functions and
methods defined in the module.

Using ``-k TEXT`` to select tests
----------------------------------------------------

You can use the ``-k`` command line option to only run tests with names that match the given argument::

    $ py.test -k send_http # running with the above defined examples
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 4 items

    test_server.py .

    =================== 3 tests deselected by '-ksend_http' ====================
    ================== 1 passed, 3 deselected in 0.02 seconds ==================

And you can also run all tests except the ones that match the keyword::

    $ py.test -k-send_http
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 4 items

    test_mark_classlevel.py ..
    test_server.py .

    =================== 1 tests deselected by '-k-send_http' ===================
    ================== 3 passed, 1 deselected in 0.03 seconds ==================

Or to only select the class::

    $ py.test -kTestClass
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 4 items

    test_mark_classlevel.py ..

    =================== 2 tests deselected by '-kTestClass' ====================
    ================== 2 passed, 2 deselected in 0.02 seconds ==================

.. _`adding a custom marker from a plugin`:

custom marker and command line option to control test runs
----------------------------------------------------------

Plugins can provide custom markers and implement specific behaviour
based on them. This is a self-contained example which adds a command
line option and a parametrized test function marker to run tests
specified via named environments::

    # content of conftest.py

    import pytest
    def pytest_addoption(parser):
        parser.addoption("-E", dest="env", action="store", metavar="NAME",
            help="only run tests matching the environment NAME.")

    def pytest_configure(config):
        # register an additional marker
        config.addinivalue_line("markers",
            "env(name): mark test to run only on named environment")

    def pytest_runtest_setup(item):
        if not isinstance(item, item.Function):
            return
        if hasattr(item.obj, 'env'):
            envmarker = getattr(item.obj, 'env')
            envname = envmarker.args[0]
            if envname != item.config.option.env:
                pytest.skip("test requires env %r" % envname)

A test file using this local plugin::

    # content of test_someenv.py

    import pytest
    @pytest.mark.env("stage1")
    def test_basic_db_operation():
        pass

and an example invocation specifying a different environment than the one
the test needs::

    $ py.test -E stage2
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 5 items

    test_mark_classlevel.py ..
    test_server.py ..
    test_someenv.py s

    =================== 4 passed, 1 skipped in 0.04 seconds ====================

and here is one that specifies exactly the environment needed::

    $ py.test -E stage1
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 5 items

    test_mark_classlevel.py ..
    test_server.py ..
    test_someenv.py .

    ========================= 5 passed in 0.04 seconds =========================

The ``--markers`` option always gives you a list of available markers::

    $ py.test --markers
    @pytest.mark.webtest: mark a test as a webtest.

    @pytest.mark.env(name): mark test to run only on named environment

    @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform.

    @pytest.mark.xfail(*conditions, reason=None, run=True): mark the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.

    @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in multiple different argument value sets. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.

    @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.

    @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.

@@ -2,20 +2,20 @@
module containing parametrized tests testing cross-python
serialization via the pickle module.
"""
import py
import py, pytest

pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8']

def pytest_generate_tests(metafunc):
    if 'python1' in metafunc.funcargnames:
        assert 'python2' in metafunc.funcargnames
        for obj in metafunc.function.multiarg.kwargs['obj']:
            for py1 in pythonlist:
                for py2 in pythonlist:
                    metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj),
                                     param=(py1, py2, obj))
    # we parametrize all "python1" and "python2" arguments to iterate
    # over the python interpreters of our list above - the actual
    # setup and lookup of interpreters happens in the python1/python2
    # factories, respectively.
    for arg in metafunc.funcargnames:
        if arg in ("python1", "python2"):
            metafunc.parametrize(arg, pythonlist, indirect=True)

@py.test.mark.multiarg(obj=[42, {}, {1:3},])
@pytest.mark.parametrize("obj", [42, {}, {1:3},])
def test_basic_objects(python1, python2, obj):
    python1.dumps(obj)
    python2.load_and_is_true("obj == %s" % obj)
@@ -23,14 +23,11 @@ def test_basic_objects(python1, python2, obj):
def pytest_funcarg__python1(request):
    tmpdir = request.getfuncargvalue("tmpdir")
    picklefile = tmpdir.join("data.pickle")
    return Python(request.param[0], picklefile)
    return Python(request.param, picklefile)

def pytest_funcarg__python2(request):
    python1 = request.getfuncargvalue("python1")
    return Python(request.param[1], python1.picklefile)

def pytest_funcarg__obj(request):
    return request.param[2]
    return Python(request.param, python1.picklefile)

class Python:
    def __init__(self, version, picklefile):

@@ -49,7 +49,7 @@ You can now run the test::

    $ py.test test_sample.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 1 items

    test_sample.py F
@@ -57,7 +57,7 @@ You can now run the test::
    ================================= FAILURES =================================
    _______________________________ test_answer ________________________________

    mysetup = <conftest.MySetup instance at 0x1d345f0>
    mysetup = <conftest.MySetup instance at 0x1012b2bd8>

    def test_answer(mysetup):
        app = mysetup.myapp()
@@ -66,7 +66,7 @@ You can now run the test::
    E assert 54 == 42

    test_sample.py:4: AssertionError
    ========================= 1 failed in 0.01 seconds =========================
    ========================= 1 failed in 0.02 seconds =========================

This means that our ``mysetup`` object was successfully instantiated
and ``mysetup.app()`` returned an initialized ``MyApp`` instance.
@@ -122,14 +122,14 @@ Running it yields::

    $ py.test test_ssh.py -rs
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 1 items

    test_ssh.py s
    ========================= short test summary info ==========================
    SKIP [1] /tmp/doc-exec-296/conftest.py:22: specify ssh host with --ssh
    SKIP [1] /Users/hpk/tmp/doc-exec-625/conftest.py:22: specify ssh host with --ssh

    ======================== 1 skipped in 0.01 seconds =========================
    ======================== 1 skipped in 0.02 seconds =========================

If you specify a command line option like ``py.test --ssh=python.org`` the test will execute as expected.

@@ -27,7 +27,7 @@ now execute the test specification::

    nonpython $ py.test test_simple.yml
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 2 items

    test_simple.yml .F
@@ -37,7 +37,7 @@ now execute the test specification::
    usecase execution failed
    spec failed: 'some': 'other'
    no further details known at this point.
    ==================== 1 failed, 1 passed in 0.07 seconds ====================
    ==================== 1 failed, 1 passed in 0.10 seconds ====================

You get one dot for the passing ``sub1: sub1`` check and one failure.
Obviously in the above ``conftest.py`` you'll want to implement a more
@@ -56,7 +56,7 @@ reporting in ``verbose`` mode::

    nonpython $ py.test -v
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1 -- /home/hpk/venv/0/bin/python
    platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python
    collecting ... collected 2 items

    test_simple.yml:1: usecase: ok PASSED
@@ -67,17 +67,17 @@ reporting in ``verbose`` mode::
    usecase execution failed
    spec failed: 'some': 'other'
    no further details known at this point.
    ==================== 1 failed, 1 passed in 0.06 seconds ====================
    ==================== 1 failed, 1 passed in 0.09 seconds ====================

While developing your custom test collection and execution it's also
interesting to just look at the collection tree::

    nonpython $ py.test --collectonly
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 2 items
    <YamlFile 'test_simple.yml'>
    <YamlItem 'ok'>
    <YamlItem 'hello'>

    ============================= in 0.06 seconds =============================
    ============================= in 0.08 seconds =============================

@@ -4,18 +4,71 @@
Parametrizing tests
=================================================

py.test allows to easily implement your own custom
parametrization scheme for tests. Here we provide
some examples for inspiration and re-use.
.. currentmodule:: _pytest.python

py.test allows you to easily parametrize test functions.
In the following we provide some examples using
the builtin mechanisms.

.. _parametrizemark:

simple "decorator" parametrization of a test function
----------------------------------------------------------------------------

.. versionadded:: 2.2

The builtin ``pytest.mark.parametrize`` decorator directly enables
parametrization of arguments for a test function. Here is an example
of a test function that checks that processing some input
results in the expected output::

    # content of test_expectation.py
    import pytest
    @pytest.mark.parametrize(("input", "expected"), [
        ("3+5", 8),
        ("2+4", 6),
        ("6*9", 42),
    ])
    def test_eval(input, expected):
        assert eval(input) == expected

We parametrize two arguments of the test function so that the test
function is called three times. Let's run it::

    $ py.test -q
    collecting ... collected 3 items
    ..F
    ================================= FAILURES =================================
    ____________________________ test_eval[6*9-42] _____________________________

    input = '6*9', expected = 42

    @pytest.mark.parametrize(("input", "expected"), [
        ("3+5", 8),
        ("2+4", 6),
        ("6*9", 42),
    ])
    def test_eval(input, expected):
    > assert eval(input) == expected
    E assert 54 == 42
    E + where 54 = eval('6*9')

    test_expectation.py:8: AssertionError
    1 failed, 2 passed in 0.03 seconds

As expected only one pair of input/output values fails the simple test function.

Note that there are various ways to mark groups of functions,
see :ref:`mark`.

Generating parameter combinations, depending on command line
----------------------------------------------------------------------------

.. regendoc:wipe

Let's say we want to execute a test with different parameters
and the parameter range shall be determined by a command
line argument. Let's first write a simple computation test::
Let's say we want to execute a test with different computation
parameters and the parameter range shall be determined by a command
line argument. Let's first write a simple (do-nothing) computation test::

    # content of test_compute.py

@@ -36,15 +89,14 @@ Now we add a test configuration like this::
                end = 5
            else:
                end = 2
            for i in range(end):
                metafunc.addcall(funcargs={'param1': i})
            metafunc.parametrize("param1", range(end))
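
For orientation, the surrounding ``conftest.py`` of this example (elided by
the diff hunk above) looks roughly like this; the ``--all`` option name comes
from the prose below, the rest is a sketch::

    # content of conftest.py  (sketch)

    def pytest_addoption(parser):
        parser.addoption("--all", action="store_true",
            help="run all parameter combinations")

    def pytest_generate_tests(metafunc):
        if 'param1' in metafunc.funcargnames:
            if metafunc.config.option.all:
                end = 5
            else:
                end = 2
            metafunc.parametrize("param1", range(end))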

This means that we only run 2 tests if we do not pass ``--all``::

    $ py.test -q test_compute.py
    collecting ... collected 2 items
    ..
    2 passed in 0.01 seconds
    2 passed in 0.02 seconds

We run only two computations, so we see two dots.
let's run the full monty::
@@ -62,20 +114,78 @@ let's run the full monty::
    E assert 4 < 4

    test_compute.py:3: AssertionError
    1 failed, 4 passed in 0.01 seconds
    1 failed, 4 passed in 0.03 seconds

As expected when running the full range of ``param1`` values
we'll get an error on the last one.

Deferring the setup of parametrizing resources
a quick port of "testscenarios"
------------------------------------

.. _`test scenarios`: http://bazaar.launchpad.net/~lifeless/testscenarios/trunk/annotate/head%3A/doc/example.py

Here is a quick port to run tests configured with `test scenarios`_,
an add-on from Robert Collins for the standard unittest framework. We
only have to work a bit to construct the correct arguments for pytest's
:py:func:`Metafunc.parametrize`::

    # content of test_scenarios.py

    def pytest_generate_tests(metafunc):
        idlist = []
        argvalues = []
        for scenario in metafunc.cls.scenarios:
            idlist.append(scenario[0])
            items = scenario[1].items()
            argnames = [x[0] for x in items]
            argvalues.append(([x[1] for x in items]))
        metafunc.parametrize(argnames, argvalues, ids=idlist)

    scenario1 = ('basic', {'attribute': 'value'})
    scenario2 = ('advanced', {'attribute': 'value2'})

    class TestSampleWithScenarios:
        scenarios = [scenario1, scenario2]

        def test_demo(self, attribute):
            assert isinstance(attribute, str)

this is a fully self-contained example which you can run with::

    $ py.test test_scenarios.py
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 2 items

    test_scenarios.py ..

    ========================= 2 passed in 0.02 seconds =========================

If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::


    $ py.test --collectonly test_scenarios.py
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 2 items
    <Module 'test_scenarios.py'>
    <Class 'TestSampleWithScenarios'>
    <Instance '()'>
    <Function 'test_demo[basic]'>
    <Function 'test_demo[advanced]'>

    ============================= in 0.01 seconds =============================

Deferring the setup of parametrized resources
---------------------------------------------------

.. regendoc:wipe

The parametrization of test functions happens at collection
time. It is often a good idea to setup possibly expensive
resources only when the actual test is run. Here is a simple
example how you can achieve that::
time. It is a good idea to setup expensive resources like DB
connections or subprocesses only when the actual test is run.
Here is a simple example of how you can achieve that, first
the actual test requiring a ``db`` object::

    # content of test_backends.py

@@ -85,17 +195,15 @@ example how you can achieve that::
        if db.__class__.__name__ == "DB2":
            pytest.fail("deliberately failing for demo purposes")

Now we add a test configuration that takes care to generate
two invocations of the ``test_db_initialized`` function and
furthermore a factory that creates a database object when
each test is actually run::
We can now add a test configuration that generates two invocations of
the ``test_db_initialized`` function and also implements a factory that
creates a database object for the actual test invocations::

    # content of conftest.py

    def pytest_generate_tests(metafunc):
        if 'db' in metafunc.funcargnames:
            metafunc.addcall(param="d1")
            metafunc.addcall(param="d2")
            metafunc.parametrize("db", ['d1', 'd2'], indirect=True)

    class DB1:
        "one database object"
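
The factory completing the indirect parametrization is elided by the hunk;
in rough sketch form (assuming the ``DB1``/``DB2`` classes of this example)
it would look like::

    def pytest_funcarg__db(request):
        # request.param is 'd1' or 'd2' because of indirect=True above
        if request.param == "d1":
            return DB1()
        elif request.param == "d2":
            return DB2()
        raise ValueError("invalid internal test config")
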
@@ -114,13 +222,13 @@ Let's first see how it looks like at collection time::

    $ py.test test_backends.py --collectonly
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 2 items
    <Module 'test_backends.py'>
    <Function 'test_db_initialized[0]'>
    <Function 'test_db_initialized[1]'>
    <Function 'test_db_initialized[d1]'>
    <Function 'test_db_initialized[d2]'>

    ============================= in 0.00 seconds =============================
    ============================= in 0.01 seconds =============================

And then when we run the test::

@@ -128,9 +236,9 @@ And then when we run the test::
    collecting ... collected 2 items
    .F
    ================================= FAILURES =================================
    __________________________ test_db_initialized[1] __________________________
    _________________________ test_db_initialized[d2] __________________________

    db = <conftest.DB2 instance at 0x17829e0>
    db = <conftest.DB2 instance at 0x10150ab90>

    def test_db_initialized(db):
        # a dummy test
@@ -139,34 +247,37 @@ And then when we run the test::
    E Failed: deliberately failing for demo purposes

    test_backends.py:6: Failed
    1 failed, 1 passed in 0.01 seconds
    1 failed, 1 passed in 0.02 seconds

Now you see that one invocation of the test passes and another fails,
as it to be expected.
The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``pytest_funcarg__db`` factory has instantiated each of the DB values during the setup phase while ``pytest_generate_tests`` generated two corresponding calls to ``test_db_initialized`` during the collection phase.

.. regendoc:wipe

Parametrizing test methods through per-class configuration
--------------------------------------------------------------

.. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py


Here is an example ``pytest_generate_tests`` function implementing a
parametrization scheme similar to Michael Foord's `unittest
parameterizer`_ in a lot less code::
parameterizer`_ but in a lot less code::

    # content of ./test_parametrize.py
    import pytest

    def pytest_generate_tests(metafunc):
        # called once per each test function
        for funcargs in metafunc.cls.params[metafunc.function.__name__]:
            # schedule a new test function run with applied **funcargs
            metafunc.addcall(funcargs=funcargs)
        funcarglist = metafunc.cls.params[metafunc.function.__name__]
        argnames = list(funcarglist[0])
        metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
                for funcargs in funcarglist])

    class TestClass:
        # a map specifying multiple argument sets for a test method
        params = {
            'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
            'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)],
            'test_zerodivision': [dict(a=1, b=0), ],
        }

        def test_equals(self, a, b):
@@ -175,120 +286,44 @@ parameterizer`_ in a lot less code::
        def test_zerodivision(self, a, b):
            pytest.raises(ZeroDivisionError, "a/b")

Running it means we are two tests for each test functions, using
the respective settings::
Our test generator looks up a class-level definition which specifies which
argument sets to use for each test function. Let's run it::

    $ py.test -q
    collecting ... collected 6 items
    .FF..F
    collecting ... collected 3 items
    F..
    ================================= FAILURES =================================
    __________________________ test_db_initialized[1] __________________________
    ________________________ TestClass.test_equals[1-2] ________________________

    db = <conftest.DB2 instance at 0x2acf4d0>

    def test_db_initialized(db):
        # a dummy test
        if db.__class__.__name__ == "DB2":
    > pytest.fail("deliberately failing for demo purposes")
    E Failed: deliberately failing for demo purposes

    test_backends.py:6: Failed
    _________________________ TestClass.test_equals[0] _________________________

    self = <test_parametrize.TestClass instance at 0x2ad2830>, a = 1, b = 2
    self = <test_parametrize.TestClass instance at 0x101509638>, a = 1, b = 2

    def test_equals(self, a, b):
    > assert a == b
    E assert 1 == 2

    test_parametrize.py:17: AssertionError
    ______________________ TestClass.test_zerodivision[1] ______________________

    self = <test_parametrize.TestClass instance at 0x2ad8830>, a = 3, b = 2

    def test_zerodivision(self, a, b):
    > pytest.raises(ZeroDivisionError, "a/b")
    E Failed: DID NOT RAISE

    test_parametrize.py:20: Failed
    3 failed, 3 passed in 0.02 seconds
    test_parametrize.py:18: AssertionError
    1 failed, 2 passed in 0.03 seconds

Parametrizing test methods through a decorator
--------------------------------------------------------------

Modifying the previous example we can also allow decorators
for parametrizing test methods::

    # content of test_parametrize2.py

    import pytest

    # test support code
    def params(funcarglist):
        def wrapper(function):
            function.funcarglist = funcarglist
            return function
        return wrapper

    def pytest_generate_tests(metafunc):
        for funcargs in getattr(metafunc.function, 'funcarglist', ()):
            metafunc.addcall(funcargs=funcargs)

    # actual test code
    class TestClass:
        @params([dict(a=1, b=2), dict(a=3, b=3), ])
        def test_equals(self, a, b):
            assert a == b

        @params([dict(a=1, b=0), dict(a=3, b=2)])
        def test_zerodivision(self, a, b):
            pytest.raises(ZeroDivisionError, "a/b")

Running it gives similar results as before::

    $ py.test -q test_parametrize2.py
    collecting ... collected 4 items
    F..F
    ================================= FAILURES =================================
    _________________________ TestClass.test_equals[0] _________________________

    self = <test_parametrize2.TestClass instance at 0x1ef2170>, a = 1, b = 2

    @params([dict(a=1, b=2), dict(a=3, b=3), ])
    def test_equals(self, a, b):
    > assert a == b
    E assert 1 == 2

    test_parametrize2.py:19: AssertionError
    ______________________ TestClass.test_zerodivision[1] ______________________

    self = <test_parametrize2.TestClass instance at 0x20e4248>, a = 3, b = 2

    @params([dict(a=1, b=0), dict(a=3, b=2)])
    def test_zerodivision(self, a, b):
    > pytest.raises(ZeroDivisionError, "a/b")
    E Failed: DID NOT RAISE

    test_parametrize2.py:23: Failed
    2 failed, 2 passed in 0.02 seconds

Checking serialization between Python interpreters
Indirect parametrization with multiple resources
--------------------------------------------------------------

Here is a stripped down real-life example of using parametrized
testing for testing serialization between different interpreters.
testing for testing serialization, invoking different python interpreters.
We define a ``test_basic_objects`` function which is to be run
with different sets of arguments for its three arguments::
with different sets of arguments for its three arguments:

* ``python1``: first python interpreter
* ``python2``: second python interpreter
* ``obj``: object to be dumped from first interpreter and loaded into second interpreter
* ``python1``: first python interpreter, run to pickle-dump an object to a file
* ``python2``: second interpreter, run to pickle-load an object from a file
* ``obj``: object to be dumped/loaded

.. literalinclude:: multipython.py

Running it (with Python-2.4 through to Python2.7 installed)::
Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize)::

    . $ py.test -q multipython.py
    . $ py.test -rs -q multipython.py
    collecting ... collected 75 items
    ....s....s....s....ssssss....s....s....s....ssssss....s....s....s....ssssss
    48 passed, 27 skipped in 2.48 seconds
    ssssssssssssssssss.........ssssss.........ssssss.........ssssssssssssssssss
    ========================= short test summary info ==========================
    SKIP [24] /Users/hpk/p/pytest/doc/example/multipython.py:36: 'python2.8' not found
    SKIP [24] /Users/hpk/p/pytest/doc/example/multipython.py:36: 'python2.4' not found
    27 passed, 48 skipped in 3.03 seconds

@@ -43,7 +43,7 @@ then the test collection looks like this::

    $ py.test --collectonly
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 2 items
    <Module 'check_myapp.py'>
    <Class 'CheckMyApp'>
@@ -51,7 +51,7 @@ then the test collection looks like this::
    <Function 'check_simple'>
    <Function 'check_complex'>

    ============================= in 0.01 seconds =============================
    ============================= in 0.02 seconds =============================

Interpreting cmdline arguments as Python packages
-----------------------------------------------------
@@ -82,7 +82,7 @@ You can always peek at the collection tree without running tests like this::

    . $ py.test --collectonly pythoncollection.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 3 items
    <Module 'pythoncollection.py'>
    <Function 'test_function'>

@@ -13,7 +13,7 @@ get on the terminal - we are working on that):

    assertion $ py.test failure_demo.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.1 -- pytest-2.1.1
    platform darwin -- Python 2.7.1 -- pytest-2.2.0
    collecting ... collected 39 items

    failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
@@ -30,7 +30,7 @@ get on the terminal - we are working on that):
    failure_demo.py:15: AssertionError
    _________________________ TestFailing.test_simple __________________________

    self = <failure_demo.TestFailing object at 0x1b79310>
    self = <failure_demo.TestFailing object at 0x1013552d0>

    def test_simple(self):
    def f():
@@ -40,13 +40,13 @@ get on the terminal - we are working on that):

    > assert f() == g()
    E assert 42 == 43
    E + where 42 = <function f at 0x1c57488>()
    E + and 43 = <function g at 0x1c57500>()
    E + where 42 = <function f at 0x101514f50>()
    E + and 43 = <function g at 0x101516050>()

    failure_demo.py:28: AssertionError
    ____________________ TestFailing.test_simple_multiline _____________________

    self = <failure_demo.TestFailing object at 0x1b79850>
    self = <failure_demo.TestFailing object at 0x101355950>

    def test_simple_multiline(self):
    otherfunc_multi(
@@ -66,19 +66,19 @@ get on the terminal - we are working on that):
    failure_demo.py:11: AssertionError
    ___________________________ TestFailing.test_not ___________________________

    self = <failure_demo.TestFailing object at 0x1b79290>
    self = <failure_demo.TestFailing object at 0x101355ad0>

    def test_not(self):
    def f():
    return 42
    > assert not f()
    E assert not 42
    E + where 42 = <function f at 0x1c57500>()
    E + where 42 = <function f at 0x101514f50>()

    failure_demo.py:38: AssertionError
    _________________ TestSpecialisedExplanations.test_eq_text _________________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b79f90>
    self = <failure_demo.TestSpecialisedExplanations object at 0x1013559d0>

    def test_eq_text(self):
    > assert 'spam' == 'eggs'
@@ -89,7 +89,7 @@ get on the terminal - we are working on that):
    failure_demo.py:42: AssertionError
    _____________ TestSpecialisedExplanations.test_eq_similar_text _____________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b7af50>
    self = <failure_demo.TestSpecialisedExplanations object at 0x101350dd0>

    def test_eq_similar_text(self):
    > assert 'foo 1 bar' == 'foo 2 bar'
@@ -102,7 +102,7 @@ get on the terminal - we are working on that):
    failure_demo.py:45: AssertionError
    ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b7af90>
    self = <failure_demo.TestSpecialisedExplanations object at 0x101350d10>

    def test_eq_multiline_text(self):
    > assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
@@ -115,7 +115,7 @@ get on the terminal - we are working on that):
    failure_demo.py:48: AssertionError
    ______________ TestSpecialisedExplanations.test_eq_long_text _______________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b79d10>
    self = <failure_demo.TestSpecialisedExplanations object at 0x101350cd0>

    def test_eq_long_text(self):
    a = '1'*100 + 'a' + '2'*100
@@ -132,7 +132,7 @@ get on the terminal - we are working on that):
    failure_demo.py:53: AssertionError
    _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b7a490>
    self = <failure_demo.TestSpecialisedExplanations object at 0x101350f50>

    def test_eq_long_text_multiline(self):
    a = '1\n'*100 + 'a' + '2\n'*100
@@ -156,7 +156,7 @@ get on the terminal - we are working on that):
    failure_demo.py:58: AssertionError
    _________________ TestSpecialisedExplanations.test_eq_list _________________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b7ac90>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10134f350>

    def test_eq_list(self):
    > assert [0, 1, 2] == [0, 1, 3]
@@ -166,7 +166,7 @@ get on the terminal - we are working on that):
    failure_demo.py:61: AssertionError
    ______________ TestSpecialisedExplanations.test_eq_list_long _______________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b79cd0>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10134fc10>

    def test_eq_list_long(self):
    a = [0]*100 + [1] + [3]*100
@@ -178,7 +178,7 @@ get on the terminal - we are working on that):
    failure_demo.py:66: AssertionError
    _________________ TestSpecialisedExplanations.test_eq_dict _________________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b75e90>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10134f2d0>

    def test_eq_dict(self):
    > assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
@@ -191,7 +191,7 @@ get on the terminal - we are working on that):
    failure_demo.py:69: AssertionError
    _________________ TestSpecialisedExplanations.test_eq_set __________________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b75c10>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10134f110>

    def test_eq_set(self):
    > assert set([0, 10, 11, 12]) == set([0, 20, 21])
@@ -207,7 +207,7 @@ get on the terminal - we are working on that):
    failure_demo.py:72: AssertionError
    _____________ TestSpecialisedExplanations.test_eq_longer_list ______________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b79590>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10134f510>

    def test_eq_longer_list(self):
    > assert [1,2] == [1,2,3]
@@ -217,7 +217,7 @@ get on the terminal - we are working on that):
    failure_demo.py:75: AssertionError
    _________________ TestSpecialisedExplanations.test_in_list _________________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b7a8d0>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10134f6d0>

    def test_in_list(self):
    > assert 1 in [0, 2, 3, 4, 5]
@@ -226,7 +226,7 @@ get on the terminal - we are working on that):
    failure_demo.py:78: AssertionError
    __________ TestSpecialisedExplanations.test_not_in_text_multiline __________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b75410>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10152c490>

    def test_not_in_text_multiline(self):
    text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
@@ -244,7 +244,7 @@ get on the terminal - we are working on that):
    failure_demo.py:82: AssertionError
    ___________ TestSpecialisedExplanations.test_not_in_text_single ____________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b75c90>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10152cfd0>

    def test_not_in_text_single(self):
    text = 'single foo line'
@@ -257,7 +257,7 @@ get on the terminal - we are working on that):
    failure_demo.py:86: AssertionError
    _________ TestSpecialisedExplanations.test_not_in_text_single_long _________

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b75dd0>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10152c090>

    def test_not_in_text_single_long(self):
    text = 'head ' * 50 + 'foo ' + 'tail ' * 20
@@ -270,7 +270,7 @@ get on the terminal - we are working on that):
    failure_demo.py:90: AssertionError
    ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______

    self = <failure_demo.TestSpecialisedExplanations object at 0x1b751d0>
    self = <failure_demo.TestSpecialisedExplanations object at 0x10152cb90>

    def test_not_in_text_single_long_term(self):
    text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
@@ -289,7 +289,7 @@ get on the terminal - we are working on that):
    i = Foo()
    > assert i.b == 2
    E assert 1 == 2
    E + where 1 = <failure_demo.Foo object at 0x1b75310>.b
    E + where 1 = <failure_demo.Foo object at 0x10152c350>.b

    failure_demo.py:101: AssertionError
    _________________________ test_attribute_instance __________________________
@@ -299,8 +299,8 @@ get on the terminal - we are working on that):
    b = 1
    > assert Foo().b == 2
    E assert 1 == 2
    E + where 1 = <failure_demo.Foo object at 0x1b75bd0>.b
    E + where <failure_demo.Foo object at 0x1b75bd0> = <class 'failure_demo.Foo'>()
    E + where 1 = <failure_demo.Foo object at 0x10134fe90>.b
    E + where <failure_demo.Foo object at 0x10134fe90> = <class 'failure_demo.Foo'>()

    failure_demo.py:107: AssertionError
    __________________________ test_attribute_failure __________________________
@@ -316,7 +316,7 @@ get on the terminal - we are working on that):
    failure_demo.py:116:
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    self = <failure_demo.Foo object at 0x1c6ee50>
    self = <failure_demo.Foo object at 0x10152c610>

    def _get_b(self):
    > raise Exception('Failed to get attrib')
@@ -332,15 +332,15 @@ get on the terminal - we are working on that):
    b = 2
    > assert Foo().b == Bar().b
    E assert 1 == 2
    E + where 1 = <failure_demo.Foo object at 0x1b7a750>.b
    E + where <failure_demo.Foo object at 0x1b7a750> = <class 'failure_demo.Foo'>()
    E + and 2 = <failure_demo.Bar object at 0x1c6e310>.b
    E + where <failure_demo.Bar object at 0x1c6e310> = <class 'failure_demo.Bar'>()
    E + where 1 = <failure_demo.Foo object at 0x10152c950>.b
    E + where <failure_demo.Foo object at 0x10152c950> = <class 'failure_demo.Foo'>()
    E + and 2 = <failure_demo.Bar object at 0x10152c250>.b
    E + where <failure_demo.Bar object at 0x10152c250> = <class 'failure_demo.Bar'>()

    failure_demo.py:124: AssertionError
    __________________________ TestRaises.test_raises __________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1b92878>
|
||||
self = <failure_demo.TestRaises instance at 0x1015219e0>
|
||||
|
||||
def test_raises(self):
|
||||
s = 'qwe'
|
||||
@@ -352,10 +352,10 @@ get on the terminal - we are working on that):
|
||||
> int(s)
|
||||
E ValueError: invalid literal for int() with base 10: 'qwe'
|
||||
|
||||
<0-codegen /home/hpk/p/pytest/_pytest/python.py:833>:1: ValueError
|
||||
<0-codegen /Users/hpk/p/pytest/_pytest/python.py:957>:1: ValueError
|
||||
______________________ TestRaises.test_raises_doesnt _______________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1c63248>
|
||||
self = <failure_demo.TestRaises instance at 0x1013794d0>
|
||||
|
||||
def test_raises_doesnt(self):
|
||||
> raises(IOError, "int('3')")
|
||||
@@ -364,7 +364,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:136: Failed
|
||||
__________________________ TestRaises.test_raise ___________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1b97560>
|
||||
self = <failure_demo.TestRaises instance at 0x10151f6c8>
|
||||
|
||||
def test_raise(self):
|
||||
> raise ValueError("demo error")
|
||||
@@ -373,7 +373,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:139: ValueError
|
||||
________________________ TestRaises.test_tupleerror ________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1b8e0e0>
|
||||
self = <failure_demo.TestRaises instance at 0x1013733f8>
|
||||
|
||||
def test_tupleerror(self):
|
||||
> a,b = [1]
|
||||
@@ -382,7 +382,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:142: ValueError
|
||||
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1b8edd0>
|
||||
self = <failure_demo.TestRaises instance at 0x10136e170>
|
||||
|
||||
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
|
||||
l = [1,2,3]
|
||||
@@ -395,7 +395,7 @@ get on the terminal - we are working on that):
|
||||
l is [1, 2, 3]
|
||||
________________________ TestRaises.test_some_error ________________________
|
||||
|
||||
self = <failure_demo.TestRaises instance at 0x1b88bd8>
|
||||
self = <failure_demo.TestRaises instance at 0x10136ef38>
|
||||
|
||||
def test_some_error(self):
|
||||
> if namenotexi:
|
||||
@@ -420,10 +420,10 @@ get on the terminal - we are working on that):
|
||||
> assert 1 == 0
|
||||
E assert 1 == 0
|
||||
|
||||
<2-codegen 'abc-123' /home/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError
|
||||
<2-codegen 'abc-123' /Users/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError
|
||||
____________________ TestMoreErrors.test_complex_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b8e248>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101520638>
|
||||
|
||||
def test_complex_error(self):
|
||||
def f():
|
||||
@@ -452,7 +452,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:5: AssertionError
|
||||
___________________ TestMoreErrors.test_z1_unpack_error ____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b97050>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x10136bcb0>
|
||||
|
||||
def test_z1_unpack_error(self):
|
||||
l = []
|
||||
@@ -462,7 +462,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:179: ValueError
|
||||
____________________ TestMoreErrors.test_z2_type_error _____________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b8bd88>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x10136a440>
|
||||
|
||||
def test_z2_type_error(self):
|
||||
l = 3
|
||||
@@ -472,19 +472,19 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:183: TypeError
|
||||
______________________ TestMoreErrors.test_startswith ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b8ab90>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101368290>
|
||||
|
||||
def test_startswith(self):
|
||||
s = "123"
|
||||
g = "456"
|
||||
> assert s.startswith(g)
|
||||
E assert <built-in method startswith of str object at 0x1b68508>('456')
|
||||
E + where <built-in method startswith of str object at 0x1b68508> = '123'.startswith
|
||||
E assert <built-in method startswith of str object at 0x101354030>('456')
|
||||
E + where <built-in method startswith of str object at 0x101354030> = '123'.startswith
|
||||
|
||||
failure_demo.py:188: AssertionError
|
||||
__________________ TestMoreErrors.test_startswith_nested ___________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b878c0>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101368f38>
|
||||
|
||||
def test_startswith_nested(self):
|
||||
def f():
|
||||
@@ -492,15 +492,15 @@ get on the terminal - we are working on that):
|
||||
def g():
|
||||
return "456"
|
||||
> assert f().startswith(g())
|
||||
E assert <built-in method startswith of str object at 0x1b68508>('456')
|
||||
E + where <built-in method startswith of str object at 0x1b68508> = '123'.startswith
|
||||
E + where '123' = <function f at 0x1b96848>()
|
||||
E + and '456' = <function g at 0x1b968c0>()
|
||||
E assert <built-in method startswith of str object at 0x101354030>('456')
|
||||
E + where <built-in method startswith of str object at 0x101354030> = '123'.startswith
|
||||
E + where '123' = <function f at 0x10136c578>()
|
||||
E + and '456' = <function g at 0x10136c5f0>()
|
||||
|
||||
failure_demo.py:195: AssertionError
|
||||
_____________________ TestMoreErrors.test_global_func ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b8a320>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x10136aef0>
|
||||
|
||||
def test_global_func(self):
|
||||
> assert isinstance(globf(42), float)
|
||||
@@ -510,18 +510,18 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:198: AssertionError
|
||||
_______________________ TestMoreErrors.test_instance _______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b8b0e0>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x10151c440>
|
||||
|
||||
def test_instance(self):
|
||||
self.x = 6*7
|
||||
> assert self.x != 42
|
||||
E assert 42 != 42
|
||||
E + where 42 = <failure_demo.TestMoreErrors instance at 0x1b8b0e0>.x
|
||||
E + where 42 = <failure_demo.TestMoreErrors instance at 0x10151c440>.x
|
||||
|
||||
failure_demo.py:202: AssertionError
|
||||
_______________________ TestMoreErrors.test_compare ________________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b97998>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101373a70>
|
||||
|
||||
def test_compare(self):
|
||||
> assert globf(10) < 5
|
||||
@@ -531,7 +531,7 @@ get on the terminal - we are working on that):
|
||||
failure_demo.py:205: AssertionError
|
||||
_____________________ TestMoreErrors.test_try_finally ______________________
|
||||
|
||||
self = <failure_demo.TestMoreErrors instance at 0x1b807e8>
|
||||
self = <failure_demo.TestMoreErrors instance at 0x101363c68>
|
||||
|
||||
def test_try_finally(self):
|
||||
x = 1
|
||||
@@ -540,4 +540,4 @@ get on the terminal - we are working on that):
|
||||
E assert 1 == 0
|
||||
|
||||
failure_demo.py:210: AssertionError
|
||||
======================== 39 failed in 0.20 seconds =========================
|
||||
======================== 39 failed in 0.41 seconds =========================
|
||||
|
||||
@@ -53,7 +53,7 @@ Let's run this without supplying our new command line option::

test_sample.py:6: AssertionError
----------------------------- Captured stdout ------------------------------
first
1 failed in 0.01 seconds
1 failed in 0.02 seconds

And now with supplying a command line option::

@@ -76,7 +76,7 @@ And now with supplying a command line option::

test_sample.py:6: AssertionError
----------------------------- Captured stdout ------------------------------
second
1 failed in 0.01 seconds
1 failed in 0.02 seconds

Ok, this completes the basic pattern. However, one often rather
wants to process command line options outside of the test and
@@ -109,13 +109,13 @@ directory with the above conftest.py::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
gw0 I / gw1 I
gw0 [0] / gw1 [0]
platform darwin -- Python 2.7.1 -- pytest-2.2.0
gw0 I
gw0 [0]

scheduling tests via LoadScheduling

============================= in 0.26 seconds =============================
============================= in 0.71 seconds =============================

.. _`excontrolskip`:

@@ -156,25 +156,25 @@ and when running it will see a skipped "slow" test::

$ py.test -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 2 items

test_module.py .s
========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-301/conftest.py:9: need --runslow option to run
SKIP [1] /Users/hpk/tmp/doc-exec-630/conftest.py:9: need --runslow option to run

=================== 1 passed, 1 skipped in 0.01 seconds ====================
=================== 1 passed, 1 skipped in 0.02 seconds ====================

Or run it including the ``slow`` marked test::

$ py.test --runslow
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 2 items

test_module.py ..

========================= 2 passed in 0.01 seconds =========================
========================= 2 passed in 0.62 seconds =========================

Writing well integrated assertion helpers
--------------------------------------------------
@@ -213,7 +213,7 @@ Let's run our little function::
E Failed: not configured: 42

test_checkconfig.py:8: Failed
1 failed in 0.01 seconds
1 failed in 0.02 seconds

Detect if running from within a py.test run
--------------------------------------------------------------
@@ -261,7 +261,7 @@ which will add the string to the test header accordingly::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
project deps: mylib-1.1
collecting ... collected 0 items

@@ -284,7 +284,7 @@ which will add info only when run with "--v"::

$ py.test -v
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1 -- /home/hpk/venv/0/bin/python
platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python
info1: did you know that ...
did you?
collecting ... collected 0 items
@@ -295,7 +295,45 @@ and nothing when run plainly::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 0 items

============================= in 0.00 seconds =============================

profiling test duration
--------------------------

.. regendoc:wipe

.. versionadded:: 2.2

If you have a slow-running large test suite you might want to find
out which tests are slowest. Let's make an artificial test suite::

# content of test_some_are_slow.py

import time

def test_funcfast():
    pass

def test_funcslow1():
    time.sleep(0.1)

def test_funcslow2():
    time.sleep(0.2)

Now we can profile which test functions execute slowest::

$ py.test --durations=3
=========================== test session starts ============================
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 3 items

test_some_are_slow.py ...

========================= slowest 3 test durations =========================
0.20s call test_some_are_slow.py::test_funcslow2
0.10s call test_some_are_slow.py::test_funcslow1
0.00s setup test_some_are_slow.py::test_funcfast
========================= 3 passed in 0.32 seconds =========================

@@ -38,6 +38,7 @@ very useful if you want to test e.g. against different database backends
or with multiple numerical argument sets and want to reuse the same set
of test functions.

py.test comes with :ref:`builtinfuncargs` and there are some refined usages in the examples section.

.. _funcarg:

@@ -61,7 +62,7 @@ Running the test looks like this::

$ py.test test_simplefactory.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 1 items

test_simplefactory.py F
@@ -76,7 +77,7 @@ Running the test looks like this::
E assert 42 == 17

test_simplefactory.py:5: AssertionError
========================= 1 failed in 0.01 seconds =========================
========================= 1 failed in 0.03 seconds =========================

This means that indeed the test function was called with a ``myfuncarg``
argument value of ``42`` and the assert fails. Here is how py.test
@@ -157,17 +158,16 @@ hook to generate several calls to the same test function::

# content of test_example.py
def pytest_generate_tests(metafunc):
    if "numiter" in metafunc.funcargnames:
        for i in range(10):
            metafunc.addcall(funcargs=dict(numiter=i))
        metafunc.parametrize("numiter", range(10))

def test_func(numiter):
    assert numiter < 9

Running this::
Running this will generate ten invocations of ``test_func``, passing in each value from ``range(10)``::

$ py.test test_example.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 10 items

test_example.py .........F
@@ -181,16 +181,16 @@ Running this::
> assert numiter < 9
E assert 9 < 9

test_example.py:7: AssertionError
==================== 1 failed, 9 passed in 0.02 seconds ====================
test_example.py:6: AssertionError
==================== 1 failed, 9 passed in 0.05 seconds ====================

Note that the ``pytest_generate_tests(metafunc)`` hook is called during
Obviously, only when ``numiter`` has the value of ``9`` does the test fail. Note that the ``pytest_generate_tests(metafunc)`` hook is called during
the test collection phase which is separate from the actual test running.
Let's just look at what is collected::

$ py.test --collectonly test_example.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 10 items
<Module 'test_example.py'>
<Function 'test_func[0]'>
@@ -204,19 +204,19 @@ Let's just look at what is collected::
<Function 'test_func[8]'>
<Function 'test_func[9]'>

============================= in 0.00 seconds =============================
============================= in 0.01 seconds =============================

If you want to select only the run with the value ``7`` you could do::

$ py.test -v -k 7 test_example.py # or -k test_func[7]
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1 -- /home/hpk/venv/0/bin/python
platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python
collecting ... collected 10 items

test_example.py:6: test_func[7] PASSED
test_example.py:5: test_func[7] PASSED

======================== 9 tests deselected by '7' =========================
================== 1 passed, 9 deselected in 0.01 seconds ==================
======================= 9 tests deselected by '-k7' ========================
================== 1 passed, 9 deselected in 0.02 seconds ==================

You might want to look at :ref:`more parametrization examples <paramexamples>`.

@@ -240,4 +240,5 @@ in the class or module where a test function is defined:

``metafunc.config``: access to command line opts and general config

.. automethod:: Metafunc.parametrize(name, values, idmaker=None)
.. automethod:: Metafunc.addcall(funcargs=None, id=_notexists, param=_notexists)

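The ten calls above can also be requested declaratively on the test function itself. A minimal sketch, assuming the ``pytest.mark.parametrize`` helper that 2.2 introduces alongside ``Metafunc.parametrize`` (the module name is illustrative)::

    # hypothetical content of test_parametrize_sketch.py
    import pytest

    @pytest.mark.parametrize("numiter", range(10))
    def test_func(numiter):
        # collected as test_func[0] .. test_func[9], like the hook-based variant
        assert numiter < 9
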
@@ -22,9 +22,9 @@ Installation options::

To check your installation has installed the correct version::

$ py.test --version
This is py.test version 2.1.1, imported from /home/hpk/p/pytest/pytest.py
This is py.test version 2.2.0, imported from /Users/hpk/p/pytest/pytest.pyc
setuptools registered plugins:
pytest-xdist-1.6 at /home/hpk/p/pytest-xdist/xdist/plugin.pyc
pytest-xdist-1.7.dev1 at /Users/hpk/p/pytest-xdist/xdist/plugin.pyc

If you get an error check out :ref:`installation issues`.

@@ -46,7 +46,7 @@ That's it. You can execute the test function now::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 1 items

test_sample.py F
@@ -60,7 +60,7 @@ That's it. You can execute the test function now::
E + where 4 = func(3)

test_sample.py:5: AssertionError
========================= 1 failed in 0.01 seconds =========================
========================= 1 failed in 0.04 seconds =========================

py.test found the ``test_answer`` function by following :ref:`standard test discovery rules <test discovery>`, basically detecting the ``test_`` prefixes. We got a failure report because our little ``func(3)`` call did not return ``5``.

@@ -95,7 +95,7 @@ Running it with, this time in "quiet" reporting mode::
$ py.test -q test_sysexit.py
collecting ... collected 1 items
.
1 passed in 0.00 seconds
1 passed in 0.01 seconds

.. todo:: For further ways to assert exceptions see the `raises`

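One such way is the callable form of ``pytest.raises``, which fails the test unless the given callable raises the expected exception. A minimal sketch (module and function names are illustrative)::

    # hypothetical content of test_sysexit_sketch.py
    import pytest

    def f():
        raise SystemExit(1)

    def test_exits():
        # passes because calling f() raises SystemExit
        pytest.raises(SystemExit, f)
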
@@ -126,7 +126,7 @@ run the module by passing its filename::
================================= FAILURES =================================
____________________________ TestClass.test_two ____________________________

self = <test_class.TestClass instance at 0x2037908>
self = <test_class.TestClass instance at 0x10150a170>

def test_two(self):
x = "hello"
@@ -134,7 +134,7 @@ run the module by passing its filename::
E assert hasattr('hello', 'check')

test_class.py:8: AssertionError
1 failed, 1 passed in 0.01 seconds
1 failed, 1 passed in 0.03 seconds

The first test passed, the second failed. Again we can easily see
the intermediate values used in the assertion, helping us to
@@ -163,7 +163,7 @@ before performing the test function call. Let's just run it::
================================= FAILURES =================================
_____________________________ test_needsfiles ______________________________

tmpdir = local('/tmp/pytest-60/test_needsfiles0')
tmpdir = local('/Users/hpk/tmp/pytest-1595/test_needsfiles0')

def test_needsfiles(tmpdir):
print tmpdir
@@ -172,8 +172,8 @@ before performing the test function call. Let's just run it::

test_tmpdir.py:3: AssertionError
----------------------------- Captured stdout ------------------------------
/tmp/pytest-60/test_needsfiles0
1 failed in 0.02 seconds
/Users/hpk/tmp/pytest-1595/test_needsfiles0
1 failed in 0.15 seconds

Before the test runs, a unique-per-test-invocation temporary directory
was created. More info at :ref:`tmpdir handling`.

|
||||
|
||||
- **supports functional testing and complex test setups**
|
||||
|
||||
- (new in 2.2) :ref:`durations`
|
||||
- (much improved in 2.2) :ref:`marking and test selection <mark>`
|
||||
- (improved in 2.2) :ref:`parametrized test functions <parametrized test functions>`
|
||||
- advanced :ref:`skip and xfail`
|
||||
- generic :ref:`marking and test selection <mark>`
|
||||
- unique :ref:`dependency injection through funcargs <funcargs>`
|
||||
- can :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
|
||||
- can :ref:`continuously re-run failing tests <looponfailing>`
|
||||
- many :ref:`builtin helpers <pytest helpers>`
|
||||
- flexible :ref:`Python test discovery`
|
||||
- unique :ref:`dependency injection through funcargs <funcargs>`
|
||||
- :ref:`parametrized test functions <parametrized test functions>`
|
||||
|
||||
- **integrates many common testing methods**
|
||||
|
||||
|
||||
119
doc/mark.txt
119
doc/mark.txt
@@ -6,117 +6,20 @@ Marking test functions with attributes

.. currentmodule:: _pytest.mark

By using the ``pytest.mark`` helper you can instantiate
decorators that will set named metadata on test functions.
By using the ``pytest.mark`` helper you can easily set
metadata on your test functions. There are
some builtin markers, for example:

Marking a single function
----------------------------------------------------
* :ref:`skipif <skipif>` - skip a test function if a certain condition is met
* :ref:`xfail <xfail>` - produce an "expected failure" outcome if a certain
  condition is met
* :ref:`parametrize <parametrizemark>` to perform multiple calls
  to the same test function.

You can "mark" a test function with metadata like this::
It's easy to create custom markers or to apply markers
to whole test classes or modules. See :ref:`mark examples` for examples
which also serve as documentation.

import pytest
@pytest.mark.webtest
def test_send_http():
    ...

This will set the function attribute ``webtest`` to a :py:class:`MarkInfo`
instance. You can also specify parametrized metadata like this::

# content of test_mark.py

import pytest
@pytest.mark.webtest(firefox=30)
def test_receive():
    pass

@pytest.mark.webtest("functional", firefox=30)
def test_run_and_look():
    pass

and access it from other places like this::

test_receive.webtest.kwargs['firefox'] == 30
test_run_and_look.webtest.args[0] == "functional"

.. _`scoped-marking`:

Marking whole classes or modules
----------------------------------------------------

If you are programming with Python2.6 you may use ``pytest.mark`` decorators
with a class to apply markers to all of its test methods::

# content of test_mark_classlevel.py
import pytest
@pytest.mark.webtest
class TestClass:
    def test_startup(self):
        pass
    def test_startup_and_more(self):
        pass

This is equivalent to directly applying the decorator to the
two test functions.

To remain compatible with Python2.5 you can also set a
``pytestmark`` attribute on a TestClass like this::

import pytest

class TestClass:
    pytestmark = pytest.mark.webtest

or if you need to use multiple markers you can use a list::

import pytest

class TestClass:
    pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]

You can also set a module level marker::

import pytest
pytestmark = pytest.mark.webtest

in which case it will be applied to all functions and
methods defined in the module.

Using ``-k TEXT`` to select tests
----------------------------------------------------

You can use the ``-k`` command line option to select tests::

$ py.test -k webtest # running with the above defined examples yields
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
collecting ... collected 4 items

test_mark.py ..
test_mark_classlevel.py ..

========================= 4 passed in 0.01 seconds =========================

And you can also run all tests except the ones that match the keyword::

$ py.test -k-webtest
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
collecting ... collected 4 items

===================== 4 tests deselected by '-webtest' =====================
======================= 4 deselected in 0.01 seconds =======================

Or to only select the class::

$ py.test -kTestClass
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
collecting ... collected 4 items

test_mark_classlevel.py ..

==================== 2 tests deselected by 'TestClass' =====================
================== 2 passed, 2 deselected in 0.01 seconds ==================

API reference for mark related objects
------------------------------------------------

@@ -39,10 +39,10 @@ will be undone.

.. background check:
$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 0 items

============================= in 0.00 seconds =============================
============================= in 0.20 seconds =============================

Method reference of the monkeypatch function argument
-----------------------------------------------------

@@ -327,7 +327,6 @@ test execution:

.. autofunction: pytest_runtest_logreport


Reference of important objects involved in hooks
===========================================================

@@ -46,7 +46,7 @@ Some organisations using py.test
* `Some Mozilla QA people <http://www.theautomatedtester.co.uk/blog/2011/pytest_and_xdist_plugin.html>`_ use pytest to distribute their Selenium tests
* `Tandberg <http://www.tandberg.com/>`_
* `Shootq <http://web.shootq.com/>`_
* `Stups department of Heinrich Heine University Düsseldorf <http://www.stups.uni-duesseldorf.de/projects.php>`_
* `Stups department of Heinrich Heine University Duesseldorf <http://www.stups.uni-duesseldorf.de/projects.php>`_
* `cellzome <http://www.cellzome.com/>`_
* `Open End, Gothenborg <http://www.openend.se>`_
* `Laboratory of Bioinformatics, Warsaw <http://genesilico.pl/>`_

@@ -130,7 +130,7 @@ Running it with the report-on-xfail option gives this output::

example $ py.test -rx xfail_demo.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 6 items

xfail_demo.py xxxxxx
@@ -147,7 +147,7 @@ Running it with the report-on-xfail option gives this output::
XFAIL xfail_demo.py::test_hello6
reason: reason

======================== 6 xfailed in 0.03 seconds =========================
======================== 6 xfailed in 0.08 seconds =========================

.. _`evaluation of skipif/xfail conditions`:

@@ -23,7 +23,7 @@ Function arguments:

Test parametrization:

- `generating parametrized tests with funcargs`_
- `generating parametrized tests with funcargs`_ (uses deprecated ``addcall()`` API)
- `test generators and cached setup`_
- `parametrizing tests, generalized`_ (blog post)
- `putting test-hooks into local or global plugins`_ (blog post)

@@ -28,7 +28,7 @@ Running this would result in a passed test except for the last

$ py.test test_tmpdir.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 1 items

test_tmpdir.py F
@@ -36,7 +36,7 @@ Running this would result in a passed test except for the last
================================= FAILURES =================================
_____________________________ test_create_file _____________________________

tmpdir = local('/tmp/pytest-61/test_create_file0')
tmpdir = local('/Users/hpk/tmp/pytest-1596/test_create_file0')

def test_create_file(tmpdir):
p = tmpdir.mkdir("sub").join("hello.txt")
@@ -47,7 +47,7 @@ Running this would result in a passed test except for the last
E assert 0

test_tmpdir.py:7: AssertionError
========================= 1 failed in 0.03 seconds =========================
========================= 1 failed in 0.20 seconds =========================

.. _`base temporary directory`:

@@ -24,7 +24,7 @@ Running it yields::

$ py.test test_unittest.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.1.1
platform darwin -- Python 2.7.1 -- pytest-2.2.0
collecting ... collected 1 items

test_unittest.py F
@@ -42,7 +42,7 @@ Running it yields::
test_unittest.py:8: AssertionError
----------------------------- Captured stdout ------------------------------
hello
========================= 1 failed in 0.02 seconds =========================
========================= 1 failed in 0.23 seconds =========================

.. _`unittest.py style`: http://docs.python.org/library/unittest.html

@@ -98,6 +98,18 @@ can use a helper::
In previous versions you could only enter PDB tracing if
you disabled capturing on the command line via ``py.test -s``.

.. _durations:

Profiling test execution duration
-------------------------------------

.. versionadded:: 2.2

To get a list of the slowest 10 test durations::

py.test --durations=10


Creating JUnitXML format files
----------------------------------------------------

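Passing ``0`` instead of ``10`` reports every setup/call/teardown duration rather than only the slowest ones; the ``TestDurations.test_calls_showall`` test further below relies on exactly this::

    py.test --durations=0
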
2
setup.py
2
setup.py
@@ -24,7 +24,7 @@ def main():
        name='pytest',
        description='py.test: simple powerful testing with Python',
        long_description = long_description,
        version='2.1.2',
        version='2.2.0',
        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],

@@ -13,6 +13,12 @@ class TestGeneralUsage:
            '*ERROR: hello'
        ])

    def test_root_conftest_syntax_error(self, testdir):
        p = testdir.makepyfile(conftest="raise SyntaxError\n")
        result = testdir.runpytest()
        result.stderr.fnmatch_lines(["*raise SyntaxError*"])
        assert result.ret != 0

    def test_early_hook_error_issue38_1(self, testdir):
        testdir.makeconftest("""
            def pytest_sessionstart():
@@ -354,24 +360,24 @@ class TestInvocationVariants:
    def test_equivalence_pytest_pytest(self):
        assert pytest.main == py.test.cmdline.main

    def test_invoke_with_string(self, capsys):
        retcode = pytest.main("-h")
    def test_invoke_with_string(self, testdir, capsys):
        retcode = testdir.pytestmain("-h")
        assert not retcode
        out, err = capsys.readouterr()
        assert "--help" in out
        pytest.raises(ValueError, lambda: pytest.main(retcode))
        pytest.raises(ValueError, lambda: pytest.main(0))

    def test_invoke_with_path(self, testdir, capsys):
        retcode = testdir.pytestmain(testdir.tmpdir)
        assert not retcode
        out, err = capsys.readouterr()

    def test_invoke_plugin_api(self, capsys):
    def test_invoke_plugin_api(self, testdir, capsys):
        class MyPlugin:
            def pytest_addoption(self, parser):
                parser.addoption("--myopt")

        pytest.main(["-h"], plugins=[MyPlugin()])
        testdir.pytestmain(["-h"], plugins=[MyPlugin()])
        out, err = capsys.readouterr()
        assert "--myopt" in out

@@ -403,11 +409,16 @@ class TestInvocationVariants:
        result.stdout.fnmatch_lines([
            "*1 passed*"
        ])

        empty_package = testdir.mkpydir("empty_package")
        monkeypatch.setenv('PYTHONPATH', empty_package)
        result = testdir.runpytest("--pyargs", ".")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*2 passed*"
        ])

        monkeypatch.setenv('PYTHONPATH', testdir)
        path.join('test_hello.py').remove()
        result = testdir.runpytest("--pyargs", "tpkg.test_hello")
        assert result.ret != 0
@@ -454,3 +465,92 @@ class TestInvocationVariants:
            "*1 failed*",
        ])

class TestDurations:
    source = """
        import time
        frag = 0.02
        def test_2():
            time.sleep(frag*2)
        def test_1():
            time.sleep(frag)
        def test_3():
            time.sleep(frag*3)
    """

    def test_calls(self, testdir):
        testdir.makepyfile(self.source)
        result = testdir.runpytest("--durations=10")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*durations*",
            "*call*test_3*",
            "*call*test_2*",
            "*call*test_1*",
        ])

    def test_calls_show_2(self, testdir):
        testdir.makepyfile(self.source)
        result = testdir.runpytest("--durations=2")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*durations*",
            "*call*test_3*",
            "*call*test_2*",
        ])
        assert "test_1" not in result.stdout.str()

    def test_calls_showall(self, testdir):
        testdir.makepyfile(self.source)
        result = testdir.runpytest("--durations=0")
        assert result.ret == 0
        for x in "123":
            for y in 'call',: #'setup', 'call', 'teardown':
                l = []
                for line in result.stdout.lines:
                    if ("test_%s" % x) in line and y in line:
                        break
                else:
                    raise AssertionError("not found %s %s" % (x,y))

    def test_with_deselected(self, testdir):
        testdir.makepyfile(self.source)
        result = testdir.runpytest("--durations=2", "-k test_1")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*durations*",
            "*call*test_1*",
        ])

    def test_with_failing_collection(self, testdir):
        testdir.makepyfile(self.source)
        testdir.makepyfile(test_collecterror="""xyz""")
        result = testdir.runpytest("--durations=2", "-k test_1")
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*durations*",
            "*call*test_1*",
        ])


class TestDurationWithFixture:
    source = """
        import time
        frag = 0.01
        def setup_function(func):
            time.sleep(frag * 3)
        def test_1():
            time.sleep(frag*2)
        def test_2():
            time.sleep(frag)
    """
    def test_setup_function(self, testdir):
        testdir.makepyfile(self.source)
        result = testdir.runpytest("--durations=10")
        assert result.ret == 0

        result.stdout.fnmatch_lines([
            "*durations*",
            "* setup *test_1*",
            "* call *test_1*",
        ])


@@ -12,13 +12,17 @@ def pytest_addoption(parser):
        help=("run FD checks if lsof is available"))

def pytest_configure(config):
    config.addinivalue_line("markers",
        "multi(arg=[value1,value2, ...]): call the test function "
        "multiple times with arg=value1, then with arg=value2, ... "
    )
    if config.getvalue("lsof"):
        try:
            out = py.process.cmdexec("lsof -p %d" % pid)
        except py.process.cmdexec.Error:
            pass
        else:
            config._numfiles = getopenfiles(out)
            config._numfiles = len(getopenfiles(out))

#def pytest_report_header():
# return "pid: %s" % os.getpid()
@@ -26,23 +30,31 @@ def pytest_configure(config):
def getopenfiles(out):
    def isopen(line):
        return ("REG" in line or "CHR" in line) and (
            "deleted" not in line and 'mem' not in line)
    return len([x for x in out.split("\n") if isopen(x)])
            "deleted" not in line and 'mem' not in line and "txt" not in line)
    return [x for x in out.split("\n") if isopen(x)]

def pytest_unconfigure(config, __multicall__):
    if not hasattr(config, '_numfiles'):
        return
    __multicall__.execute()
def check_open_files(config):
    out2 = py.process.cmdexec("lsof -p %d" % pid)
    len2 = getopenfiles(out2)
    assert len2 < config._numfiles + 7, out2

    lines2 = getopenfiles(out2)
    if len(lines2) > config._numfiles + 1:
        error = []
        error.append("***** %s FD leakage detected" %
            (len(lines2)-config._numfiles))
        error.extend(lines2)
        error.append(error[0])
        # update numfile so that the overall test run continues
        config._numfiles = len(lines2)
        raise AssertionError("\n".join(error))

def pytest_runtest_setup(item):
    item._oldir = py.path.local()

def pytest_runtest_teardown(item):
def pytest_runtest_teardown(item, __multicall__):
    item._oldir.chdir()
    if hasattr(item.config, '_numfiles'):
        x = __multicall__.execute()
        check_open_files(item.config)
        return x

def pytest_generate_tests(metafunc):
    multi = getattr(metafunc.function, 'multi', None)

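The ``pytest_generate_tests`` hook above is cut off at this point in the diff. A minimal sketch of how such a ``multi`` marker could be expanded, assuming the ``Metafunc.parametrize`` API shown earlier (this continuation is illustrative, not part of the commit)::

    def pytest_generate_tests(metafunc):
        multi = getattr(metafunc.function, 'multi', None)
        if multi is not None:
            # each kwarg of @pytest.mark.multi(arg=[...]) becomes one
            # parametrized function argument
            for name, values in multi.kwargs.items():
                metafunc.parametrize(name, values)
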
@@ -40,15 +40,6 @@ class TestBinReprIntegration:
        assert hook.left == [0, 1]
        assert hook.right == [0, 2]

    def test_configure_unconfigure(self, testdir, hook):
        assert hook == util._reprcompare
        config = testdir.parseconfig()
        plugin.pytest_configure(config)
        assert hook != util._reprcompare
        from _pytest.config import pytest_unconfigure
        pytest_unconfigure(config)
        assert hook == util._reprcompare

def callequal(left, right):
    return plugin.pytest_assertrepr_compare('==', left, right)

@@ -167,6 +158,28 @@ def test_sequence_comparison_uses_repr(testdir):
        "*E*'y'*",
    ])

@needsnewassert
def test_assertrepr_loaded_per_dir(testdir):
    testdir.makepyfile(test_base=['def test_base(): assert 1 == 2'])
    a = testdir.mkdir('a')
    a_test = a.join('test_a.py')
    a_test.write('def test_a(): assert 1 == 2')
    a_conftest = a.join('conftest.py')
    a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]')
    b = testdir.mkdir('b')
    b_test = b.join('test_b.py')
    b_test.write('def test_b(): assert 1 == 2')
    b_conftest = b.join('conftest.py')
    b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]')
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        '*def test_base():*',
        '*E*assert 1 == 2*',
        '*def test_a():*',
        '*E*assert summary a*',
        '*def test_b():*',
        '*E*assert summary b*'])


def test_assertion_options(testdir):
    testdir.makepyfile("""

@@ -144,6 +144,14 @@ class TestAssertionRewrite:
        def f():
            assert False or x()
        assert getmsg(f, {"x" : x}) == "assert (False or x())"
        def f():
            assert 1 in {} and 2 in {}
        assert getmsg(f) == "assert (1 in {})"
        def f():
            x = 1
            y = 2
            assert x in {1 : None} and y in {}
        assert getmsg(f) == "assert (1 in {1: None} and 2 in {})"
        def f():
            f = True
            g = False

@@ -16,7 +16,6 @@ class TestCaptureManager:

    def test_configure_per_fspath(self, testdir):
        config = testdir.parseconfig(testdir.tmpdir)
        assert config.getvalue("capture") is None
        capman = CaptureManager()
        hasfd = hasattr(os, 'dup')
        if hasfd:
@@ -53,6 +52,7 @@ class TestCaptureManager:
            capman.resumecapture(method)
            out, err = capman.suspendcapture()
            assert not out and not err
            capman.reset_capturings()
        finally:
            capouter.reset()

@@ -60,20 +60,23 @@ class TestCaptureManager:
    def test_juggle_capturings(self, testdir):
        capouter = py.io.StdCaptureFD()
        try:
            config = testdir.parseconfig(testdir.tmpdir)
            #config = testdir.parseconfig(testdir.tmpdir)
            capman = CaptureManager()
            capman.resumecapture("fd")
            pytest.raises(ValueError, 'capman.resumecapture("fd")')
            pytest.raises(ValueError, 'capman.resumecapture("sys")')
            os.write(1, "hello\n".encode('ascii'))
            out, err = capman.suspendcapture()
            assert out == "hello\n"
            capman.resumecapture("sys")
            os.write(1, "hello\n".encode('ascii'))
            py.builtin.print_("world", file=sys.stderr)
            out, err = capman.suspendcapture()
            assert not out
            assert err == "world\n"
            try:
                capman.resumecapture("fd")
                pytest.raises(ValueError, 'capman.resumecapture("fd")')
                pytest.raises(ValueError, 'capman.resumecapture("sys")')
                os.write(1, "hello\n".encode('ascii'))
                out, err = capman.suspendcapture()
                assert out == "hello\n"
                capman.resumecapture("sys")
                os.write(1, "hello\n".encode('ascii'))
                py.builtin.print_("world", file=sys.stderr)
                out, err = capman.suspendcapture()
                assert not out
                assert err == "world\n"
            finally:
                capman.reset_capturings()
        finally:
            capouter.reset()

@@ -313,7 +313,8 @@ class TestSession:
    def test_collect_topdir(self, testdir):
        p = testdir.makepyfile("def test_func(): pass")
        id = "::".join([p.basename, "test_func"])
        config = testdir.parseconfigure(id)
        # XXX migrate to inline_genitems? (see below)
        config = testdir.parseconfig(id)
        topdir = testdir.tmpdir
        rcol = Session(config)
        assert topdir == rcol.fspath
@@ -328,15 +329,9 @@ class TestSession:
    def test_collect_protocol_single_function(self, testdir):
        p = testdir.makepyfile("def test_func(): pass")
        id = "::".join([p.basename, "test_func"])
        config = testdir.parseconfigure(id)
        topdir = testdir.tmpdir
        rcol = Session(config)
        assert topdir == rcol.fspath
        hookrec = testdir.getreportrecorder(config)
        rcol.perform_collect()
        items = rcol.items
        assert len(items) == 1
        item = items[0]
        items, hookrec = testdir.inline_genitems(id)
        item, = items
        assert item.name == "test_func"
        newid = item.nodeid
        assert newid == id
@@ -363,10 +358,7 @@ class TestSession:
            p.basename + "::TestClass::()",
            normid,
        ]:
            config = testdir.parseconfigure(id)
            rcol = Session(config=config)
            rcol.perform_collect()
            items = rcol.items
            items, hookrec = testdir.inline_genitems(id)
            assert len(items) == 1
            assert items[0].name == "test_method"
            newid = items[0].nodeid
@@ -388,11 +380,7 @@ class TestSession:
        """ % p.basename)
        id = p.basename

        config = testdir.parseconfigure(id)
        rcol = Session(config)
        hookrec = testdir.getreportrecorder(config)
        rcol.perform_collect()
        items = rcol.items
        items, hookrec = testdir.inline_genitems(id)
        py.std.pprint.pprint(hookrec.hookrecorder.calls)
        assert len(items) == 2
        hookrec.hookrecorder.contains([
@@ -413,11 +401,8 @@ class TestSession:
        aaa = testdir.mkpydir("aaa")
        test_aaa = aaa.join("test_aaa.py")
        p.move(test_aaa)
        config = testdir.parseconfigure()
        rcol = Session(config)
        hookrec = testdir.getreportrecorder(config)
        rcol.perform_collect()
        items = rcol.items

        items, hookrec = testdir.inline_genitems()
        assert len(items) == 1
        py.std.pprint.pprint(hookrec.hookrecorder.calls)
        hookrec.hookrecorder.contains([
@@ -437,11 +422,8 @@ class TestSession:
        p.move(test_bbb)

        id = "."
        config = testdir.parseconfigure(id)
        rcol = Session(config)
        hookrec = testdir.getreportrecorder(config)
        rcol.perform_collect()
        items = rcol.items

        items, hookrec = testdir.inline_genitems(id)
        assert len(items) == 2
        py.std.pprint.pprint(hookrec.hookrecorder.calls)
        hookrec.hookrecorder.contains([
@@ -455,19 +437,13 @@ class TestSession:

    def test_serialization_byid(self, testdir):
        p = testdir.makepyfile("def test_func(): pass")
        config = testdir.parseconfigure()
        rcol = Session(config)
        rcol.perform_collect()
        items = rcol.items
        items, hookrec = testdir.inline_genitems()
        assert len(items) == 1
        item, = items
        rcol.config.pluginmanager.unregister(name="session")
        newcol = Session(config)
        item2, = newcol.perform_collect([item.nodeid], genitems=False)
        items2, hookrec = testdir.inline_genitems(item.nodeid)
        item2, = items2
        assert item2.name == item.name
        assert item2.fspath == item.fspath
        item2b, = newcol.perform_collect([item.nodeid], genitems=False)
        assert item2b == item2

    def test_find_byid_without_instance_parents(self, testdir):
        p = testdir.makepyfile("""
@@ -476,10 +452,7 @@ class TestSession:
                pass
        """)
        arg = p.basename + ("::TestClass::test_method")
        config = testdir.parseconfigure(arg)
        rcol = Session(config)
        rcol.perform_collect()
        items = rcol.items
        items, hookrec = testdir.inline_genitems(arg)
        assert len(items) == 1
        item, = items
        assert item.nodeid.endswith("TestClass::()::test_method")
@@ -487,7 +460,7 @@ class TestSession:
class Test_getinitialnodes:
    def test_global_file(self, testdir, tmpdir):
        x = tmpdir.ensure("x.py")
        config = testdir.reparseconfig([x])
        config = testdir.parseconfigure(x)
        col = testdir.getnode(config, x)
        assert isinstance(col, pytest.Module)
        assert col.name == 'x.py'
@@ -502,7 +475,7 @@ class Test_getinitialnodes:
        subdir = tmpdir.join("subdir")
        x = subdir.ensure("x.py")
        subdir.ensure("__init__.py")
        config = testdir.reparseconfig([x])
        config = testdir.parseconfigure(x)
        col = testdir.getnode(config, x)
        assert isinstance(col, pytest.Module)
        assert col.name == 'subdir/x.py'
@@ -528,12 +501,6 @@ class Test_genitems:
        assert hash(i) != hash(j)
        assert i != j

    def test_root_conftest_syntax_error(self, testdir):
        # do we want to unify behaviour with
        # test_subdir_conftest_error?
        p = testdir.makepyfile(conftest="raise SyntaxError\n")
        pytest.raises(SyntaxError, testdir.inline_genitems, p.dirpath())

    def test_example_items1(self, testdir):
        p = testdir.makepyfile('''
            def testone():
@@ -597,6 +564,6 @@ def test_matchnodes_two_collections_same_file(testdir):
    res.stdout.fnmatch_lines([
        "*1 passed*",
    ])

@@ -1,9 +1,9 @@
 import py, pytest

-from _pytest.config import getcfg, Config
+from _pytest.config import getcfg

 class TestParseIni:
-    def test_getcfg_and_config(self, tmpdir):
+    def test_getcfg_and_config(self, testdir, tmpdir):
         sub = tmpdir.mkdir("sub")
         sub.chdir()
         tmpdir.join("setup.cfg").write(py.code.Source("""
@@ -12,22 +12,23 @@ class TestParseIni:
         """))
         cfg = getcfg([sub], ["setup.cfg"])
         assert cfg['name'] == "value"
-        config = Config()
-        config._preparse([sub])
+        config = testdir.parseconfigure(sub)
         assert config.inicfg['name'] == 'value'

-    def test_append_parse_args(self, tmpdir):
+    def test_getcfg_empty_path(self, tmpdir):
+        cfg = getcfg([''], ['setup.cfg']) #happens on py.test ""
+
+    def test_append_parse_args(self, testdir, tmpdir):
         tmpdir.join("setup.cfg").write(py.code.Source("""
             [pytest]
             addopts = --verbose
         """))
-        config = Config()
-        config.parse([tmpdir])
+        config = testdir.parseconfig(tmpdir)
         assert config.option.verbose
-        config = Config()
-        args = [tmpdir,]
-        config._preparse(args, addopts=False)
-        assert len(args) == 1
+        #config = testdir.Config()
+        #args = [tmpdir,]
+        #config._preparse(args, addopts=False)
+        #assert len(args) == 1

     def test_tox_ini_wrong_version(self, testdir):
         p = testdir.makefile('.ini', tox="""
@@ -46,8 +47,7 @@ class TestParseIni:
             [pytest]
             minversion = 1.0
         """))
-        config = Config()
-        config.parse([testdir.tmpdir])
+        config = testdir.parseconfig()
         assert config.getini("minversion") == "1.0"

     def test_toxini_before_lower_pytestini(self, testdir):
@@ -60,8 +60,7 @@ class TestParseIni:
             [pytest]
             minversion = 1.5
         """))
-        config = Config()
-        config.parse([sub])
+        config = testdir.parseconfigure(sub)
         assert config.getini("minversion") == "2.0"

     @pytest.mark.xfail(reason="probably not needed")
@@ -74,10 +73,10 @@ class TestParseIni:
         """)
         result = testdir.runpytest("--confcutdir=.")
         assert result.ret == 0


 class TestConfigCmdlineParsing:
     def test_parsing_again_fails(self, testdir):
-        config = testdir.reparseconfig([testdir.tmpdir])
+        config = testdir.parseconfig()
         pytest.raises(AssertionError, "config.parse([])")
@@ -98,7 +97,7 @@ class TestConfigAPI:
         assert config.getvalue("x") == 1
         assert config.getvalue("x", o.join('sub')) == 2
         pytest.raises(KeyError, "config.getvalue('y')")
-        config = testdir.reparseconfig([str(o.join('sub'))])
+        config = testdir.parseconfigure(str(o.join('sub')))
         assert config.getvalue("x") == 2
         assert config.getvalue("y") == 3
         assert config.getvalue("x", o) == 1
@@ -124,18 +123,18 @@ class TestConfigAPI:
     def test_config_overwrite(self, testdir):
         o = testdir.tmpdir
         o.ensure("conftest.py").write("x=1")
-        config = testdir.reparseconfig([str(o)])
+        config = testdir.parseconfig(str(o))
         assert config.getvalue('x') == 1
         config.option.x = 2
         assert config.getvalue('x') == 2
-        config = testdir.reparseconfig([str(o)])
+        config = testdir.parseconfig([str(o)])
         assert config.getvalue('x') == 1

     def test_getconftest_pathlist(self, testdir, tmpdir):
         somepath = tmpdir.join("x", "y", "z")
         p = tmpdir.join("conftest.py")
         p.write("pathlist = ['.', %r]" % str(somepath))
-        config = testdir.reparseconfig([p])
+        config = testdir.parseconfigure(p)
         assert config._getconftest_pathlist('notexist') is None
         pl = config._getconftest_pathlist('pathlist')
         print(pl)
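Note on the recurring change above: the removed Config()/testdir.reparseconfig() combinations are replaced by the pytester helpers testdir.parseconfig() and testdir.parseconfigure(). A minimal sketch (not part of the diff) of the two helpers as this file now uses them, assuming the 2.2-era pytester API; the test name is illustrative:

    def test_verbose_from_ini(testdir):
        testdir.makeini("""
            [pytest]
            addopts = --verbose
        """)
        config = testdir.parseconfig()      # option/ini parsing only
        assert config.option.verbose
        config = testdir.parseconfigure()   # parsing plus configure hooks
        assert config.option.verbose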
@@ -209,6 +208,40 @@ class TestConfigAPI:
         l = config.getini("a2")
         assert l == []

+    def test_addinivalue_line_existing(self, testdir):
+        testdir.makeconftest("""
+            def pytest_addoption(parser):
+                parser.addini("xy", "", type="linelist")
+        """)
+        p = testdir.makeini("""
+            [pytest]
+            xy= 123
+        """)
+        config = testdir.parseconfig()
+        l = config.getini("xy")
+        assert len(l) == 1
+        assert l == ["123"]
+        config.addinivalue_line("xy", "456")
+        l = config.getini("xy")
+        assert len(l) == 2
+        assert l == ["123", "456"]
+
+    def test_addinivalue_line_new(self, testdir):
+        testdir.makeconftest("""
+            def pytest_addoption(parser):
+                parser.addini("xy", "", type="linelist")
+        """)
+        config = testdir.parseconfig()
+        assert not config.getini("xy")
+        config.addinivalue_line("xy", "456")
+        l = config.getini("xy")
+        assert len(l) == 1
+        assert l == ["456"]
+        config.addinivalue_line("xy", "123")
+        l = config.getini("xy")
+        assert len(l) == 2
+        assert l == ["456", "123"]
+
 def test_options_on_small_file_do_not_blow_up(testdir):
     def runfiletest(opts):
         reprec = testdir.inline_run(*opts)
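The two tests added above pin down Config.addinivalue_line(): appending to an existing "linelist" ini value and creating one that is not yet set. A minimal sketch (not part of the diff) of how a conftest might use the same API; the marker text is purely illustrative:

    # conftest.py
    def pytest_configure(config):
        # append one line to the "markers" linelist, creating it if absent
        config.addinivalue_line("markers",
            "slow: marks a test as slow running")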
@@ -332,17 +332,6 @@ class TestPytestPluginInteractions:
             "*did not find*sys*"
         ])

-    def test_do_option_conftestplugin(self, testdir):
-        p = testdir.makepyfile("""
-            def pytest_addoption(parser):
-                parser.addoption('--test123', action="store_true")
-        """)
-        config = testdir.Config()
-        config._conftest.importconftest(p)
-        print(config.pluginmanager.getplugins())
-        config.parse([])
-        assert not config.option.test123
-
     def test_namespace_early_from_import(self, testdir):
         p = testdir.makepyfile("""
             from pytest import Item
@@ -370,9 +359,7 @@ class TestPytestPluginInteractions:
         ])

     def test_do_option_postinitialize(self, testdir):
-        config = testdir.Config()
-        config.parse([])
-        config.pluginmanager.do_configure(config=config)
+        config = testdir.parseconfigure()
         assert not hasattr(config.option, 'test123')
         p = testdir.makepyfile("""
             def pytest_addoption(parser):
@@ -640,7 +627,7 @@ class TestTracer:
         log2("seen")
         tags, args = l2[0]
         assert args == ("seen",)



     def test_setmyprocessor(self):
         from _pytest.core import TagTracer
@@ -657,3 +644,10 @@ class TestTracer:
         assert "1" in tags
         assert "2" in tags
         assert args == (42,)
+
+def test_default_markers(testdir):
+    result = testdir.runpytest("--markers")
+    result.stdout.fnmatch_lines([
+        "*tryfirst*first*",
+        "*trylast*last*",
+    ])

@@ -68,7 +68,78 @@ class TestMark:
         assert 'reason' not in g.some.kwargs
         assert g.some.kwargs['reason2'] == "456"

+
+def test_ini_markers(testdir):
+    testdir.makeini("""
+        [pytest]
+        markers =
+            a1: this is a webtest marker
+            a2: this is a smoke marker
+    """)
+    testdir.makepyfile("""
+        def test_markers(pytestconfig):
+            markers = pytestconfig.getini("markers")
+            print (markers)
+            assert len(markers) >= 2
+            assert markers[0].startswith("a1:")
+            assert markers[1].startswith("a2:")
+    """)
+    rec = testdir.inline_run()
+    rec.assertoutcome(passed=1)
+
+def test_markers_option(testdir):
+    testdir.makeini("""
+        [pytest]
+        markers =
+            a1: this is a webtest marker
+            a1some: another marker
+    """)
+    result = testdir.runpytest("--markers", )
+    result.stdout.fnmatch_lines([
+        "*a1*this is a webtest*",
+        "*a1some*another marker",
+    ])
+
+
+def test_strict_prohibits_unregistered_markers(testdir):
+    testdir.makepyfile("""
+        import pytest
+        @pytest.mark.unregisteredmark
+        def test_hello():
+            pass
+    """)
+    result = testdir.runpytest("--strict")
+    assert result.ret != 0
+    result.stdout.fnmatch_lines([
+        "*unregisteredmark*not*registered*",
+    ])
+
+@pytest.mark.multi(spec=[
+    ("xyz", ("test_one",)),
+    ("xyz and xyz2", ()),
+    ("xyz2", ("test_two",)),
+    ("xyz or xyz2", ("test_one", "test_two"),)
+])
+def test_mark_option(spec, testdir):
+    testdir.makepyfile("""
+        import pytest
+        @pytest.mark.xyz
+        def test_one():
+            pass
+        @pytest.mark.xyz2
+        def test_two():
+            pass
+    """)
+    opt, passed_result = spec
+    rec = testdir.inline_run("-m", opt)
+    passed, skipped, fail = rec.listoutcomes()
+    passed = [x.nodeid.split("::")[-1] for x in passed]
+    assert len(passed) == len(passed_result)
+    assert list(passed) == list(passed_result)
+
+
 class TestFunctional:

     def test_mark_per_function(self, testdir):
         p = testdir.makepyfile("""
             import pytest
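The tests added above cover the new marker workflow end to end: registering markers through an ini file, listing them with --markers, rejecting unregistered names under --strict, and selecting tests with -m expressions. A minimal sketch (not part of the diff) of the same workflow from a user's point of view; all names are illustrative:

    # pytest.ini
    #     [pytest]
    #     markers =
    #         webtest: mark a test as a webtest

    # test_http.py
    import pytest

    @pytest.mark.webtest
    def test_send_http():
        pass

    def test_local_only():
        pass

    # py.test -m webtest          runs only test_send_http
    # py.test -m "not webtest"    runs only test_local_only
    # py.test --strict            errors out on any unregistered marker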
@@ -77,7 +148,7 @@ class TestFunctional:
             assert hasattr(test_hello, 'hello')
         """)
         result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines(["*passed*"])
+        result.stdout.fnmatch_lines(["*1 passed*"])

     def test_mark_per_module(self, testdir):
         item = testdir.getitem("""
@@ -189,58 +260,6 @@ class TestFunctional:
         ])


-class Test_genitems:
-    def test_check_collect_hashes(self, testdir):
-        p = testdir.makepyfile("""
-            def test_1():
-                pass
-
-            def test_2():
-                pass
-        """)
-        p.copy(p.dirpath(p.purebasename + "2" + ".py"))
-        items, reprec = testdir.inline_genitems(p.dirpath())
-        assert len(items) == 4
-        for numi, i in enumerate(items):
-            for numj, j in enumerate(items):
-                if numj != numi:
-                    assert hash(i) != hash(j)
-                    assert i != j
-
-    def test_root_conftest_syntax_error(self, testdir):
-        # do we want to unify behaviour with
-        # test_subdir_conftest_error?
-        p = testdir.makepyfile(conftest="raise SyntaxError\n")
-        pytest.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
-
-    def test_example_items1(self, testdir):
-        p = testdir.makepyfile('''
-            def testone():
-                pass
-
-            class TestX:
-                def testmethod_one(self):
-                    pass
-
-            class TestY(TestX):
-                pass
-        ''')
-        items, reprec = testdir.inline_genitems(p)
-        assert len(items) == 3
-        assert items[0].name == 'testone'
-        assert items[1].name == 'testmethod_one'
-        assert items[2].name == 'testmethod_one'
-
-        # let's also test getmodpath here
-        assert items[0].getmodpath() == "testone"
-        assert items[1].getmodpath() == "TestX.testmethod_one"
-        assert items[2].getmodpath() == "TestY.testmethod_one"
-
-        s = items[0].getmodpath(stopatmodule=False)
-        assert s.endswith("test_example_items1.testone")
-        print(s)
-
-
 class TestKeywordSelection:
     def test_select_simple(self, testdir):
         file_test = testdir.makepyfile("""

@@ -1,3 +1,4 @@
 import pytest
+
 class TestPasting:
     def pytest_funcarg__pastebinlist(self, request):
@@ -45,3 +46,14 @@ class TestPasting:
         for x in 'test_fail test_skip skipped'.split():
             assert s.find(x), (s, x)

+
+class TestRPCClient:
+    def pytest_funcarg__pastebin(self, request):
+        return request.config.pluginmanager.getplugin('pastebin')
+
+    def test_getproxy(self, pastebin):
+        proxy = pastebin.getproxy()
+        assert proxy is not None
+        assert proxy.__class__.__module__.startswith('xmlrpc')

@@ -106,6 +106,26 @@ class TestPDB:
         if child.isalive():
             child.wait()

+    def test_pdb_interaction_doctest(self, testdir):
+        p1 = testdir.makepyfile("""
+            import pytest
+            def function_1():
+                '''
+                >>> i = 0
+                >>> assert i == 1
+                '''
+        """)
+        child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
+        child.expect("(Pdb)")
+        child.sendline('i')
+        child.expect("0")
+        child.expect("(Pdb)")
+        child.sendeof()
+        rest = child.read()
+        assert "1 failed" in rest
+        if child.isalive():
+            child.wait()
+
     def test_pdb_interaction_capturing_twice(self, testdir):
         p1 = testdir.makepyfile("""
             import pytest
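test_pdb_interaction_doctest above drives the repaired --pdb/doctest interaction through pexpect. A minimal sketch (not part of the diff) of the scenario it automates; the module name is hypothetical:

    # mymodule.py: running "py.test --doctest-modules --pdb mymodule.py"
    # drops into (Pdb) at the failing doctest line, with the doctest
    # namespace (here the name i) available for inspection.
    def function_1():
        '''
        >>> i = 0
        >>> assert i == 1
        '''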
@@ -257,7 +257,7 @@ class TestFunction:
         assert hasattr(modcol.obj, 'test_func')

     def test_function_equality(self, testdir, tmpdir):
-        config = testdir.reparseconfig()
+        config = testdir.parseconfigure()
         session = testdir.Session(config)
         f1 = pytest.Function(name="name", config=config,
             args=(1,), callobj=isinstance, session=session)
@@ -279,7 +279,7 @@ class TestFunction:
         assert not f1 != f1_b

     def test_function_equality_with_callspec(self, testdir, tmpdir):
-        config = testdir.reparseconfig()
+        config = testdir.parseconfigure()
         class callspec1:
             param = 1
             funcargs = {}
@@ -520,12 +520,6 @@ def test_getfuncargnames():
     if sys.version_info < (3,0):
         assert funcargs.getfuncargnames(A.f) == ['arg1']

-def test_callspec_repr():
-    cs = funcargs.CallSpec({}, 'hello', 1)
-    repr(cs)
-    cs = funcargs.CallSpec({}, 'hello', funcargs._notexists)
-    repr(cs)
-
 class TestFillFuncArgs:
     def test_fillfuncargs_exposed(self):
         # used by oejskit
@@ -783,7 +777,7 @@ class TestRequestCachedSetup:
         req2 = funcargs.FuncargRequest(item2)
         ret2 = req2.cached_setup(setup, scope="class")
         assert ret2 == "hello"

-
         req3 = funcargs.FuncargRequest(item3)
         ret3a = req3.cached_setup(setup, scope="class")
         ret3b = req3.cached_setup(setup, scope="class")
@@ -886,6 +880,7 @@ class TestMetafunc:
         def function(): pass
         metafunc = funcargs.Metafunc(function)
         assert not metafunc.funcargnames
+        repr(metafunc._calls)

     def test_function_basic(self):
         def func(arg1, arg2="qwe"): pass
@@ -925,9 +920,9 @@ class TestMetafunc:
         metafunc.addcall(param=obj)
         metafunc.addcall(param=1)
         assert len(metafunc._calls) == 3
-        assert metafunc._calls[0].param == obj
-        assert metafunc._calls[1].param == obj
-        assert metafunc._calls[2].param == 1
+        assert metafunc._calls[0].getparam("arg1") == obj
+        assert metafunc._calls[1].getparam("arg1") == obj
+        assert metafunc._calls[2].getparam("arg1") == 1

     def test_addcall_funcargs(self):
         def func(x): pass
@@ -941,7 +936,119 @@ class TestMetafunc:
         assert metafunc._calls[1].funcargs == {'x': 3}
         assert not hasattr(metafunc._calls[1], 'param')

-class TestGenfuncFunctional:
+    def test_parametrize_error(self):
+        def func(x, y): pass
+        metafunc = funcargs.Metafunc(func)
+        metafunc.parametrize("x", [1,2])
+        pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
+        pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
+        metafunc.parametrize("y", [1,2])
+        pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
+        pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
+
+    def test_parametrize_and_id(self):
+        def func(x, y): pass
+        metafunc = funcargs.Metafunc(func)
+
+        metafunc.parametrize("x", [1,2], ids=['basic', 'advanced'])
+        metafunc.parametrize("y", ["abc", "def"])
+        ids = [x.id for x in metafunc._calls]
+        assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"]
+
+    def test_parametrize_with_userobjects(self):
+        def func(x, y): pass
+        metafunc = funcargs.Metafunc(func)
+        class A:
+            pass
+        metafunc.parametrize("x", [A(), A()])
+        metafunc.parametrize("y", list("ab"))
+        assert metafunc._calls[0].id == ".0-a"
+        assert metafunc._calls[1].id == ".0-b"
+        assert metafunc._calls[2].id == ".1-a"
+        assert metafunc._calls[3].id == ".1-b"
+
+    def test_addcall_and_parametrize(self):
+        def func(x, y): pass
+        metafunc = funcargs.Metafunc(func)
+        metafunc.addcall({'x': 1})
+        metafunc.parametrize('y', [2,3])
+        assert len(metafunc._calls) == 2
+        assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2}
+        assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3}
+        assert metafunc._calls[0].id == "0-2"
+        assert metafunc._calls[1].id == "0-3"
+
+    def test_parametrize_indirect(self):
+        def func(x, y): pass
+        metafunc = funcargs.Metafunc(func)
+        metafunc.parametrize('x', [1], indirect=True)
+        metafunc.parametrize('y', [2,3], indirect=True)
+        assert len(metafunc._calls) == 2
+        assert metafunc._calls[0].funcargs == {}
+        assert metafunc._calls[1].funcargs == {}
+        assert metafunc._calls[0].params == dict(x=1,y=2)
+        assert metafunc._calls[1].params == dict(x=1,y=3)
+
+    def test_addcalls_and_parametrize_indirect(self):
+        def func(x, y): pass
+        metafunc = funcargs.Metafunc(func)
+        metafunc.addcall(param="123")
+        metafunc.parametrize('x', [1], indirect=True)
+        metafunc.parametrize('y', [2,3], indirect=True)
+        assert len(metafunc._calls) == 2
+        assert metafunc._calls[0].funcargs == {}
+        assert metafunc._calls[1].funcargs == {}
+        assert metafunc._calls[0].params == dict(x=1,y=2)
+        assert metafunc._calls[1].params == dict(x=1,y=3)
+
+    def test_parametrize_functional(self, testdir):
+        testdir.makepyfile("""
+            def pytest_generate_tests(metafunc):
+                metafunc.parametrize('x', [1,2], indirect=True)
+                metafunc.parametrize('y', [2])
+            def pytest_funcarg__x(request):
+                return request.param * 10
+            def pytest_funcarg__y(request):
+                return request.param

+            def test_simple(x,y):
+                assert x in (10,20)
+                assert y == 2
+        """)
+        result = testdir.runpytest("-v")
+        result.stdout.fnmatch_lines([
+            "*test_simple*1-2*",
+            "*test_simple*2-2*",
+            "*2 passed*",
+        ])
+
+    def test_parametrize_onearg(self):
+        metafunc = funcargs.Metafunc(lambda x: None)
+        metafunc.parametrize("x", [1,2])
+        assert len(metafunc._calls) == 2
+        assert metafunc._calls[0].funcargs == dict(x=1)
+        assert metafunc._calls[0].id == "1"
+        assert metafunc._calls[1].funcargs == dict(x=2)
+        assert metafunc._calls[1].id == "2"
+
+    def test_parametrize_onearg_indirect(self):
+        metafunc = funcargs.Metafunc(lambda x: None)
+        metafunc.parametrize("x", [1,2], indirect=True)
+        assert metafunc._calls[0].params == dict(x=1)
+        assert metafunc._calls[0].id == "1"
+        assert metafunc._calls[1].params == dict(x=2)
+        assert metafunc._calls[1].id == "2"
+
+    def test_parametrize_twoargs(self):
+        metafunc = funcargs.Metafunc(lambda x,y: None)
+        metafunc.parametrize(("x", "y"), [(1,2), (3,4)])
+        assert len(metafunc._calls) == 2
+        assert metafunc._calls[0].funcargs == dict(x=1, y=2)
+        assert metafunc._calls[0].id == "1-2"
+        assert metafunc._calls[1].funcargs == dict(x=3, y=4)
+        assert metafunc._calls[1].id == "3-4"
+
+class TestMetafuncFunctional:
     def test_attributes(self, testdir):
         p = testdir.makepyfile("""
             # assumes that generate/provide runs in the same process
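The tests added to TestMetafunc above fix the semantics of metafunc.parametrize(): each call adds one argument axis, parametrizing the same name twice raises ValueError, axes combine multiplicatively, and test ids are joined per axis. A minimal sketch (not part of the diff) of the call as a test module would use it; function and argument names are illustrative:

    def pytest_generate_tests(metafunc):
        # two axes give 2 * 3 = 6 test items with ids like "2-0" or "10-5"
        metafunc.parametrize("base", [2, 10])
        metafunc.parametrize("exp", [0, 1, 5])

    def test_pow(base, exp):
        assert base ** exp == int(pow(base, exp))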
@@ -1109,6 +1216,46 @@ class TestGenfuncFunctional:
             "*1 pass*",
         ])

+    def test_parametrize_functional2(self, testdir):
+        testdir.makepyfile("""
+            def pytest_generate_tests(metafunc):
+                metafunc.parametrize("arg1", [1,2])
+                metafunc.parametrize("arg2", [4,5])
+            def test_hello(arg1, arg2):
+                assert 0, (arg1, arg2)
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*(1, 4)*",
+            "*(1, 5)*",
+            "*(2, 4)*",
+            "*(2, 5)*",
+            "*4 failed*",
+        ])
+
+    def test_parametrize_and_inner_getfuncargvalue(self, testdir):
+        p = testdir.makepyfile("""
+            def pytest_generate_tests(metafunc):
+                metafunc.parametrize("arg1", [1], indirect=True)
+                metafunc.parametrize("arg2", [10], indirect=True)
+
+            def pytest_funcarg__arg1(request):
+                x = request.getfuncargvalue("arg2")
+                return x + request.param
+
+            def pytest_funcarg__arg2(request):
+                return request.param
+
+            def test_func1(arg1, arg2):
+                assert arg1 == 11
+        """)
+        result = testdir.runpytest("-v", p)
+        result.stdout.fnmatch_lines([
+            "*test_func1*1*PASS*",
+            "*1 passed*"
+        ])
+
+
 def test_conftest_funcargs_only_available_in_subdir(testdir):
     sub1 = testdir.mkpydir("sub1")
     sub2 = testdir.mkpydir("sub2")
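test_parametrize_and_inner_getfuncargvalue above builds on indirect parametrization: with indirect=True the parametrized value is not handed to the test function directly but arrives as request.param inside the funcarg factory. A minimal sketch (not part of the diff), using the 2.2-era pytest_funcarg__ naming; the db funcarg is illustrative:

    def pytest_generate_tests(metafunc):
        metafunc.parametrize("db", ["sqlite", "mysql"], indirect=True)

    def pytest_funcarg__db(request):
        # request.param is "sqlite" or "mysql"; real setup would connect here
        return {"backend": request.param}

    def test_backend_name(db):
        assert db["backend"] in ("sqlite", "mysql")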
@@ -1320,7 +1467,7 @@ def test_customized_python_discovery(testdir):
         "*CheckMyApp*",
         "*check_meth*",
     ])

-
     result = testdir.runpytest()
     assert result.ret == 0
     result.stdout.fnmatch_lines([
@@ -1354,7 +1501,7 @@ def test_customize_through_attributes(testdir):
             Function = MyFunction
         class MyClass(pytest.Class):
             Instance = MyInstance

-
         def pytest_pycollect_makeitem(collector, name, obj):
             if name.startswith("MyTestClass"):
                 return MyClass(name, parent=collector)

@@ -160,6 +160,45 @@ class BaseFunctionalTests:
         #assert rep.failed.where.path.basename == "test_func.py"
         #assert rep.failed.failurerepr == "hello"

+    def test_teardown_final_returncode(self, testdir):
+        rec = testdir.inline_runsource("""
+            def test_func():
+                pass
+            def teardown_function(func):
+                raise ValueError(42)
+        """)
+        assert rec.ret == 1
+
+    def test_exact_teardown_issue90(self, testdir):
+        rec = testdir.inline_runsource("""
+            import pytest
+
+            class TestClass:
+                def test_method(self):
+                    pass
+                def teardown_class(cls):
+                    raise Exception()
+
+            def test_func():
+                pass
+            def teardown_function(func):
+                raise ValueError(42)
+        """)
+        reps = rec.getreports("pytest_runtest_logreport")
+        print (reps)
+        for i in range(2):
+            assert reps[i].nodeid.endswith("test_method")
+            assert reps[i].passed
+        assert reps[2].when == "teardown"
+        assert reps[2].failed
+        assert len(reps) == 6
+        for i in range(3,5):
+            assert reps[i].nodeid.endswith("test_func")
+            assert reps[i].passed
+        assert reps[5].when == "teardown"
+        assert reps[5].nodeid.endswith("test_func")
+        assert reps[5].failed
+
     def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
         testdir.makepyfile(conftest="""
             import pytest
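test_exact_teardown_issue90 above pins down the eager teardown ordering: each item now emits its setup/call/teardown reports before the next item starts, and a failing teardown surfaces as a distinct "teardown" report. A minimal sketch (not part of the diff) of the behaviour from a test author's perspective:

    # test_cleanup.py: the call phase passes, the teardown failure is
    # reported separately as an error, and the session exits nonzero.
    def test_ok():
        pass

    def teardown_function(function):
        raise ValueError(42)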
@@ -1,4 +1,5 @@
 import pytest
+import sys

 from _pytest.skipping import MarkEvaluator, folded_skips
 from _pytest.skipping import pytest_runtest_setup
@@ -471,6 +472,21 @@ def test_reportchars(testdir):
         "SKIP*four*",
     ])

+def test_reportchars_error(testdir):
+    testdir.makepyfile(
+        conftest="""
+        def pytest_runtest_teardown():
+            assert 0
+        """,
+        test_simple="""
+        def test_foo():
+            pass
+        """)
+    result = testdir.runpytest('-rE')
+    result.stdout.fnmatch_lines([
+        'ERROR*test_foo*',
+    ])
+
 @pytest.mark.xfail("hasattr(sys, 'pypy_version_info')")
 def test_errors_in_xfail_skip_expressions(testdir):
     testdir.makepyfile("""
@@ -486,6 +502,10 @@ def test_errors_in_xfail_skip_expressions(testdir):
             pass
     """)
     result = testdir.runpytest()
+    markline = "                ^"
+    if sys.platform.startswith("java"):
+        # XXX report this to java
+        markline = "*" + markline[8:]
     result.stdout.fnmatch_lines([
         "*ERROR*test_nameerror*",
         "*evaluating*skipif*expression*",
@@ -493,7 +513,7 @@ def test_errors_in_xfail_skip_expressions(testdir):
         "*ERROR*test_syntax*",
         "*evaluating*xfail*expression*",
         "    syntax error",
-        "                ^",
+        markline,
         "SyntaxError: invalid syntax",
         "*1 pass*2 error*",
     ])
@@ -529,3 +549,10 @@ def test_direct_gives_error(testdir):
     ])

+
+def test_default_markers(testdir):
+    result = testdir.runpytest("--markers")
+    result.stdout.fnmatch_lines([
+        "*skipif(*conditions)*skip*",
+        "*xfail(*conditions, reason=None, run=True)*expected failure*",
+    ])

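test_default_markers above asserts that the built-in skipif and xfail markers appear in --markers output together with their signatures. A minimal sketch (not part of the diff) of both markers in use; the conditions are illustrative:

    import pytest

    # string conditions are evaluated by pytest with sys/os in the namespace
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_posix_only():
        pass

    @pytest.mark.xfail(reason="known bug", run=True)
    def test_known_bug():
        assert 0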
@@ -340,7 +340,7 @@ class TestTerminalFunctional:
         result = testdir.runpytest("-k", "test_two:", testpath)
         result.stdout.fnmatch_lines([
             "*test_deselected.py ..",
-            "=* 1 test*deselected by 'test_two:'*=",
+            "=* 1 test*deselected by*test_two:*=",
         ])
         assert result.ret == 0

@@ -1,4 +1,5 @@
 import py, pytest
+import os

 from _pytest.tmpdir import pytest_funcarg__tmpdir, TempdirHandler
 from _pytest.python import FuncargRequest
@@ -54,17 +55,6 @@ class TestConfigTmpdir:
         assert b2.check()
         assert not h.check()

-    def test_reparse(self, testdir):
-        config2 = testdir.reparseconfig([])
-        config3 = testdir.reparseconfig([])
-        assert config2.basetemp != config3.basetemp
-        assert not config2.basetemp.relto(config3.basetemp)
-        assert not config3.basetemp.relto(config2.basetemp)
-
-    def test_reparse_filename_too_long(self, testdir):
-        config = testdir.reparseconfig(["--basetemp=%s" % ("123"*300)])
-
-
 def test_basetemp(testdir):
     mytemp = testdir.tmpdir.mkdir("mytemp")
     p = testdir.makepyfile("""
@@ -75,3 +65,16 @@ def test_basetemp(testdir):
     result = testdir.runpytest(p, '--basetemp=%s' % mytemp)
     assert result.ret == 0
     assert mytemp.join('hello').check()
+
+@pytest.mark.skipif("not hasattr(py.path.local, 'mksymlinkto')")
+def test_tmpdir_keeps_symlinks(testdir):
+    realtemp = testdir.tmpdir.mkdir("myrealtemp")
+    linktemp = testdir.tmpdir.join("symlinktemp")
+    linktemp.mksymlinkto(realtemp)
+    p = testdir.makepyfile("""
+        def test_1(tmpdir):
+            import os
+            assert os.path.realpath(str(tmpdir)) != str(tmpdir)
+    """)
+    result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp)
+    assert not result.ret

tox.ini
@@ -8,7 +8,7 @@ indexserver=

 [testenv]
 changedir=testing
-commands= py.test -rfsxX --junitxml={envlogdir}/junit-{envname}.xml []
+commands= py.test --lsof -rfsxX --junitxml={envlogdir}/junit-{envname}.xml []
 deps=
     :pypi:pexpect
     :pypi:nose
@@ -66,7 +66,7 @@ deps=py>=1.4.0
 [testenv:jython]
 changedir=testing
 commands=
-    {envpython} {envbindir}/py.test-jython --no-tools-on-path \
+    {envpython} {envbindir}/py.test-jython \
         -rfsxX --junitxml={envlogdir}/junit-{envname}2.xml []

 [pytest]