merge with upstream

Anatoly Bubenkov 2013-07-06 10:06:12 +02:00
commit 2c7613c15c
60 changed files with 1623 additions and 477 deletions

View File

@@ -1,9 +1,15 @@
 # Automatically generated by `hgimportsvn`
 syntax:glob
 .svn
 .hgsvn
+# Ignore local virtualenvs
+syntax:glob
+lib/
+bin/
+include/
+.Python/
 # These lines are suggested according to the svn:ignore property
 # Feel free to enable them by uncommenting them
 syntax:glob
@@ -25,3 +31,4 @@ env/
 .tox
 .cache
 .coverage
+.ropeproject

View File

@@ -54,3 +54,9 @@ acf0e1477fb19a1d35a4e40242b77fa6af32eb17 2.3.1
 8738b828dec53937765db71951ef955cca4c51f6 2.3.2
 7fe44182c434f8ac89149a3c340479872a5d5ccb 2.3.3
 ef299e57f24218dbdd949498d7e660723636bcc3 2.3.4
+fc3a793e87ec907000a47ea0d3a372a2fe218c0a 2.3.5
+b93ac0cdae02effaa3c136a681cc45bba757fe46 1.4.14
+b93ac0cdae02effaa3c136a681cc45bba757fe46 1.4.14
+0000000000000000000000000000000000000000 1.4.14
+0000000000000000000000000000000000000000 1.4.14
+0000000000000000000000000000000000000000 1.4.14

View File

@@ -6,9 +6,14 @@ Contributors include::
 Ronny Pfannschmidt
 Benjamin Peterson
 Floris Bruynooghe
+Jason R. Coombs
+Wouter van Ackooy
 Samuele Pedroni
+Brianna Laugher
 Carl Friedrich Bolz
 Armin Rigo
+Maho
+Jaap Broekhuizen
 Maciek Fijalkowski
 Guido Wesdorp
 Brian Dorsey
@@ -25,3 +30,4 @@ Christian Tismer
 Daniel Nuri
 Graham Horler
 Andreas Zeidler
+Brian Okken

View File

@@ -1,6 +1,61 @@
-Changes between 2.3.4 and 2.3.5dev
+Changes between 2.3.5 and 2.4.DEV
 -----------------------------------

+- fix issue323 - sorting of many module-scoped arg parametrizations
+
+- add support for setUpModule/tearDownModule detection, thanks Brian Okken.
+
+- make sessionfinish hooks execute with the same cwd-context as at
+  session start (helps fix plugin behaviour which write output files
+  with relative path such as pytest-cov)
+
+- fix issue316 - properly reference collection hooks in docs
+
+- fix issue 308 - allow to mark/xfail/skip individual parameter sets
+  when parametrizing. Thanks Brianna Laugher.
+
+- simplify parametrize() signature: allow to pass a CSV-separated string
+  to specify argnames. For example:
+  ``pytest.mark.parametrize("input,expected", [(1,2), (2,3)])``
+  is possible now in addition to the prior
+  ``pytest.mark.parametrize(("input", "expected"), ...)``.
+
+- fix issue 306 - cleanup of -k/-m options to only match markers/test
+  names/keywords respectively. Thanks Wouter van Ackooy.
+
+- (experimental) allow fixture functions to be
+  implemented as context managers. Thanks Andreas Pelme,
+  Vladimir Keleshev.
+
+- (experimental) allow boolean expression directly with skipif/xfail
+  if a "reason" is also specified. Rework skipping documentation
+  to recommend "condition as booleans" because it prevents surprises
+  when importing markers between modules. Specifying conditions
+  as strings will remain fully supported.
+
+- improved doctest counting for doctests in python modules --
+  files without any doctest items will not show up anymore
+  and doctest examples are counted as separate test items.
+  thanks Danilo Bellini.
+
+- fix issue245 by depending on the released py-1.4.14
+  which fixes py.io.dupfile to work with files with no
+  mode. Thanks Jason R. Coombs.
+
+- fix junitxml generation when test output contains control characters,
+  addressing issue267, thanks Jaap Broekhuizen
+
+- honor --tb style for setup/teardown errors as well. Thanks Maho.
+
+- fix issue307 - use yaml.safe_load in example, thanks Mark Eichin.
+
+Changes between 2.3.4 and 2.3.5
+-----------------------------------
+
+- never consider a fixture function for test function collection
+
+- allow re-running of test items / helps to fix pytest-reruntests plugin
+  and also help to keep less fixture/resource references alive
+
 - put captured stdout/stderr into junitxml output even for passing tests
   (thanks Adam Goucher)
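
The simplified ``parametrize()`` signature accepts both spellings; a minimal
sketch of the new comma-separated form described in the entry above (the test
name is illustrative)::

    import pytest

    @pytest.mark.parametrize("input,expected", [(1, 2), (2, 3)])
    def test_increment(input, expected):
        assert input + 1 == expected

    # equivalent to the prior tuple form:
    # @pytest.mark.parametrize(("input", "expected"), [(1, 2), (2, 3)])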

View File

@@ -1,2 +1,2 @@
 #
-__version__ = '2.3.5.dev8'
+__version__ = '2.4.0.dev5'

View File

@@ -177,6 +177,10 @@ def _write_pyc(co, source_path, pyc):
             # This happens when we get a EEXIST in find_module creating the
             # __pycache__ directory and __pycache__ is by some non-dir node.
             return False
+        elif err == errno.EACCES:
+            # The directory is read-only; this can happen for example when
+            # running the tests in a package installed as root
+            return False
         raise
     try:
         fp.write(imp.get_magic())
@@ -215,11 +219,17 @@ def _rewrite_test(state, fn):
     if (not source.startswith(BOM_UTF8) and
         (not cookie_re.match(source[0:end1]) or
          not cookie_re.match(source[end1:end2]))):
+        if hasattr(state, "_indecode"):
+            return None  # encodings imported us again, we don't rewrite
+        state._indecode = True
         try:
-            source.decode("ascii")
-        except UnicodeDecodeError:
-            # Let it fail in real import.
-            return None
+            try:
+                source.decode("ascii")
+            except UnicodeDecodeError:
+                # Let it fail in real import.
+                return None
+        finally:
+            del state._indecode
     # On Python versions which are not 2.7 and less than or equal to 3.1, the
     # parser expects *nix newlines.
     if REWRITE_NEWLINES:

View File

@@ -10,6 +10,7 @@ BuiltinAssertionError = py.builtin.builtins.AssertionError
 # DebugInterpreter.
 _reprcompare = None

+
 def format_explanation(explanation):
     """This formats an explanation
@@ -85,7 +86,7 @@ except NameError:
 def assertrepr_compare(config, op, left, right):
     """Return specialised explanations for some operators/operands"""
-    width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+    width = 80 - 15 - len(op) - 2  # 15 chars indentation, 1 space around op
     left_repr = py.io.saferepr(left, maxsize=int(width/2))
     right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
     summary = '%s %s %s' % (left_repr, op, right_repr)
@@ -93,7 +94,7 @@ def assertrepr_compare(config, op, left, right):
     issequence = lambda x: isinstance(x, (list, tuple))
     istext = lambda x: isinstance(x, basestring)
     isdict = lambda x: isinstance(x, dict)
-    isset = lambda x: isinstance(x, set)
+    isset = lambda x: isinstance(x, (set, frozenset))

     verbose = config.getoption('verbose')
     explanation = None
@@ -114,9 +115,9 @@ def assertrepr_compare(config, op, left, right):
         raise
     except:
         excinfo = py.code.ExceptionInfo()
-        explanation = ['(pytest_assertion plugin: representation of '
-            'details failed. Probably an object has a faulty __repr__.)',
-            str(excinfo)]
+        explanation = [
+            '(pytest_assertion plugin: representation of details failed. '
+            'Probably an object has a faulty __repr__.)', str(excinfo)]

     if not explanation:
         return None
@@ -132,7 +133,7 @@ def _diff_text(left, right, verbose=False):
     """
     explanation = []
     if not verbose:
-        i = 0 # just in case left or right has zero length
+        i = 0  # just in case left or right has zero length
         for i in range(min(len(left), len(right))):
             if left[i] != right[i]:
                 break
@@ -166,13 +167,15 @@ def _compare_eq_sequence(left, right, verbose=False):
                 (i, left[i], right[i])]
             break
     if len(left) > len(right):
-        explanation += ['Left contains more items, '
-            'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+        explanation += [
+            'Left contains more items, first extra item: %s' %
+            py.io.saferepr(left[len(right)],)]
     elif len(left) < len(right):
-        explanation += ['Right contains more items, '
-            'first extra item: %s' % py.io.saferepr(right[len(left)],)]
-    return explanation # + _diff_text(py.std.pprint.pformat(left),
-                       #              py.std.pprint.pformat(right))
+        explanation += [
+            'Right contains more items, first extra item: %s' %
+            py.io.saferepr(right[len(left)],)]
+    return explanation  # + _diff_text(py.std.pprint.pformat(left),
+    #                                  py.std.pprint.pformat(right))


 def _compare_eq_set(left, right, verbose=False):
@@ -210,12 +213,12 @@ def _compare_eq_dict(left, right, verbose=False):
     if extra_left:
         explanation.append('Left contains more items:')
         explanation.extend(py.std.pprint.pformat(
-                dict((k, left[k]) for k in extra_left)).splitlines())
+            dict((k, left[k]) for k in extra_left)).splitlines())
     extra_right = set(right) - set(left)
     if extra_right:
         explanation.append('Right contains more items:')
         explanation.extend(py.std.pprint.pformat(
-                dict((k, right[k]) for k in extra_right)).splitlines())
+            dict((k, right[k]) for k in extra_right)).splitlines())
     return explanation
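
With ``isset`` now also matching ``frozenset``, a failing comparison such as
the one below gets the specialised set explanation (extra items per side)
instead of the generic text diff; a minimal sketch::

    def test_frozenset_compare():
        # now reported with "extra items" details on each side,
        # the same way plain set comparisons already were
        assert frozenset([1, 2]) == frozenset([1, 3])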

View File

@@ -34,6 +34,14 @@ class ReprFailDoctest(TerminalRepr):
         self.reprlocation.toterminal(tw)

 class DoctestItem(pytest.Item):
+    def __init__(self, name, parent, runner=None, dtest=None):
+        super(DoctestItem, self).__init__(name, parent)
+        self.runner = runner
+        self.dtest = dtest
+
+    def runtest(self):
+        self.runner.run(self.dtest)
+
     def repr_failure(self, excinfo):
         doctest = py.std.doctest
         if excinfo.errisinstance((doctest.DocTestFailure,
@@ -76,7 +84,7 @@ class DoctestItem(pytest.Item):
         return super(DoctestItem, self).repr_failure(excinfo)

     def reportinfo(self):
-        return self.fspath, None, "[doctest]"
+        return self.fspath, None, "[doctest] %s" % self.name

 class DoctestTextfile(DoctestItem, pytest.File):
     def runtest(self):
@@ -91,8 +99,8 @@ class DoctestTextfile(DoctestItem, pytest.File):
             extraglobs=dict(getfixture=fixture_request.getfuncargvalue),
             raise_on_error=True, verbose=0)

-class DoctestModule(DoctestItem, pytest.File):
-    def runtest(self):
+class DoctestModule(pytest.File):
+    def collect(self):
         doctest = py.std.doctest
         if self.fspath.basename == "conftest.py":
             module = self.config._conftest.importconftest(self.fspath)
@@ -102,7 +110,11 @@ class DoctestModule(DoctestItem, pytest.File):
         self.funcargs = {}
         self._fixtureinfo = FuncFixtureInfo((), [], {})
         fixture_request = FixtureRequest(self)
-        failed, tot = doctest.testmod(
-            module, raise_on_error=True, verbose=0,
-            extraglobs=dict(getfixture=fixture_request.getfuncargvalue),
-            optionflags=doctest.ELLIPSIS)
+        doctest_globals = dict(getfixture=fixture_request.getfuncargvalue)
+        # uses internal doctest module parsing mechanism
+        finder = doctest.DocTestFinder()
+        runner = doctest.DebugRunner(verbose=0, optionflags=doctest.ELLIPSIS)
+        for test in finder.find(module, module.__name__,
+                                extraglobs=doctest_globals):
+            if test.examples:  # skip empty doctests
+                yield DoctestItem(test.name, self, runner, test)
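
Since module doctests are now collected through ``doctest.DocTestFinder``,
every docstring that actually contains examples becomes its own
``DoctestItem``, reported as ``[doctest] <name>``. A hypothetical module
(names are illustrative) that would now yield two separate test items instead
of a single ``testmod()`` run::

    def add(a, b):
        """
        >>> add(1, 2)
        3
        """
        return a + b

    def sub(a, b):
        """
        >>> sub(3, 2)
        1
        """
        return a - b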

View File

@@ -36,7 +36,8 @@ class Junit(py.xml.Namespace):
 # | [#x10000-#x10FFFF]
 _legal_chars = (0x09, 0x0A, 0x0d)
 _legal_ranges = (
-    (0x20, 0xD7FF),
+    (0x20, 0x7E),
+    (0x80, 0xD7FF),
     (0xE000, 0xFFFD),
     (0x10000, 0x10FFFF),
 )
@@ -103,7 +104,7 @@ class LogXML(object):
             classnames.insert(0, self.prefix)
         self.tests.append(Junit.testcase(
             classname=".".join(classnames),
-            name=names[-1],
+            name=bin_xml_escape(names[-1]),
             time=getattr(report, 'duration', 0)
         ))
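
Splitting the first legal range at 0x7E/0x80 makes DEL (0x7F) illegal, so it
gets escaped along with the other control characters. A minimal sketch of the
membership test these tuples drive (the helper name is illustrative, not the
file's actual API)::

    _legal_chars = (0x09, 0x0A, 0x0D)
    _legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF),
                     (0xE000, 0xFFFD), (0x10000, 0x10FFFF))

    def is_legal_xml_char(code):  # illustrative helper
        return (code in _legal_chars or
                any(lo <= code <= hi for lo, hi in _legal_ranges))

    assert not is_legal_xml_char(0x7F)  # DEL is now escaped
    assert is_legal_xml_char(ord('A'))  # printable ASCII passes through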

View File

@@ -97,6 +97,7 @@ def wrap_session(config, doit):
         if session._testsfailed:
             session.exitstatus = EXIT_TESTSFAILED
     finally:
+        session.startdir.chdir()
         if initstate >= 2:
             config.hook.pytest_sessionfinish(
                 session=session,
@@ -216,6 +217,9 @@ class Node(object):
         #: keywords/markers collected from all scopes
         self.keywords = NodeKeywords(self)

+        #: allow adding of extra keywords to use for matching
+        self.extra_keyword_matches = set()
+
         #self.extrainit()

     @property
@@ -307,6 +311,14 @@ class Node(object):
         chain.reverse()
         return chain

+    def listextrakeywords(self):
+        """ Return a set of all extra keywords in self and any parents."""
+        extra_keywords = set()
+        item = self
+        for item in self.listchain():
+            extra_keywords.update(item.extra_keyword_matches)
+        return extra_keywords
+
     def listnames(self):
         return [x.name for x in self.listchain()]

@@ -441,6 +453,7 @@ class Session(FSCollector):
         self.shouldstop = False
         self.trace = config.trace.root.get("collection")
         self._norecursepatterns = config.getini("norecursedirs")
+        self.startdir = py.path.local()

     def pytest_collectstart(self):
         if self.shouldstop:
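
``extra_keyword_matches`` is an ordinary set on every collection node, so
third-party code can tag items for later ``-k`` selection. A hypothetical
``conftest.py`` hook (an assumption for illustration, not part of this
commit) might do::

    def pytest_collection_modifyitems(items):
        # tag every test whose id mentions "slow" so that
        # ``py.test -k slow`` selects it
        for item in items:
            if "slow" in item.nodeid:
                item.extra_keyword_matches.add("slow")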

View File

@@ -1,44 +1,56 @@
 """ generic mechanism for marking and selecting python functions. """
 import pytest, py

+
 def pytest_namespace():
     return {'mark': MarkGenerator()}

+
 def pytest_addoption(parser):
     group = parser.getgroup("general")
-    group._addoption('-k',
+    group._addoption(
+        '-k',
         action="store", dest="keyword", default='', metavar="EXPRESSION",
         help="only run tests which match the given substring expression. "
              "An expression is a python evaluatable expression "
              "where all names are substring-matched against test names "
-             "and keywords. Example: -k 'test_method or test_other' "
-             "matches all test functions whose name contains "
-             "'test_method' or 'test_other'.")
+             "and their parent classes. Example: -k 'test_method or test "
+             "other' matches all test functions and classes whose name "
+             "contains 'test_method' or 'test_other'. "
+             "Additionally keywords are matched to classes and functions "
+             "containing extra names in their 'extra_keyword_matches' set, "
+             "as well as functions which have names assigned directly to them."
+    )

-    group._addoption("-m",
+    group._addoption(
+        "-m",
         action="store", dest="markexpr", default="", metavar="MARKEXPR",
         help="only run tests matching given mark expression. "
              "example: -m 'mark1 and not mark2'."
     )

-    group.addoption("--markers", action="store_true", help=
-        "show markers (builtin, plugin and per-project ones).")
+    group.addoption(
+        "--markers", action="store_true",
+        help="show markers (builtin, plugin and per-project ones)."
+    )

     parser.addini("markers", "markers for test functions", 'linelist')

+
 def pytest_cmdline_main(config):
     if config.option.markers:
         config.pluginmanager.do_configure(config)
         tw = py.io.TerminalWriter()
         for line in config.getini("markers"):
             name, rest = line.split(":", 1)
             tw.write("@pytest.mark.%s:" % name, bold=True)
             tw.line(rest)
             tw.line()
         config.pluginmanager.do_unconfigure(config)
         return 0
 pytest_cmdline_main.tryfirst = True

+
 def pytest_collection_modifyitems(items, config):
     keywordexpr = config.option.keyword
     matchexpr = config.option.markexpr
@@ -67,32 +79,76 @@ def pytest_collection_modifyitems(items, config):
     config.hook.pytest_deselected(items=deselected)
     items[:] = remaining

-class BoolDict:
-    def __init__(self, mydict):
-        self._mydict = mydict
-    def __getitem__(self, name):
-        return name in self._mydict
-
-class SubstringDict:
-    def __init__(self, mydict):
-        self._mydict = mydict
-    def __getitem__(self, name):
-        for key in self._mydict:
-            if name in key:
-                return True
-        return False
+class MarkMapping:
+    """Provides a local mapping for markers.
+    Only the marker names from the given :class:`NodeKeywords` will be mapped,
+    so the names are taken only from :class:`MarkInfo` or
+    :class:`MarkDecorator` items.
+    """
+    def __init__(self, keywords):
+        mymarks = set()
+        for key, value in keywords.items():
+            if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator):
+                mymarks.add(key)
+        self._mymarks = mymarks
+
+    def __getitem__(self, markname):
+        return markname in self._mymarks
+
+
+class KeywordMapping:
+    """Provides a local mapping for keywords.
+    Given a list of names, map any substring of one of these names to True.
+    """
+    def __init__(self, names):
+        self._names = names
+
+    def __getitem__(self, subname):
+        for name in self._names:
+            if subname in name:
+                return True
+        return False
+

-def matchmark(colitem, matchexpr):
-    return eval(matchexpr, {}, BoolDict(colitem.keywords))
+def matchmark(colitem, markexpr):
+    """Tries to match on any marker names, attached to the given colitem."""
+    return eval(markexpr, {}, MarkMapping(colitem.keywords))
+

 def matchkeyword(colitem, keywordexpr):
+    """Tries to match given keyword expression to given collector item.
+
+    Will match on the name of colitem, including the names of its parents.
+    Only matches names of items which are either a :class:`Class` or a
+    :class:`Function`.
+    Additionally, matches on names in the 'extra_keyword_matches' set of
+    any item, as well as names directly assigned to test functions.
+    """
     keywordexpr = keywordexpr.replace("-", "not ")
-    return eval(keywordexpr, {}, SubstringDict(colitem.keywords))
+    mapped_names = set()
+
+    # Add the names of the current item and any parent items
+    for item in colitem.listchain():
+        if not isinstance(item, pytest.Instance):
+            mapped_names.add(item.name)
+
+    # Add the names added as extra keywords to current or parent items
+    for name in colitem.listextrakeywords():
+        mapped_names.add(name)
+
+    # Add the names attached to the current function through direct assignment
+    for name in colitem.function.__dict__:
+        mapped_names.add(name)
+
+    return eval(keywordexpr, {}, KeywordMapping(mapped_names))
+

 def pytest_configure(config):
     if config.option.strict:
         pytest.mark._config = config

+
 class MarkGenerator:
     """ Factory for :class:`MarkDecorator` objects - exposed as
     a ``py.test.mark`` singleton instance. Example::
@@ -126,6 +182,7 @@ class MarkGenerator:
         if name not in self._markers:
             raise AttributeError("%r not a registered marker" % (name,))

+
 class MarkDecorator:
     """ A decorator for test functions and test classes. When applied
     it will create :class:`MarkInfo` objects which may be
@@ -149,7 +206,7 @@ class MarkDecorator:
     def __repr__(self):
         d = self.__dict__.copy()
         name = d.pop('markname')
-        return "<MarkDecorator %r %r>" %(name, d)
+        return "<MarkDecorator %r %r>" % (name, d)

     def __call__(self, *args, **kwargs):
         """ if passed a single callable argument: decorate it with mark info.
@@ -162,15 +219,17 @@ class MarkDecorator:
             if hasattr(func, 'pytestmark'):
                 l = func.pytestmark
                 if not isinstance(l, list):
                     func.pytestmark = [l, self]
                 else:
                     l.append(self)
             else:
                 func.pytestmark = [self]
         else:
             holder = getattr(func, self.markname, None)
             if holder is None:
-                holder = MarkInfo(self.markname, self.args, self.kwargs)
+                holder = MarkInfo(
+                    self.markname, self.args, self.kwargs
+                )
                 setattr(func, self.markname, holder)
             else:
                 holder.add(self.args, self.kwargs)
@@ -180,6 +239,7 @@ class MarkDecorator:
         args = self.args + args
         return self.__class__(self.markname, args=args, kwargs=kw)

+
 class MarkInfo:
     """ Marking object created by :class:`MarkDecorator` instances. """
     def __init__(self, name, args, kwargs):
@@ -193,7 +253,8 @@ class MarkInfo:
     def __repr__(self):
         return "<MarkInfo %r args=%r kwargs=%r>" % (
-            self.name, self.args, self.kwargs)
+            self.name, self.args, self.kwargs
+        )

     def add(self, args, kwargs):
         """ add a MarkInfo with the given args and kwargs. """
@@ -205,4 +266,3 @@ class MarkInfo:
         """ yield MarkInfo objects each relating to a marking-call. """
         for args, kwargs in self._arglist:
             yield MarkInfo(self.name, args, kwargs)
-
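
Both mappings rely on ``eval`` resolving every bare name in the expression
through ``__getitem__`` of the locals mapping, so ``and``/``or``/``not`` come
for free. A standalone sketch of the ``KeywordMapping`` substring lookup::

    class KeywordMapping:
        def __init__(self, names):
            self._names = names

        def __getitem__(self, subname):
            # True if subname is a substring of any collected name
            return any(subname in name for name in self._names)

    names = ["test_send_http", "TestServer"]
    print(eval("http and not quick", {}, KeywordMapping(names)))  # True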

View File

@@ -4,6 +4,7 @@ import inspect
 import sys
 import pytest
 from _pytest.main import getfslineno
+from _pytest.mark import MarkDecorator, MarkInfo
 from _pytest.monkeypatch import monkeypatch
 from py._code.code import TerminalRepr
@@ -177,7 +178,8 @@ def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
         if collector.classnamefilter(name):
             Class = collector._getcustomclass("Class")
             return Class(name, parent=collector)
-        elif collector.funcnamefilter(name) and hasattr(obj, '__call__'):
+        elif collector.funcnamefilter(name) and hasattr(obj, '__call__') and \
+                getfixturemarker(obj) is None:
             if is_generator(obj):
                 return Generator(name, parent=collector)
             else:
@@ -369,7 +371,9 @@ class Module(pytest.File, PyCollector):
         return mod

     def setup(self):
-        setup_module = xunitsetup(self.obj, "setup_module")
+        setup_module = xunitsetup(self.obj, "setUpModule")
+        if setup_module is None:
+            setup_module = xunitsetup(self.obj, "setup_module")
         if setup_module is not None:
             #XXX: nose compat hack, move to nose plugin
             # if it takes a positional arg, its probably a pytest style one
@@ -380,7 +384,9 @@ class Module(pytest.File, PyCollector):
             setup_module()

     def teardown(self):
-        teardown_module = xunitsetup(self.obj, 'teardown_module')
+        teardown_module = xunitsetup(self.obj, 'tearDownModule')
+        if teardown_module is None:
+            teardown_module = xunitsetup(self.obj, 'teardown_module')
         if teardown_module is not None:
             #XXX: nose compat hack, move to nose plugin
             # if it takes a positional arg, its probably a py.test style one
@@ -564,11 +570,13 @@ class CallSpec2(object):
         self._globalid_args = set()
         self._globalparam = _notexists
         self._arg2scopenum = {}  # used for sorting parametrized resources
+        self.keywords = {}

     def copy(self, metafunc):
         cs = CallSpec2(self.metafunc)
         cs.funcargs.update(self.funcargs)
         cs.params.update(self.params)
+        cs.keywords.update(self.keywords)
         cs._arg2scopenum.update(self._arg2scopenum)
         cs._idlist = list(self._idlist)
         cs._globalid = self._globalid
@@ -592,7 +600,7 @@ class CallSpec2(object):
     def id(self):
         return "-".join(map(str, filter(None, self._idlist)))

-    def setmulti(self, valtype, argnames, valset, id, scopenum=0):
+    def setmulti(self, valtype, argnames, valset, id, keywords, scopenum=0):
         for arg,val in zip(argnames, valset):
             self._checkargnotcontained(arg)
             getattr(self, valtype)[arg] = val
@@ -604,6 +612,7 @@ class CallSpec2(object):
             if val is _notexists:
                 self._emptyparamspecified = True
         self._idlist.append(id)
+        self.keywords.update(keywords)

     def setall(self, funcargs, id, param):
         for x in funcargs:
@@ -644,13 +653,15 @@ class Metafunc(FuncargnamesCompatAttr):
         during the collection phase. If you need to setup expensive resources
         see about setting indirect=True to do it rather at test setup time.

-        :arg argnames: an argument name or a list of argument names
+        :arg argnames: a comma-separated string denoting one or more argument
+            names, or a list/tuple of argument strings.

-        :arg argvalues: The list of argvalues determines how often a test is invoked
-            with different argument values. If only one argname was specified argvalues
-            is a list of simple values. If N argnames were specified, argvalues must
-            be a list of N-tuples, where each tuple-element specifies a value for its
-            respective argname.
+        :arg argvalues: The list of argvalues determines how often a
+            test is invoked with different argument values. If only one
+            argname was specified argvalues is a list of simple values. If N
+            argnames were specified, argvalues must be a list of N-tuples,
+            where each tuple-element specifies a value for its respective
+            argname.

         :arg indirect: if True each argvalue corresponding to an argname will
             be passed as request.param to its respective argname fixture
@@ -666,8 +677,24 @@
         It will also override any fixture-function defined scope, allowing
         to set a dynamic scope using test context or configuration.
         """
+
+        # individual parametrized argument sets can be wrapped in a
+        # marker in which case we unwrap the values and apply the mark
+        # at Function init
+        newkeywords = {}
+        unwrapped_argvalues = []
+        for i, argval in enumerate(argvalues):
+            if isinstance(argval, MarkDecorator):
+                newmark = MarkDecorator(argval.markname,
+                                        argval.args[:-1], argval.kwargs)
+                newkeywords[i] = {newmark.markname: newmark}
+                argval = argval.args[-1]
+            unwrapped_argvalues.append(argval)
+        argvalues = unwrapped_argvalues
+
         if not isinstance(argnames, (tuple, list)):
-            argnames = (argnames,)
-            argvalues = [(val,) for val in argvalues]
+            argnames = [x.strip() for x in argnames.split(",") if x.strip()]
+            if len(argnames) == 1:
+                argvalues = [(val,) for val in argvalues]
         if not argvalues:
             argvalues = [(_notexists,) * len(argnames)]
@@ -690,7 +717,7 @@
             assert len(valset) == len(argnames)
             newcallspec = callspec.copy(self)
             newcallspec.setmulti(valtype, argnames, valset, ids[i],
-                scopenum)
+                                 newkeywords.get(i, {}), scopenum)
             newcalls.append(newcallspec)
         self._calls = newcalls
@@ -907,6 +934,9 @@ class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
         for name, val in (py.builtin._getfuncdict(self.obj) or {}).items():
             self.keywords[name] = val
+        if callspec:
+            for name, val in callspec.keywords.items():
+                self.keywords[name] = val
         if keywords:
             for name, val in keywords.items():
                 self.keywords[name] = val
@@ -917,20 +947,25 @@ class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
                                           self.cls,
                                           funcargs=not isyield)
         self.fixturenames = fi.names_closure
-        if isyield:
-            assert not callspec, (
-                "yielded functions (deprecated) cannot have funcargs")
+        if callspec is not None:
+            self.callspec = callspec
+        self._initrequest()
+
+    def _initrequest(self):
+        if self._isyieldedfunction():
+            assert not hasattr(self, "callspec"), (
+                "yielded functions (deprecated) cannot have funcargs")
             self.funcargs = {}
         else:
-            if callspec is not None:
-                self.callspec = callspec
-                self.funcargs = callspec.funcargs or {}
+            if hasattr(self, "callspec"):
+                callspec = self.callspec
+                self.funcargs = callspec.funcargs.copy()
                 self._genid = callspec.id
                 if hasattr(callspec, "param"):
                     self.param = callspec.param
             else:
                 self.funcargs = {}
-        self._request = req = FixtureRequest(self)
+        self._request = FixtureRequest(self)

     @property
     def function(self):
@@ -1561,15 +1596,7 @@ class FixtureManager:
                 continue
             # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
             # or are "@pytest.fixture" marked
-            try:
-                marker = obj._pytestfixturefunction
-            except KeyboardInterrupt:
-                raise
-            except Exception:
-                # some objects raise errors like request (from flask import request)
-                # we don't expect them to be fixture functions
-                marker = None
+            marker = getfixturemarker(obj)
             if marker is None:
                 if not name.startswith(self._argprefix):
                     continue
@@ -1615,6 +1642,29 @@
             except ValueError:
                 pass

+
+def call_fixture_func(fixturefunc, request, kwargs):
+    if is_generator(fixturefunc):
+        iter = fixturefunc(**kwargs)
+        next = getattr(iter, "__next__", None)
+        if next is None:
+            next = getattr(iter, "next")
+        res = next()
+
+        def teardown():
+            try:
+                next()
+            except StopIteration:
+                pass
+            else:
+                fs, lineno = getfslineno(fixturefunc)
+                location = "%s:%s" % (fs, lineno+1)
+                pytest.fail(
+                    "fixture function %s has more than one 'yield': \n%s" %
+                    (fixturefunc.__name__, location), pytrace=False)
+
+        request.addfinalizer(teardown)
+    else:
+        res = fixturefunc(**kwargs)
+    return res
+
 class FixtureDef:
     """ A container for a factory definition. """
     def __init__(self, fixturemanager, baseid, argname, func, scope, params,
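
``call_fixture_func`` is what backs the experimental context-manager style
fixtures mentioned in the changelog: code before the ``yield`` is setup, the
yielded value is handed to the test, and the generator is resumed exactly
once for teardown. A minimal sketch of a fixture written this way (later
pytest releases spelled this ``@pytest.yield_fixture`` before plain fixtures
accepted ``yield``)::

    import pytest

    @pytest.fixture
    def resource():
        res = {"open": True}   # setup, runs before the test
        yield res              # value injected into the test
        res["open"] = False    # teardown, runs after the test

    def test_resource(resource):
        assert resource["open"]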
@@ -1665,7 +1715,7 @@ class FixtureDef:
                 fixturefunc = fixturefunc.__get__(request.instance)
             except AttributeError:
                 pass
-        result = fixturefunc(**kwargs)
+        result = call_fixture_func(fixturefunc, request, kwargs)
         assert not hasattr(self, "cached_result")
         self.cached_result = result
         return result
@@ -1697,29 +1747,39 @@
 def parametrize_sorted(items, ignore, cache, scopenum):
     if scopenum >= 3:
         return items
-    newitems = []
-    olditems = []
+
+    # we pick the first item which has a arg/param combo in the
+    # requested scope and sort other items with the same combo
+    # into "similar_items" which then is a list of all items using this
+    # arg/param.
+    similar_items = []
+    other_items = []
     slicing_argparam = None
+    slicing_index = 0
     for item in items:
         argparamlist = getfuncargparams(item, ignore, scopenum, cache)
         if slicing_argparam is None and argparamlist:
             slicing_argparam = argparamlist[0]
-            slicing_index = len(olditems)
+            slicing_index = len(other_items)
         if slicing_argparam in argparamlist:
-            newitems.append(item)
+            similar_items.append(item)
         else:
-            olditems.append(item)
-    if newitems:
+            other_items.append(item)
+
+    if (len(similar_items) + slicing_index) > 1:
         newignore = ignore.copy()
         newignore.add(slicing_argparam)
-        newitems = parametrize_sorted(newitems + olditems[slicing_index:],
-                                      newignore, cache, scopenum)
-        old1 = parametrize_sorted(olditems[:slicing_index], newignore,
-                                  cache, scopenum+1)
-        return old1 + newitems
+        part2 = parametrize_sorted(
+            similar_items + other_items[slicing_index:],
+            newignore, cache, scopenum)
+        part1 = parametrize_sorted(
+            other_items[:slicing_index], newignore,
+            cache, scopenum+1)
+        return part1 + part2
     else:
-        olditems = parametrize_sorted(olditems, ignore, cache, scopenum+1)
-    return olditems + newitems
+        other_items = parametrize_sorted(other_items, ignore, cache, scopenum+1)
+    return other_items + similar_items

 def getfuncargparams(item, ignore, scopenum, cache):
     """ return list of (arg,param) tuple, sorted by broader scope first. """
@@ -1766,6 +1826,18 @@
 def xunitsetup(obj, name):
     meth = getattr(obj, name, None)
-    if meth is not None:
-        if not hasattr(meth, "_pytestfixturefunction"):
-            return meth
+    if getfixturemarker(meth) is None:
+        return meth
+
+
+def getfixturemarker(obj):
+    """ return fixturemarker or None if it doesn't exist or raised
+    exceptions."""
+    try:
+        return getattr(obj, "_pytestfixturefunction", None)
+    except KeyboardInterrupt:
+        raise
+    except Exception:
+        # some objects raise errors like request (from flask import request)
+        # we don't expect them to be fixture functions
+        return None

View File

@@ -63,12 +63,20 @@ def pytest_runtest_protocol(item, nextitem):
     return True

 def runtestprotocol(item, log=True, nextitem=None):
+    hasrequest = hasattr(item, "_request")
+    if hasrequest and not item._request:
+        item._initrequest()
     rep = call_and_report(item, "setup", log)
     reports = [rep]
     if rep.passed:
         reports.append(call_and_report(item, "call", log))
     reports.append(call_and_report(item, "teardown", log,
         nextitem=nextitem))
+    # after all teardown hooks have been called
+    # want funcargs and request info to go away
+    if hasrequest:
+        item._request = False
+        item.funcargs = None
     return reports

 def pytest_runtest_setup(item):
@@ -190,7 +198,8 @@ def pytest_runtest_makereport(item, call):
     if call.when == "call":
         longrepr = item.repr_failure(excinfo)
     else: # exception in setup or teardown
-        longrepr = item._repr_failure_py(excinfo)
+        longrepr = item._repr_failure_py(excinfo,
+                                         style=item.config.option.tbstyle)
     return TestReport(item.nodeid, item.location,
                       keywords, outcome, longrepr, when,
                       duration=duration)

View File

@@ -89,7 +89,11 @@ class MarkEvaluator:
             if isinstance(expr, py.builtin._basestring):
                 result = cached_eval(self.item.config, expr, d)
             else:
-                pytest.fail("expression is not a string")
+                if self.get("reason") is None:
+                    # XXX better be checked at collection time
+                    pytest.fail("you need to specify reason=STRING "
+                                "when using booleans as conditions.")
+                result = bool(expr)
             if result:
                 self.result = True
                 self.expr = expr
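
This change is what lets ``skipif``/``xfail`` conditions be plain booleans,
provided a ``reason`` is given. Both spellings below are accepted after it; a
minimal sketch::

    import sys
    import pytest

    @pytest.mark.skipif("sys.version_info >= (3,0)")  # string, evaluated lazily
    def test_string_condition():
        pass

    @pytest.mark.skipif(sys.version_info >= (3, 0),
                        reason="requires Python 2")   # boolean needs reason=
    def test_boolean_condition():
        pass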

View File

@@ -454,10 +454,14 @@ class TerminalReporter:
             if val:
                 parts.append("%d %s" %(len(val), key))
         line = ", ".join(parts)
-        # XXX coloring
         msg = "%s in %.2f seconds" %(line, session_duration)
         if self.verbosity >= 0:
-            self.write_sep("=", msg, bold=True)
+            markup = dict(bold=True)
+            if 'failed' in self.stats:
+                markup['red'] = True
+            else:
+                markup['green'] = True
+            self.write_sep("=", msg, **markup)
         #else:
         #    self.write_line(msg, bold=True)

View File

@@ -3,6 +3,12 @@
 {% block relbaritems %}
 {{ super() }}
 <g:plusone></g:plusone>
+
+<iframe style="border: 0; margin: 0; padding: 0;"
+        src="https://www.gittip.com/hpk42/widget.html"
+        width="48pt" height="22pt"></iframe>
+
 {% endblock %}

 {% block footer %}
View File

@@ -31,6 +31,8 @@
 <a href="https://bitbucket.org/hpk42/pytest/issues?status=new&status=open">issues[bb]</a>
 </td><td>
 <a href="{{ pathto('contact') }}">contact</a>
+</td></tr><tr><td>
+<a href="{{ pathto('talks') }}">Talks/Posts</a>
 </td></tr></table>
 </div>
 {% extends "basic/localtoc.html" %}

View File

@@ -5,6 +5,7 @@ Release announcements
 .. toctree::
    :maxdepth: 2

+   release-2.3.5
    release-2.3.4
    release-2.3.3
    release-2.3.2

View File

@@ -1,23 +1,61 @@
-pytest-2.3.5: bug fixes
+pytest-2.3.5: bug fixes and little improvements
 ===========================================================================

-pytest-2.3.5 is a bug fix release for the pytest testing tool.
-See the changelog below for details. And
+pytest-2.3.5 is a maintenance release with many bug fixes and little
+improvements. See the changelog below for details. No backward
+compatibility issues are foreseen and all plugins which worked with the
+prior version are expected to work unmodified. Speaking of which, a
+few interesting new plugins saw the light last month:
+
+- pytest-instafail: show failure information while tests are running
+- pytest-qt: testing of GUI applications written with QT/Pyside
+- pytest-xprocess: managing external processes across test runs
+- pytest-random: randomize test ordering
+
+And several others like pytest-django saw maintenance releases.
+For a more complete list, check out
+https://pypi.python.org/pypi?%3Aaction=search&term=pytest&submit=search.
+
+For general information see:

 http://pytest.org/

-for general information. To install or upgrade pytest:
+To install or upgrade pytest:

     pip install -U pytest # or
     easy_install -U pytest

-best,
+Particular thanks to Floris, Ronny, Benjamin and the many bug reporters
+and fix providers.
+
+may the fixtures be with you,
 holger krekel

 Changes between 2.3.4 and 2.3.5
 -----------------------------------

+- never consider a fixture function for test function collection
+
+- allow re-running of test items / helps to fix pytest-reruntests plugin
+  and also help to keep less fixture/resource references alive
+
+- put captured stdout/stderr into junitxml output even for passing tests
+  (thanks Adam Goucher)
+
+- Issue 265 - integrate nose setup/teardown with setupstate
+  so it doesn't try to teardown if it did not setup
+
+- issue 271 - don't write junitxml on slave nodes
+
+- Issue 274 - don't try to show full doctest example
+  when doctest does not know the example location
+
+- issue 280 - disable assertion rewriting on buggy CPython 2.6.0
+
+- inject "getfixture()" helper to retrieve fixtures from doctests,
+  thanks Andreas Zeidler
+
 - issue 259 - when assertion rewriting, be consistent with the default
   source encoding of ASCII on Python 2

@@ -26,7 +64,7 @@ Changes between 2.3.4 and 2.3.5
 - issue250 unicode/str mixes in parametrization names and values now works

 - issue257, assertion-triggered compilation of source ending in a
-  comment line doesn't blow up in python2.5 (fixed through py>=1.4.13)
+  comment line doesn't blow up in python2.5 (fixed through py>=1.4.13.dev6)

 - fix --genscript option to generate standalone scripts that also
   work with python3.3 (importer ordering)

View File

@@ -26,7 +26,7 @@ you will see the return value of the function call::

     $ py.test test_assert1.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 1 items

     test_assert1.py F
@@ -110,7 +110,7 @@ if you run this module::

     $ py.test test_assert2.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 1 items

     test_assert2.py F

View File

@@ -64,7 +64,7 @@ of the failing function and hide the other one::

     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 2 items

     test_module.py .F
@@ -78,7 +78,7 @@ of the failing function and hide the other one::

     test_module.py:9: AssertionError
     ----------------------------- Captured stdout ------------------------------
-    setting up <function test_func2 at 0x1e12f50>
+    setting up <function test_func2 at 0x2d79f50>
     ==================== 1 failed, 1 passed in 0.01 seconds ====================

 Accessing captured output from a test function

View File

@@ -17,7 +17,7 @@
 #
 # The full version, including alpha/beta/rc tags.
 # The short X.Y version.
-version = release = "2.3.4.1"
+version = release = "2.3.5"

 import sys, os

View File

@@ -44,7 +44,7 @@ then you can just invoke ``py.test`` without command line options::

     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 1 items

     mymodule.py .

View File

@@ -28,7 +28,7 @@ You can then restrict a test run to only run tests marked with ``webtest``::

     $ py.test -v -m webtest
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4 -- /home/hpk/p/pytest/.tox/regen/bin/python
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
     collecting ... collected 3 items

     test_server.py:3: test_send_http PASSED
@@ -40,7 +40,7 @@ Or the inverse, running all tests except the webtest ones::

     $ py.test -v -m "not webtest"
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4 -- /home/hpk/p/pytest/.tox/regen/bin/python
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
     collecting ... collected 3 items

     test_server.py:6: test_something_quick PASSED
@@ -61,7 +61,7 @@ select tests based on their names::

     $ py.test -v -k http # running with the above defined example module
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4 -- /home/hpk/p/pytest/.tox/regen/bin/python
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
     collecting ... collected 3 items

     test_server.py:3: test_send_http PASSED
@@ -73,7 +73,7 @@ And you can also run all tests except the ones that match the keyword::

     $ py.test -k "not send_http" -v
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4 -- /home/hpk/p/pytest/.tox/regen/bin/python
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
     collecting ... collected 3 items

     test_server.py:6: test_something_quick PASSED
@@ -86,7 +86,7 @@ Or to select "http" and "quick" tests::

     $ py.test -k "http or quick" -v
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4 -- /home/hpk/p/pytest/.tox/regen/bin/python
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
     collecting ... collected 3 items

     test_server.py:3: test_send_http PASSED
@@ -185,6 +185,29 @@ You can also set a module level marker::
 in which case it will be applied to all functions and
 methods defined in the module.

+.. _`marking individual tests when using parametrize`:
+
+Marking individual tests when using parametrize
+-----------------------------------------------
+
+When using parametrize, applying a mark will make it apply
+to each individual test. However it is also possible to
+apply a marker to an individual test instance::
+
+    import pytest
+
+    @pytest.mark.foo
+    @pytest.mark.parametrize(("n", "expected"), [
+        (1, 2),
+        pytest.mark.bar((1, 3)),
+        (2, 3),
+    ])
+    def test_increment(n, expected):
+        assert n + 1 == expected
+
+In this example the mark "foo" will apply to each of the three
+tests, whereas the "bar" mark is only applied to the second test.
+Skip and xfail marks can also be applied in this way, see
+:ref:`skip/xfail with parametrize`.
+
 .. _`adding a custom marker from a plugin`:
@@ -232,7 +255,7 @@ the test needs::

     $ py.test -E stage2
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 1 items

     test_someenv.py s
@@ -243,7 +266,7 @@ and here is one that specifies exactly the environment needed::

     $ py.test -E stage1
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 1 items

     test_someenv.py .
@@ -360,12 +383,12 @@ then you will see two test skipped and two executed tests as expected::

     $ py.test -rs # this option reports skip reasons
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 4 items

     test_plat.py s.s.
     ========================= short test summary info ==========================
-    SKIP [2] /tmp/doc-exec-133/conftest.py:12: cannot run on platform linux2
+    SKIP [2] /tmp/doc-exec-273/conftest.py:12: cannot run on platform linux2

     =================== 2 passed, 2 skipped in 0.01 seconds ====================
@@ -373,7 +396,7 @@ Note that if you specify a platform via the marker-command line option like this

     $ py.test -m linux2
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 4 items

     test_plat.py .
@@ -424,7 +447,7 @@ We can now use the ``-m option`` to select one set::

     $ py.test -m interface --tb=short
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 4 items

     test_module.py FF
@@ -445,7 +468,7 @@ or to select both "event" and "interface" tests::

     $ py.test -m "interface or event" --tb=short
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 4 items

     test_module.py FFF

View File

@@ -27,7 +27,7 @@ now execute the test specification::

     nonpython $ py.test test_simple.yml
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 2 items

     test_simple.yml .F
@@ -37,7 +37,7 @@ now execute the test specification::
     usecase execution failed
        spec failed: 'some': 'other'
        no further details known at this point.
-    ==================== 1 failed, 1 passed in 0.09 seconds ====================
+    ==================== 1 failed, 1 passed in 0.05 seconds ====================

 You get one dot for the passing ``sub1: sub1`` check and one failure.
 Obviously in the above ``conftest.py`` you'll want to implement a more
@@ -56,7 +56,7 @@ consulted when reporting in ``verbose`` mode::

     nonpython $ py.test -v
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4 -- /home/hpk/p/pytest/.tox/regen/bin/python
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
     collecting ... collected 2 items

     test_simple.yml:1: usecase: ok PASSED
@@ -67,17 +67,17 @@ consulted when reporting in ``verbose`` mode::
     usecase execution failed
        spec failed: 'some': 'other'
        no further details known at this point.
-    ==================== 1 failed, 1 passed in 0.04 seconds ====================
+    ==================== 1 failed, 1 passed in 0.05 seconds ====================

 While developing your custom test collection and execution it's also
 interesting to just look at the collection tree::

     nonpython $ py.test --collectonly
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.3 -- pytest-2.3.4
+    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
     collected 2 items

     <YamlFile 'test_simple.yml'>
        <YamlItem 'ok'>
        <YamlItem 'hello'>
-    ============================= in 0.04 seconds =============================
+    ============================= in 0.05 seconds =============================

View File

@ -9,7 +9,7 @@ def pytest_collect_file(parent, path):
class YamlFile(pytest.File): class YamlFile(pytest.File):
def collect(self): def collect(self):
import yaml # we need a yaml parser, e.g. PyYAML import yaml # we need a yaml parser, e.g. PyYAML
raw = yaml.load(self.fspath.open()) raw = yaml.safe_load(self.fspath.open())
for name, spec in raw.items(): for name, spec in raw.items():
yield YamlItem(name, self, spec) yield YamlItem(name, self, spec)
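For completeness, the matching ``YamlItem`` class can be sketched along
these lines (a rough reconstruction from the failure output shown above)::

# content of conftest.py (continued sketch)
import pytest

class YamlException(Exception):
    """ custom exception for error reporting. """

class YamlItem(pytest.Item):
    def __init__(self, name, parent, spec):
        super(YamlItem, self).__init__(name, parent)
        self.spec = spec

    def runtest(self):
        for name, value in self.spec.items():
            # dumb check: each key must equal its value
            if name != value:
                raise YamlException(self, name, value)

    def repr_failure(self, excinfo):
        """ called when self.runtest() raises an exception. """
        if isinstance(excinfo.value, YamlException):
            return "\n".join([
                "usecase execution failed",
                "   spec failed: %r: %r" % excinfo.value.args[1:3],
                "   no further details known at this point.",
            ])

    def reportinfo(self):
        return self.fspath, 0, "usecase: %s" % self.name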

View File

@ -104,21 +104,19 @@ this is a fully self-contained example which you can run with::
$ py.test test_scenarios.py $ py.test test_scenarios.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.4.5dev3 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
plugins: xdist, oejskit, pep8, cache, couchdbkit, quickcheck
collected 4 items collected 4 items
test_scenarios.py .... test_scenarios.py ....
========================= 4 passed in 0.04 seconds ========================= ========================= 4 passed in 0.01 seconds =========================
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
$ py.test --collectonly test_scenarios.py $ py.test --collectonly test_scenarios.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.4.5dev3 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
plugins: xdist, oejskit, pep8, cache, couchdbkit, quickcheck
collected 4 items collected 4 items
<Module 'test_scenarios.py'> <Module 'test_scenarios.py'>
<Class 'TestSampleWithScenarios'> <Class 'TestSampleWithScenarios'>
@ -128,7 +126,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
<Function 'test_demo1[advanced]'> <Function 'test_demo1[advanced]'>
<Function 'test_demo2[advanced]'> <Function 'test_demo2[advanced]'>
============================= in 0.03 seconds ============================= ============================= in 0.01 seconds =============================
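The hook driving this scenario mechanism can be sketched roughly as
follows (reconstructed from the ids and names in the collection tree;
the attribute names are assumptions)::

# content of test_scenarios.py (sketch)
def pytest_generate_tests(metafunc):
    idlist = []
    argvalues = []
    for scenario in metafunc.cls.scenarios:
        idlist.append(scenario[0])
        items = scenario[1].items()
        argnames = [x[0] for x in items]
        argvalues.append([x[1] for x in items])
    metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")

scenario1 = ('basic', {'attribute': 'value'})
scenario2 = ('advanced', {'attribute': 'value2'})

class TestSampleWithScenarios:
    scenarios = [scenario1, scenario2]

    def test_demo1(self, attribute):
        assert isinstance(attribute, str)

    def test_demo2(self, attribute):
        assert isinstance(attribute, str)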
Note that we told ``metafunc.parametrize()`` that your scenario values Note that we told ``metafunc.parametrize()`` that your scenario values
should be considered class-scoped. With pytest-2.3 this leads to a should be considered class-scoped. With pytest-2.3 this leads to a
@ -182,14 +180,13 @@ Let's first see how it looks like at collection time::
$ py.test test_backends.py --collectonly $ py.test test_backends.py --collectonly
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.4.5dev3 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
plugins: xdist, oejskit, pep8, cache, couchdbkit, quickcheck
collected 2 items collected 2 items
<Module 'test_backends.py'> <Module 'test_backends.py'>
<Function 'test_db_initialized[d1]'> <Function 'test_db_initialized[d1]'>
<Function 'test_db_initialized[d2]'> <Function 'test_db_initialized[d2]'>
============================= in 0.03 seconds ============================= ============================= in 0.00 seconds =============================
And then when we run the test:: And then when we run the test::
@ -198,7 +195,7 @@ And then when we run the test::
================================= FAILURES ================================= ================================= FAILURES =================================
_________________________ test_db_initialized[d2] __________________________ _________________________ test_db_initialized[d2] __________________________
db = <conftest.DB2 instance at 0x19ba7e8> db = <conftest.DB2 instance at 0x2038f80>
def test_db_initialized(db): def test_db_initialized(db):
# a dummy test # a dummy test
@ -253,7 +250,7 @@ argument sets to use for each test function. Let's run it::
================================= FAILURES ================================= ================================= FAILURES =================================
________________________ TestClass.test_equals[1-2] ________________________ ________________________ TestClass.test_equals[1-2] ________________________
self = <test_parametrize.TestClass instance at 0x2489b00>, a = 1, b = 2 self = <test_parametrize.TestClass instance at 0x1338f80>, a = 1, b = 2
def test_equals(self, a, b): def test_equals(self, a, b):
> assert a == b > assert a == b
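The class and hook generating these argument sets can be sketched as
follows (reconstructed from the ``test_equals[1-2]`` id; the exact
parametrization scheme is an assumption)::

# content of test_parametrize.py (sketch)
import pytest

def pytest_generate_tests(metafunc):
    # called once per test function; pull argument sets from the class
    funcarglist = metafunc.cls.params[metafunc.function.__name__]
    argnames = sorted(funcarglist[0])
    metafunc.parametrize(argnames,
        [[funcargs[name] for name in argnames] for funcargs in funcarglist])

class TestClass:
    # map test method names to lists of argument sets
    params = {
        'test_equals': [dict(a=1, b=2), dict(a=3, b=3)],
    }

    def test_equals(self, a, b):
        assert a == b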
@ -327,15 +324,14 @@ If you run this with reporting for skips enabled::
$ py.test -rs test_module.py $ py.test -rs test_module.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.4.5dev3 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
plugins: xdist, oejskit, pep8, cache, couchdbkit, quickcheck
collected 2 items collected 2 items
test_module.py .s test_module.py .s
========================= short test summary info ========================== ========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-11/conftest.py:10: could not import 'opt2' SKIP [1] /tmp/doc-exec-275/conftest.py:10: could not import 'opt2'
=================== 1 passed, 1 skipped in 0.04 seconds ==================== =================== 1 passed, 1 skipped in 0.01 seconds ====================
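A ``conftest.py`` producing this kind of skip can be sketched as follows
(a reconstruction; the module names are taken from the skip message)::

# content of conftest.py (sketch)
import pytest

@pytest.fixture(params=["opt1", "opt2"])
def optmod(request):
    # skip tests for which the optional implementation cannot be imported
    return pytest.importorskip(request.param)

# content of test_module.py (sketch)
def test_func1(optmod):
    assert optmod  # exercise the optional implementation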
You'll see that we don't have an ``opt2`` module and thus the second test run You'll see that we don't have an ``opt2`` module and thus the second test run
of our ``test_func1`` was skipped. A few notes: of our ``test_func1`` was skipped. A few notes:

View File

@ -43,7 +43,7 @@ then the test collection looks like this::
$ py.test --collectonly $ py.test --collectonly
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 2 items collected 2 items
<Module 'check_myapp.py'> <Module 'check_myapp.py'>
<Class 'CheckMyApp'> <Class 'CheckMyApp'>
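Such a naming scheme is typically enabled through ini options, along
these lines (a sketch using the ``python_files``/``python_classes``/
``python_functions`` settings)::

# content of pytest.ini (sketch)
[pytest]
python_files = check_*.py
python_classes = Check
python_functions = check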
@ -82,7 +82,7 @@ You can always peek at the collection tree without running tests like this::
. $ py.test --collectonly pythoncollection.py . $ py.test --collectonly pythoncollection.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 3 items collected 3 items
<Module 'pythoncollection.py'> <Module 'pythoncollection.py'>
<Function 'test_function'> <Function 'test_function'>
@ -135,7 +135,7 @@ interpreters and will leave out the setup.py file::
$ py.test --collectonly $ py.test --collectonly
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 1 items collected 1 items
<Module 'pkg/module_py2.py'> <Module 'pkg/module_py2.py'>
<Function 'test_only_on_python2'> <Function 'test_only_on_python2'>
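The ignore list behind this can be sketched as follows (a
reconstruction using the documented ``collect_ignore`` conftest
variable)::

# content of conftest.py (sketch)
import sys

collect_ignore = ["setup.py"]
if sys.version_info[0] > 2:
    # the py2-only module would be a syntax error on py3
    collect_ignore.append("pkg/module_py2.py")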

View File

@ -13,7 +13,7 @@ get on the terminal - we are working on that):
assertion $ py.test failure_demo.py assertion $ py.test failure_demo.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 39 items collected 39 items
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
@ -30,7 +30,7 @@ get on the terminal - we are working on that):
failure_demo.py:15: AssertionError failure_demo.py:15: AssertionError
_________________________ TestFailing.test_simple __________________________ _________________________ TestFailing.test_simple __________________________
self = <failure_demo.TestFailing object at 0x2ad4550> self = <failure_demo.TestFailing object at 0x1445e10>
def test_simple(self): def test_simple(self):
def f(): def f():
@ -40,13 +40,13 @@ get on the terminal - we are working on that):
> assert f() == g() > assert f() == g()
E assert 42 == 43 E assert 42 == 43
E + where 42 = <function f at 0x2a7f578>() E + where 42 = <function f at 0x137c6e0>()
E + and 43 = <function g at 0x2a7f5f0>() E + and 43 = <function g at 0x137c758>()
failure_demo.py:28: AssertionError failure_demo.py:28: AssertionError
____________________ TestFailing.test_simple_multiline _____________________ ____________________ TestFailing.test_simple_multiline _____________________
self = <failure_demo.TestFailing object at 0x2a81e50> self = <failure_demo.TestFailing object at 0x135a1d0>
def test_simple_multiline(self): def test_simple_multiline(self):
otherfunc_multi( otherfunc_multi(
@ -66,19 +66,19 @@ get on the terminal - we are working on that):
failure_demo.py:11: AssertionError failure_demo.py:11: AssertionError
___________________________ TestFailing.test_not ___________________________ ___________________________ TestFailing.test_not ___________________________
self = <failure_demo.TestFailing object at 0x2a72b50> self = <failure_demo.TestFailing object at 0x1458ed0>
def test_not(self): def test_not(self):
def f(): def f():
return 42 return 42
> assert not f() > assert not f()
E assert not 42 E assert not 42
E + where 42 = <function f at 0x2a7f9b0>() E + where 42 = <function f at 0x137caa0>()
failure_demo.py:38: AssertionError failure_demo.py:38: AssertionError
_________________ TestSpecialisedExplanations.test_eq_text _________________ _________________ TestSpecialisedExplanations.test_eq_text _________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2a6eb50> self = <failure_demo.TestSpecialisedExplanations object at 0x14451d0>
def test_eq_text(self): def test_eq_text(self):
> assert 'spam' == 'eggs' > assert 'spam' == 'eggs'
@ -89,7 +89,7 @@ get on the terminal - we are working on that):
failure_demo.py:42: AssertionError failure_demo.py:42: AssertionError
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________ _____________ TestSpecialisedExplanations.test_eq_similar_text _____________
self = <failure_demo.TestSpecialisedExplanations object at 0x2b07cd0> self = <failure_demo.TestSpecialisedExplanations object at 0x1458c90>
def test_eq_similar_text(self): def test_eq_similar_text(self):
> assert 'foo 1 bar' == 'foo 2 bar' > assert 'foo 1 bar' == 'foo 2 bar'
@ -102,7 +102,7 @@ get on the terminal - we are working on that):
failure_demo.py:45: AssertionError failure_demo.py:45: AssertionError
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________
self = <failure_demo.TestSpecialisedExplanations object at 0x2a68050> self = <failure_demo.TestSpecialisedExplanations object at 0x1434390>
def test_eq_multiline_text(self): def test_eq_multiline_text(self):
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar' > assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
@ -115,15 +115,15 @@ get on the terminal - we are working on that):
failure_demo.py:48: AssertionError failure_demo.py:48: AssertionError
______________ TestSpecialisedExplanations.test_eq_long_text _______________ ______________ TestSpecialisedExplanations.test_eq_long_text _______________
self = <failure_demo.TestSpecialisedExplanations object at 0x2ad2990> self = <failure_demo.TestSpecialisedExplanations object at 0x1459f50>
def test_eq_long_text(self): def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100 a = '1'*100 + 'a' + '2'*100
b = '1'*100 + 'b' + '2'*100 b = '1'*100 + 'b' + '2'*100
> assert a == b > assert a == b
E assert '111111111111...2222222222222' == '1111111111111...2222222222222' E assert '111111111111...2222222222222' == '1111111111111...2222222222222'
E Skipping 90 identical leading characters in diff E Skipping 90 identical leading characters in diff, use -v to show
E Skipping 91 identical trailing characters in diff E Skipping 91 identical trailing characters in diff, use -v to show
E - 1111111111a222222222 E - 1111111111a222222222
E ? ^ E ? ^
E + 1111111111b222222222 E + 1111111111b222222222
@ -132,15 +132,15 @@ get on the terminal - we are working on that):
failure_demo.py:53: AssertionError failure_demo.py:53: AssertionError
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________
self = <failure_demo.TestSpecialisedExplanations object at 0x2ad6c10> self = <failure_demo.TestSpecialisedExplanations object at 0x135a790>
def test_eq_long_text_multiline(self): def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100 a = '1\n'*100 + 'a' + '2\n'*100
b = '1\n'*100 + 'b' + '2\n'*100 b = '1\n'*100 + 'b' + '2\n'*100
> assert a == b > assert a == b
E assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n' E assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n'
E Skipping 190 identical leading characters in diff E Skipping 190 identical leading characters in diff, use -v to show
E Skipping 191 identical trailing characters in diff E Skipping 191 identical trailing characters in diff, use -v to show
E 1 E 1
E 1 E 1
E 1 E 1
@ -156,7 +156,7 @@ get on the terminal - we are working on that):
failure_demo.py:58: AssertionError failure_demo.py:58: AssertionError
_________________ TestSpecialisedExplanations.test_eq_list _________________ _________________ TestSpecialisedExplanations.test_eq_list _________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2a81c50> self = <failure_demo.TestSpecialisedExplanations object at 0x138dfd0>
def test_eq_list(self): def test_eq_list(self):
> assert [0, 1, 2] == [0, 1, 3] > assert [0, 1, 2] == [0, 1, 3]
@ -166,7 +166,7 @@ get on the terminal - we are working on that):
failure_demo.py:61: AssertionError failure_demo.py:61: AssertionError
______________ TestSpecialisedExplanations.test_eq_list_long _______________ ______________ TestSpecialisedExplanations.test_eq_list_long _______________
self = <failure_demo.TestSpecialisedExplanations object at 0x2a69f50> self = <failure_demo.TestSpecialisedExplanations object at 0x135a990>
def test_eq_list_long(self): def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100 a = [0]*100 + [1] + [3]*100
@ -178,20 +178,23 @@ get on the terminal - we are working on that):
failure_demo.py:66: AssertionError failure_demo.py:66: AssertionError
_________________ TestSpecialisedExplanations.test_eq_dict _________________ _________________ TestSpecialisedExplanations.test_eq_dict _________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2ad5f50> self = <failure_demo.TestSpecialisedExplanations object at 0x1459310>
def test_eq_dict(self): def test_eq_dict(self):
> assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2} > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
E assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2} E assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
E - {'a': 0, 'b': 1} E Hiding 1 identical items, use -v to show
E ? ^ E Differing items:
E + {'a': 0, 'b': 2} E {'b': 1} != {'b': 2}
E ? ^ E Left contains more items:
E {'c': 0}
E Right contains more items:
E {'d': 0}
failure_demo.py:69: AssertionError failure_demo.py:69: AssertionError
_________________ TestSpecialisedExplanations.test_eq_set __________________ _________________ TestSpecialisedExplanations.test_eq_set __________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2ad4410> self = <failure_demo.TestSpecialisedExplanations object at 0x1434310>
def test_eq_set(self): def test_eq_set(self):
> assert set([0, 10, 11, 12]) == set([0, 20, 21]) > assert set([0, 10, 11, 12]) == set([0, 20, 21])
@ -207,7 +210,7 @@ get on the terminal - we are working on that):
failure_demo.py:72: AssertionError failure_demo.py:72: AssertionError
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________ _____________ TestSpecialisedExplanations.test_eq_longer_list ______________
self = <failure_demo.TestSpecialisedExplanations object at 0x2ad2d50> self = <failure_demo.TestSpecialisedExplanations object at 0x138ded0>
def test_eq_longer_list(self): def test_eq_longer_list(self):
> assert [1,2] == [1,2,3] > assert [1,2] == [1,2,3]
@ -217,7 +220,7 @@ get on the terminal - we are working on that):
failure_demo.py:75: AssertionError failure_demo.py:75: AssertionError
_________________ TestSpecialisedExplanations.test_in_list _________________ _________________ TestSpecialisedExplanations.test_in_list _________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2a81310> self = <failure_demo.TestSpecialisedExplanations object at 0x1459e10>
def test_in_list(self): def test_in_list(self):
> assert 1 in [0, 2, 3, 4, 5] > assert 1 in [0, 2, 3, 4, 5]
@ -226,7 +229,7 @@ get on the terminal - we are working on that):
failure_demo.py:78: AssertionError failure_demo.py:78: AssertionError
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________ __________ TestSpecialisedExplanations.test_not_in_text_multiline __________
self = <failure_demo.TestSpecialisedExplanations object at 0x2a697d0> self = <failure_demo.TestSpecialisedExplanations object at 0x1434950>
def test_not_in_text_multiline(self): def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
@ -244,7 +247,7 @@ get on the terminal - we are working on that):
failure_demo.py:82: AssertionError failure_demo.py:82: AssertionError
___________ TestSpecialisedExplanations.test_not_in_text_single ____________ ___________ TestSpecialisedExplanations.test_not_in_text_single ____________
self = <failure_demo.TestSpecialisedExplanations object at 0x2ad4d10> self = <failure_demo.TestSpecialisedExplanations object at 0x138dbd0>
def test_not_in_text_single(self): def test_not_in_text_single(self):
text = 'single foo line' text = 'single foo line'
@ -257,7 +260,7 @@ get on the terminal - we are working on that):
failure_demo.py:86: AssertionError failure_demo.py:86: AssertionError
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________ _________ TestSpecialisedExplanations.test_not_in_text_single_long _________
self = <failure_demo.TestSpecialisedExplanations object at 0x2ad2fd0> self = <failure_demo.TestSpecialisedExplanations object at 0x14593d0>
def test_not_in_text_single_long(self): def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20 text = 'head ' * 50 + 'foo ' + 'tail ' * 20
@ -270,7 +273,7 @@ get on the terminal - we are working on that):
failure_demo.py:90: AssertionError failure_demo.py:90: AssertionError
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
self = <failure_demo.TestSpecialisedExplanations object at 0x2a6f410> self = <failure_demo.TestSpecialisedExplanations object at 0x1459650>
def test_not_in_text_single_long_term(self): def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
@ -289,7 +292,7 @@ get on the terminal - we are working on that):
i = Foo() i = Foo()
> assert i.b == 2 > assert i.b == 2
E assert 1 == 2 E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2a81850>.b E + where 1 = <failure_demo.Foo object at 0x1434850>.b
failure_demo.py:101: AssertionError failure_demo.py:101: AssertionError
_________________________ test_attribute_instance __________________________ _________________________ test_attribute_instance __________________________
@ -299,8 +302,8 @@ get on the terminal - we are working on that):
b = 1 b = 1
> assert Foo().b == 2 > assert Foo().b == 2
E assert 1 == 2 E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2ad4bd0>.b E + where 1 = <failure_demo.Foo object at 0x1459dd0>.b
E + where <failure_demo.Foo object at 0x2ad4bd0> = <class 'failure_demo.Foo'>() E + where <failure_demo.Foo object at 0x1459dd0> = <class 'failure_demo.Foo'>()
failure_demo.py:107: AssertionError failure_demo.py:107: AssertionError
__________________________ test_attribute_failure __________________________ __________________________ test_attribute_failure __________________________
@ -316,7 +319,7 @@ get on the terminal - we are working on that):
failure_demo.py:116: failure_demo.py:116:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <failure_demo.Foo object at 0x2ad26d0> self = <failure_demo.Foo object at 0x1434150>
def _get_b(self): def _get_b(self):
> raise Exception('Failed to get attrib') > raise Exception('Failed to get attrib')
@ -332,15 +335,15 @@ get on the terminal - we are working on that):
b = 2 b = 2
> assert Foo().b == Bar().b > assert Foo().b == Bar().b
E assert 1 == 2 E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2a6ff10>.b E + where 1 = <failure_demo.Foo object at 0x14590d0>.b
E + where <failure_demo.Foo object at 0x2a6ff10> = <class 'failure_demo.Foo'>() E + where <failure_demo.Foo object at 0x14590d0> = <class 'failure_demo.Foo'>()
E + and 2 = <failure_demo.Bar object at 0x2a6fd50>.b E + and 2 = <failure_demo.Bar object at 0x1459b10>.b
E + where <failure_demo.Bar object at 0x2a6fd50> = <class 'failure_demo.Bar'>() E + where <failure_demo.Bar object at 0x1459b10> = <class 'failure_demo.Bar'>()
failure_demo.py:124: AssertionError failure_demo.py:124: AssertionError
__________________________ TestRaises.test_raises __________________________ __________________________ TestRaises.test_raises __________________________
self = <failure_demo.TestRaises instance at 0x2a75c68> self = <failure_demo.TestRaises instance at 0x13a0d88>
def test_raises(self): def test_raises(self):
s = 'qwe' s = 'qwe'
@ -352,10 +355,10 @@ get on the terminal - we are working on that):
> int(s) > int(s)
E ValueError: invalid literal for int() with base 10: 'qwe' E ValueError: invalid literal for int() with base 10: 'qwe'
<0-codegen /home/hpk/p/pytest/.tox/regen/lib/python2.7/site-packages/_pytest/python.py:851>:1: ValueError <0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:858>:1: ValueError
______________________ TestRaises.test_raises_doesnt _______________________ ______________________ TestRaises.test_raises_doesnt _______________________
self = <failure_demo.TestRaises instance at 0x2adf3f8> self = <failure_demo.TestRaises instance at 0x145fcf8>
def test_raises_doesnt(self): def test_raises_doesnt(self):
> raises(IOError, "int('3')") > raises(IOError, "int('3')")
@ -364,7 +367,7 @@ get on the terminal - we are working on that):
failure_demo.py:136: Failed failure_demo.py:136: Failed
__________________________ TestRaises.test_raise ___________________________ __________________________ TestRaises.test_raise ___________________________
self = <failure_demo.TestRaises instance at 0x2af1830> self = <failure_demo.TestRaises instance at 0x13a9ea8>
def test_raise(self): def test_raise(self):
> raise ValueError("demo error") > raise ValueError("demo error")
@ -373,7 +376,7 @@ get on the terminal - we are working on that):
failure_demo.py:139: ValueError failure_demo.py:139: ValueError
________________________ TestRaises.test_tupleerror ________________________ ________________________ TestRaises.test_tupleerror ________________________
self = <failure_demo.TestRaises instance at 0x2ae5290> self = <failure_demo.TestRaises instance at 0x13843f8>
def test_tupleerror(self): def test_tupleerror(self):
> a,b = [1] > a,b = [1]
@ -382,7 +385,7 @@ get on the terminal - we are working on that):
failure_demo.py:142: ValueError failure_demo.py:142: ValueError
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
self = <failure_demo.TestRaises instance at 0x2ae2878> self = <failure_demo.TestRaises instance at 0x14532d8>
def test_reinterpret_fails_with_print_for_the_fun_of_it(self): def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3] l = [1,2,3]
@ -395,7 +398,7 @@ get on the terminal - we are working on that):
l is [1, 2, 3] l is [1, 2, 3]
________________________ TestRaises.test_some_error ________________________ ________________________ TestRaises.test_some_error ________________________
self = <failure_demo.TestRaises instance at 0x2af0e18> self = <failure_demo.TestRaises instance at 0x139d290>
def test_some_error(self): def test_some_error(self):
> if namenotexi: > if namenotexi:
@ -423,7 +426,7 @@ get on the terminal - we are working on that):
<2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError <2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError
____________________ TestMoreErrors.test_complex_error _____________________ ____________________ TestMoreErrors.test_complex_error _____________________
self = <failure_demo.TestMoreErrors instance at 0x2adbc68> self = <failure_demo.TestMoreErrors instance at 0x137d758>
def test_complex_error(self): def test_complex_error(self):
def f(): def f():
@ -452,7 +455,7 @@ get on the terminal - we are working on that):
failure_demo.py:5: AssertionError failure_demo.py:5: AssertionError
___________________ TestMoreErrors.test_z1_unpack_error ____________________ ___________________ TestMoreErrors.test_z1_unpack_error ____________________
self = <failure_demo.TestMoreErrors instance at 0x2aee1b8> self = <failure_demo.TestMoreErrors instance at 0x13a5200>
def test_z1_unpack_error(self): def test_z1_unpack_error(self):
l = [] l = []
@ -462,7 +465,7 @@ get on the terminal - we are working on that):
failure_demo.py:179: ValueError failure_demo.py:179: ValueError
____________________ TestMoreErrors.test_z2_type_error _____________________ ____________________ TestMoreErrors.test_z2_type_error _____________________
self = <failure_demo.TestMoreErrors instance at 0x2ae27a0> self = <failure_demo.TestMoreErrors instance at 0x1395290>
def test_z2_type_error(self): def test_z2_type_error(self):
l = 3 l = 3
@ -472,19 +475,19 @@ get on the terminal - we are working on that):
failure_demo.py:183: TypeError failure_demo.py:183: TypeError
______________________ TestMoreErrors.test_startswith ______________________ ______________________ TestMoreErrors.test_startswith ______________________
self = <failure_demo.TestMoreErrors instance at 0x2ae1128> self = <failure_demo.TestMoreErrors instance at 0x137f200>
def test_startswith(self): def test_startswith(self):
s = "123" s = "123"
g = "456" g = "456"
> assert s.startswith(g) > assert s.startswith(g)
E assert <built-in method startswith of str object at 0x2adc918>('456') E assert <built-in method startswith of str object at 0x143f288>('456')
E + where <built-in method startswith of str object at 0x2adc918> = '123'.startswith E + where <built-in method startswith of str object at 0x143f288> = '123'.startswith
failure_demo.py:188: AssertionError failure_demo.py:188: AssertionError
__________________ TestMoreErrors.test_startswith_nested ___________________ __________________ TestMoreErrors.test_startswith_nested ___________________
self = <failure_demo.TestMoreErrors instance at 0x2c720e0> self = <failure_demo.TestMoreErrors instance at 0x145fb00>
def test_startswith_nested(self): def test_startswith_nested(self):
def f(): def f():
@ -492,15 +495,15 @@ get on the terminal - we are working on that):
def g(): def g():
return "456" return "456"
> assert f().startswith(g()) > assert f().startswith(g())
E assert <built-in method startswith of str object at 0x2adc918>('456') E assert <built-in method startswith of str object at 0x143f288>('456')
E + where <built-in method startswith of str object at 0x2adc918> = '123'.startswith E + where <built-in method startswith of str object at 0x143f288> = '123'.startswith
E + where '123' = <function f at 0x2af5b90>() E + where '123' = <function f at 0x13abaa0>()
E + and '456' = <function g at 0x2af5c08>() E + and '456' = <function g at 0x13ab578>()
failure_demo.py:195: AssertionError failure_demo.py:195: AssertionError
_____________________ TestMoreErrors.test_global_func ______________________ _____________________ TestMoreErrors.test_global_func ______________________
self = <failure_demo.TestMoreErrors instance at 0x2c725f0> self = <failure_demo.TestMoreErrors instance at 0x139cd40>
def test_global_func(self): def test_global_func(self):
> assert isinstance(globf(42), float) > assert isinstance(globf(42), float)
@ -510,18 +513,18 @@ get on the terminal - we are working on that):
failure_demo.py:198: AssertionError failure_demo.py:198: AssertionError
_______________________ TestMoreErrors.test_instance _______________________ _______________________ TestMoreErrors.test_instance _______________________
self = <failure_demo.TestMoreErrors instance at 0x2a67ab8> self = <failure_demo.TestMoreErrors instance at 0x13593b0>
def test_instance(self): def test_instance(self):
self.x = 6*7 self.x = 6*7
> assert self.x != 42 > assert self.x != 42
E assert 42 != 42 E assert 42 != 42
E + where 42 = <failure_demo.TestMoreErrors instance at 0x2a67ab8>.x E + where 42 = <failure_demo.TestMoreErrors instance at 0x13593b0>.x
failure_demo.py:202: AssertionError failure_demo.py:202: AssertionError
_______________________ TestMoreErrors.test_compare ________________________ _______________________ TestMoreErrors.test_compare ________________________
self = <failure_demo.TestMoreErrors instance at 0x2af8710> self = <failure_demo.TestMoreErrors instance at 0x1465d40>
def test_compare(self): def test_compare(self):
> assert globf(10) < 5 > assert globf(10) < 5
@ -531,7 +534,7 @@ get on the terminal - we are working on that):
failure_demo.py:205: AssertionError failure_demo.py:205: AssertionError
_____________________ TestMoreErrors.test_try_finally ______________________ _____________________ TestMoreErrors.test_try_finally ______________________
self = <failure_demo.TestMoreErrors instance at 0x2af03f8> self = <failure_demo.TestMoreErrors instance at 0x1456ea8>
def test_try_finally(self): def test_try_finally(self):
x = 1 x = 1
@ -540,4 +543,4 @@ get on the terminal - we are working on that):
E assert 1 == 0 E assert 1 == 0
failure_demo.py:210: AssertionError failure_demo.py:210: AssertionError
======================== 39 failed in 0.25 seconds ========================= ======================== 39 failed in 0.21 seconds =========================

View File

@ -106,7 +106,7 @@ directory with the above conftest.py::
$ py.test $ py.test
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 0 items collected 0 items
============================= in 0.00 seconds ============================= ============================= in 0.00 seconds =============================
@ -150,12 +150,12 @@ and when running it will see a skipped "slow" test::
$ py.test -rs # "-rs" means report details on the little 's' $ py.test -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 2 items collected 2 items
test_module.py .s test_module.py .s
========================= short test summary info ========================== ========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-138/conftest.py:9: need --runslow option to run SKIP [1] /tmp/doc-exec-278/conftest.py:9: need --runslow option to run
=================== 1 passed, 1 skipped in 0.01 seconds ==================== =================== 1 passed, 1 skipped in 0.01 seconds ====================
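The ``conftest.py`` behind this behaviour can be sketched as follows
(reconstructed from the skip message, using the standard
addoption/runtest_setup pattern)::

# content of conftest.py (sketch)
import pytest

def pytest_addoption(parser):
    parser.addoption("--runslow", action="store_true",
                     help="run slow tests")

def pytest_runtest_setup(item):
    if 'slow' in item.keywords and not item.config.getvalue("runslow"):
        pytest.skip("need --runslow option to run")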
@ -163,7 +163,7 @@ Or run it including the ``slow`` marked test::
$ py.test --runslow $ py.test --runslow
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 2 items collected 2 items
test_module.py .. test_module.py ..
@ -253,7 +253,7 @@ which will add the string to the test header accordingly::
$ py.test $ py.test
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
project deps: mylib-1.1 project deps: mylib-1.1
collected 0 items collected 0 items
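A minimal sketch of the hook producing that header line::

# content of conftest.py (sketch)
def pytest_report_header(config):
    return "project deps: mylib-1.1"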
@ -276,7 +276,7 @@ which will add info only when run with "--v"::
$ py.test -v $ py.test -v
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 -- /home/hpk/p/pytest/.tox/regen/bin/python platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
info1: did you know that ... info1: did you know that ...
did you? did you?
collecting ... collected 0 items collecting ... collected 0 items
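The verbose-only variant can be sketched by checking the verbosity
level on the ``config`` object::

# content of conftest.py (sketch)
def pytest_report_header(config):
    if config.option.verbose > 0:
        return ["info1: did you know that ...", "did you?"]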
@ -287,7 +287,7 @@ and nothing when run plainly::
$ py.test $ py.test
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 0 items collected 0 items
============================= in 0.00 seconds ============================= ============================= in 0.00 seconds =============================
@ -319,7 +319,7 @@ Now we can profile which test functions execute the slowest::
$ py.test --durations=3 $ py.test --durations=3
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 3 items collected 3 items
test_some_are_slow.py ... test_some_are_slow.py ...
@ -327,7 +327,7 @@ Now we can profile which test functions execute the slowest::
========================= slowest 3 test durations ========================= ========================= slowest 3 test durations =========================
0.20s call test_some_are_slow.py::test_funcslow2 0.20s call test_some_are_slow.py::test_funcslow2
0.10s call test_some_are_slow.py::test_funcslow1 0.10s call test_some_are_slow.py::test_funcslow1
0.00s call test_some_are_slow.py::test_funcfast 0.00s setup test_some_are_slow.py::test_funcfast
========================= 3 passed in 0.31 seconds ========================= ========================= 3 passed in 0.31 seconds =========================
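The module measured above can be sketched as follows (sleep times
reconstructed from the durations report)::

# content of test_some_are_slow.py (sketch)
import time

def test_funcfast():
    pass

def test_funcslow1():
    time.sleep(0.1)

def test_funcslow2():
    time.sleep(0.2)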
incremental testing - test steps incremental testing - test steps
@ -380,7 +380,7 @@ If we run this::
$ py.test -rx $ py.test -rx
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 4 items collected 4 items
test_step.py .Fx. test_step.py .Fx.
@ -388,7 +388,7 @@ If we run this::
================================= FAILURES ================================= ================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________ ____________________ TestUserHandling.test_modification ____________________
self = <test_step.TestUserHandling instance at 0x193bc68> self = <test_step.TestUserHandling instance at 0x282b8c0>
def test_modification(self): def test_modification(self):
> assert 0 > assert 0
@ -398,7 +398,7 @@ If we run this::
========================= short test summary info ========================== ========================= short test summary info ==========================
XFAIL test_step.py::TestUserHandling::()::test_deletion XFAIL test_step.py::TestUserHandling::()::test_deletion
reason: previous test failed (test_modification) reason: previous test failed (test_modification)
============== 1 failed, 2 passed, 1 xfailed in 0.02 seconds =============== ============== 1 failed, 2 passed, 1 xfailed in 0.01 seconds ===============
We'll see that ``test_deletion`` was not executed because ``test_modification`` We'll see that ``test_deletion`` was not executed because ``test_modification``
failed. It is reported as an "expected failure". failed. It is reported as an "expected failure".
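The hooks implementing this "incremental" behaviour can be sketched as
follows (reconstructed from the xfail reason above; marker name and
attribute are assumptions)::

# content of conftest.py (sketch)
import pytest

def pytest_runtest_makereport(item, call):
    if "incremental" in item.keywords:
        if call.excinfo is not None:
            # remember the failing test on the class-level parent
            item.parent._previousfailed = item

def pytest_runtest_setup(item):
    if "incremental" in item.keywords:
        previousfailed = getattr(item.parent, "_previousfailed", None)
        if previousfailed is not None:
            pytest.xfail("previous test failed (%s)" % previousfailed.name)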
@ -450,7 +450,7 @@ We can run this::
$ py.test $ py.test
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 7 items collected 7 items
test_step.py .Fx. test_step.py .Fx.
@ -460,17 +460,17 @@ We can run this::
================================== ERRORS ================================== ================================== ERRORS ==================================
_______________________ ERROR at setup of test_root ________________________ _______________________ ERROR at setup of test_root ________________________
file /tmp/doc-exec-138/b/test_error.py, line 1 file /tmp/doc-exec-278/b/test_error.py, line 1
def test_root(db): # no db here, will error out def test_root(db): # no db here, will error out
fixture 'db' not found fixture 'db' not found
available fixtures: pytestconfig, recwarn, monkeypatch, capfd, capsys, tmpdir available fixtures: pytestconfig, recwarn, monkeypatch, capfd, capsys, tmpdir
use 'py.test --fixtures [testpath]' for help on them. use 'py.test --fixtures [testpath]' for help on them.
/tmp/doc-exec-138/b/test_error.py:1 /tmp/doc-exec-278/b/test_error.py:1
================================= FAILURES ================================= ================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________ ____________________ TestUserHandling.test_modification ____________________
self = <test_step.TestUserHandling instance at 0x1492d88> self = <test_step.TestUserHandling instance at 0x26145f0>
def test_modification(self): def test_modification(self):
> assert 0 > assert 0
@ -479,23 +479,23 @@ We can run this::
test_step.py:9: AssertionError test_step.py:9: AssertionError
_________________________________ test_a1 __________________________________ _________________________________ test_a1 __________________________________
db = <conftest.DB instance at 0x1498e60> db = <conftest.DB instance at 0x26211b8>
def test_a1(db): def test_a1(db):
> assert 0, db # to show value > assert 0, db # to show value
E AssertionError: <conftest.DB instance at 0x1498e60> E AssertionError: <conftest.DB instance at 0x26211b8>
a/test_db.py:2: AssertionError a/test_db.py:2: AssertionError
_________________________________ test_a2 __________________________________ _________________________________ test_a2 __________________________________
db = <conftest.DB instance at 0x1498e60> db = <conftest.DB instance at 0x26211b8>
def test_a2(db): def test_a2(db):
> assert 0, db # to show value > assert 0, db # to show value
E AssertionError: <conftest.DB instance at 0x1498e60> E AssertionError: <conftest.DB instance at 0x26211b8>
a/test_db2.py:2: AssertionError a/test_db2.py:2: AssertionError
========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.04 seconds ========== ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.03 seconds ==========
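The directory-scoped fixture behind this can be sketched as follows (a
reconstruction; only the ``a`` directory gets a ``conftest.py``)::

# content of a/conftest.py (sketch)
import pytest

class DB:
    pass

@pytest.fixture(scope="session")
def db():
    return DB()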
The two test modules in the ``a`` directory see the same ``db`` fixture instance The two test modules in the ``a`` directory see the same ``db`` fixture instance
while the one test in the sister-directory ``b`` doesn't see it. We could of course while the one test in the sister-directory ``b`` doesn't see it. We could of course
@ -550,7 +550,7 @@ and run them::
$ py.test test_module.py $ py.test test_module.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 2 items collected 2 items
test_module.py FF test_module.py FF
@ -558,7 +558,7 @@ and run them::
================================= FAILURES ================================= ================================= FAILURES =================================
________________________________ test_fail1 ________________________________ ________________________________ test_fail1 ________________________________
tmpdir = local('/tmp/pytest-543/test_fail10') tmpdir = local('/tmp/pytest-326/test_fail10')
def test_fail1(tmpdir): def test_fail1(tmpdir):
> assert 0 > assert 0
@ -577,7 +577,7 @@ and run them::
you will have a "failures" file which contains the failing test ids:: you will have a "failures" file which contains the failing test ids::
$ cat failures $ cat failures
test_module.py::test_fail1 (/tmp/pytest-543/test_fail10) test_module.py::test_fail1 (/tmp/pytest-326/test_fail10)
test_module.py::test_fail2 test_module.py::test_fail2
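The report-processing hook writing that file can be sketched as follows
(a reconstruction in the hook style of this pytest era; details are
assumptions)::

# content of conftest.py (sketch)
import pytest
import os.path

@pytest.mark.tryfirst
def pytest_runtest_makereport(item, call, __multicall__):
    # execute all other hooks to obtain the report object
    rep = __multicall__.execute()
    # only look at actual failing test calls, not setup/teardown
    if rep.when == "call" and rep.failed:
        mode = "a" if os.path.exists("failures") else "w"
        with open("failures", mode) as f:
            # also record the tmpdir value if the test used that fixture
            if "tmpdir" in item.funcargs:
                extra = " (%s)" % item.funcargs["tmpdir"]
            else:
                extra = ""
            f.write(rep.nodeid + extra + "\n")
    return rep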
Making test result information available in fixtures Making test result information available in fixtures
@ -640,7 +640,7 @@ and run it::
$ py.test -s test_module.py $ py.test -s test_module.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 3 items collected 3 items
test_module.py EFF test_module.py EFF

View File

@ -7,14 +7,14 @@ pytest fixtures: explicit, modular, scalable
.. currentmodule:: _pytest.python .. currentmodule:: _pytest.python
.. versionadded:: 2.0/2.3 .. versionadded:: 2.0/2.3/2.4
.. _`xUnit`: http://en.wikipedia.org/wiki/XUnit .. _`xUnit`: http://en.wikipedia.org/wiki/XUnit
.. _`general purpose of test fixtures`: http://en.wikipedia.org/wiki/Test_fixture#Software .. _`purpose of test fixtures`: http://en.wikipedia.org/wiki/Test_fixture#Software
.. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection#Definition .. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection#Definition
The `general purpose of test fixtures`_ is to provide a fixed baseline The `purpose of test fixtures`_ is to provide a fixed baseline
upon which tests can reliably and repeatedly execute. pytest-2.3 fixtures upon which tests can reliably and repeatedly execute. pytest fixtures
offer dramatic improvements over the classic xUnit style of setup/teardown offer dramatic improvements over the classic xUnit style of setup/teardown
functions: functions:
@ -22,8 +22,7 @@ functions:
from test functions, modules, classes or whole projects. from test functions, modules, classes or whole projects.
* fixtures are implemented in a modular manner, as each fixture name * fixtures are implemented in a modular manner, as each fixture name
triggers a *fixture function* which can itself easily use other triggers a *fixture function* which can itself use other fixtures.
fixtures.
* fixture management scales from simple unit to complex * fixture management scales from simple unit to complex
functional testing, allowing to parametrize fixtures and tests according functional testing, allowing to parametrize fixtures and tests according
@ -71,7 +70,7 @@ marked ``smtp`` fixture function. Running the test looks like this::
$ py.test test_smtpsimple.py $ py.test test_smtpsimple.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 1 items collected 1 items
test_smtpsimple.py F test_smtpsimple.py F
@ -79,7 +78,7 @@ marked ``smtp`` fixture function. Running the test looks like this::
================================= FAILURES ================================= ================================= FAILURES =================================
________________________________ test_ehlo _________________________________ ________________________________ test_ehlo _________________________________
smtp = <smtplib.SMTP instance at 0x236cab8> smtp = <smtplib.SMTP instance at 0x226cc20>
def test_ehlo(smtp): def test_ehlo(smtp):
response, msg = smtp.ehlo() response, msg = smtp.ehlo()
@ -89,7 +88,7 @@ marked ``smtp`` fixture function. Running the test looks like this::
E assert 0 E assert 0
test_smtpsimple.py:12: AssertionError test_smtpsimple.py:12: AssertionError
========================= 1 failed in 0.17 seconds ========================= ========================= 1 failed in 0.20 seconds =========================
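The module under test here can be sketched as follows (reconstructed
from the traceback; server name and line numbers are assumptions)::

# content of test_smtpsimple.py (sketch)
import pytest
import smtplib

@pytest.fixture
def smtp():
    return smtplib.SMTP("merlinux.eu")

def test_ehlo(smtp):
    response, msg = smtp.ehlo()
    assert response == 250
    assert 0  # for demo purposes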
In the failure traceback we see that the test function was called with a In the failure traceback we see that the test function was called with a
``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture ``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture
@ -129,10 +128,10 @@ Funcargs a prime example of dependency injection
When injecting fixtures to test functions, pytest-2.0 introduced the When injecting fixtures to test functions, pytest-2.0 introduced the
term "funcargs" or "funcarg mechanism" which continues to be present term "funcargs" or "funcarg mechanism" which continues to be present
also in pytest-2.3 docs. It now refers to the specific case of injecting also in docs today. It now refers to the specific case of injecting
fixture values as arguments to test functions. With pytest-2.3 there are fixture values as arguments to test functions. With pytest-2.3 there are
more possibilities to use fixtures but "funcargs" probably will remain more possibilities to use fixtures but "funcargs" remain as the main way
as the main way of dealing with fixtures. as they allow to directly state the dependencies of a test function.
As the following examples show in more detail, funcargs allow test As the following examples show in more detail, funcargs allow test
functions to easily receive and work against specific pre-initialized functions to easily receive and work against specific pre-initialized
@ -154,10 +153,10 @@ can add a ``scope='module'`` parameter to the
:py:func:`@pytest.fixture <_pytest.python.fixture>` invocation :py:func:`@pytest.fixture <_pytest.python.fixture>` invocation
to cause the decorated ``smtp`` fixture function to only be invoked once to cause the decorated ``smtp`` fixture function to only be invoked once
per test module. Multiple test functions in a test module will thus per test module. Multiple test functions in a test module will thus
each receive the same ``smtp`` fixture instance. The next example also each receive the same ``smtp`` fixture instance. The next example puts
extracts the fixture function into a separate ``conftest.py`` file so the fixture function into a separate ``conftest.py`` file so
that all tests in test modules in the directory can access the fixture that tests from multiple test modules in the directory can
function:: access the fixture function::
# content of conftest.py # content of conftest.py
import pytest import pytest
@ -189,7 +188,7 @@ inspect what is going on and can now run the tests::
$ py.test test_module.py $ py.test test_module.py
=========================== test session starts ============================ =========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.4 platform linux2 -- Python 2.7.3 -- pytest-2.3.5
collected 2 items collected 2 items
test_module.py FF test_module.py FF
@ -197,7 +196,7 @@ inspect what is going on and can now run the tests::
================================= FAILURES ================================= ================================= FAILURES =================================
________________________________ test_ehlo _________________________________ ________________________________ test_ehlo _________________________________
smtp = <smtplib.SMTP instance at 0x1c9add0> smtp = <smtplib.SMTP instance at 0x18a6368>
def test_ehlo(smtp): def test_ehlo(smtp):
response = smtp.ehlo() response = smtp.ehlo()
@ -209,7 +208,7 @@ inspect what is going on and can now run the tests::
test_module.py:6: AssertionError test_module.py:6: AssertionError
________________________________ test_noop _________________________________ ________________________________ test_noop _________________________________
smtp = <smtplib.SMTP instance at 0x1c9add0> smtp = <smtplib.SMTP instance at 0x18a6368>
def test_noop(smtp): def test_noop(smtp):
response = smtp.noop() response = smtp.noop()
@ -218,7 +217,7 @@ inspect what is going on and can now run the tests::
E assert 0 E assert 0
test_module.py:11: AssertionError test_module.py:11: AssertionError
========================= 2 failed in 0.23 seconds ========================= ========================= 2 failed in 0.26 seconds =========================
You see the two ``assert 0`` failing and more importantly you can also see You see the two ``assert 0`` failing and more importantly you can also see
that the same (module-scoped) ``smtp`` object was passed into the two that the same (module-scoped) ``smtp`` object was passed into the two
@ -233,24 +232,91 @@ instance, you can simply declare it::
def smtp(...): def smtp(...):
# the returned fixture value will be shared for # the returned fixture value will be shared for
# all tests needing it # all tests needing it
.. _`contextfixtures`:
fixture finalization / teardowns
-------------------------------------------------------------
pytest supports two styles of fixture finalization:
- (new in pytest-2.4) by writing a contextmanager fixture
generator where a fixture value is "yielded" and the remainder
of the function serves as the teardown code. This integrates
very well with existing context managers.
- by making a fixture function accept a ``request`` argument
with which it can call ``request.addfinalizer(teardownfunction)``
to register a teardown callback function.
Both methods are strictly equivalent from pytest's point of view and
will remain supported in the future.
Because a number of people prefer the new contextmanager style
we describe it first::
# content of test_ctxfixture.py

import smtplib
import pytest

@pytest.fixture(scope="module")
def smtp():
    smtp = smtplib.SMTP("merlinux.eu")
    yield smtp  # provide the fixture value
    print ("teardown smtp")
    smtp.close()
pytest detects that you are using a ``yield`` in your fixture function,
treats it as a generator and:

a) iterates it once to produce the fixture value

b) iterates it a second time to tear the fixture down, expecting
   a StopIteration (which the Python runtime raises automatically
   when the generator returns).
.. note::
The teardown will execute independently of the status of test functions.
You do not need to write the teardown code into a ``try-finally`` clause
like you would usually do with ``contextlib.contextmanager`` decorated
functions.
If the fixture generator yields a second value pytest will report
an error. Yielding cannot be used for parametrization. We'll describe
ways to implement parametrization further below.
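To illustrate the note above, here is a minimal sketch (file and fixture
names are made up) where the teardown code after ``yield`` runs although
the test fails::

# content of test_yieldteardown.py (sketch)
import pytest

@pytest.fixture
def resource():
    res = {"open": True}
    yield res             # provide the fixture value
    res["open"] = False   # teardown: runs even though the test failed
    print ("closed resource")

def test_uses_resource(resource):
    assert resource["open"]
    assert 0  # fail on purpose; the teardown above still executes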
Prior to pytest-2.4 you always needed to register a finalizer by accepting
a ``request`` object into your fixture function and calling
``request.addfinalizer`` with a teardown function::
import smtplib
import pytest
@pytest.fixture(scope="module")
def smtp(request):
    smtp = smtplib.SMTP("merlinux.eu")
    def fin():
        print ("teardown smtp")
        smtp.close()
    request.addfinalizer(fin)  # register the teardown callback
    return smtp  # provide the fixture value
This method of registering a finalizer reads more indirectly than
the new contextmanager style syntax because ``fin`` is a callback
function.
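On the other hand, the callback style allows registering several
independent teardown functions; registered finalizers run in reverse
(last-registered, first-executed) order. A sketch (names made up)::

import smtplib
import pytest

@pytest.fixture(scope="module")
def two_smtp(request):
    smtp1 = smtplib.SMTP("merlinux.eu")
    request.addfinalizer(smtp1.close)
    smtp2 = smtplib.SMTP("mail.python.org")
    request.addfinalizer(smtp2.close)  # runs before smtp1.close
    return smtp1, smtp2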
.. _`request-context`: .. _`request-context`:
Fixtures can interact with the requesting test context Fixtures can interact with the requesting test context
------------------------------------------------------------- -------------------------------------------------------------
Fixture functions can themselves use other fixtures by naming pytest provides a builtin :py:class:`request <FixtureRequest>` object,
them as an input argument just like test functions do, see
:ref:`interdependent fixtures`. Moreover, pytest
provides a builtin :py:class:`request <FixtureRequest>` object,
which fixture functions can use to introspect the function, class or module which fixture functions can use to introspect the function, class or module
for which they are invoked or to register finalizing (cleanup) for which they are invoked.
functions which are called when the last test finished execution.
Further extending the previous ``smtp`` fixture example, let's Further extending the previous ``smtp`` fixture example, let's
read an optional server URL from the module namespace and register read an optional server URL from the module namespace::
a finalizer that closes the smtp connection after the last
test in a module finished execution::
# content of conftest.py # content of conftest.py
import pytest import pytest
@ -260,26 +326,25 @@ test in a module finished execution::
def smtp(request): def smtp(request):
server = getattr(request.module, "smtpserver", "merlinux.eu") server = getattr(request.module, "smtpserver", "merlinux.eu")
smtp = smtplib.SMTP(server) smtp = smtplib.SMTP(server)
def fin(): yield smtp # provide the fixture
print ("finalizing %s" % smtp) print ("finalizing %s" % smtp)
smtp.close() smtp.close()
request.addfinalizer(fin)
return smtp
The registered ``fin`` function will be called when the last test The finalizing part after the ``yield smtp`` statement will execute
using it has executed:: when the last test using the ``smtp`` fixture has executed::
$ py.test -s -q --tb=no $ py.test -s -q --tb=no
FF FF
finalizing <smtplib.SMTP instance at 0x2720290> finalizing <smtplib.SMTP instance at 0x1e10248>
We see that the ``smtp`` instance is finalized after the two We see that the ``smtp`` instance is finalized after the two
tests using it tests executed. If we had specified ``scope='function'`` tests which use it finished executing. If we rather specify
then fixture setup and cleanup would occur around each single test. ``scope='function'`` then fixture setup and cleanup occurs
Note that either case the test module itself does not need to change! around each single test. Note that in either case the test
module itself does not need to change!
Let's quickly create another test module that actually sets the Let's quickly create another test module that actually sets the
server URL and has a test to verify the fixture picks it up:: server URL in its module namespace::
# content of test_anothersmtp.py # content of test_anothersmtp.py
@ -298,6 +363,9 @@ Running it::
> assert 0, smtp.helo() > assert 0, smtp.helo()
E AssertionError: (250, 'mail.python.org') E AssertionError: (250, 'mail.python.org')
voila! The ``smtp`` fixture function picked up our mail server name
from the module namespace.
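That other module can be sketched as follows (a reconstruction from the
failing ``helo()`` assertion above)::

# content of test_anothersmtp.py (sketch)

smtpserver = "mail.python.org"  # will be read by the smtp fixture

def test_showhelo(smtp):
    assert 0, smtp.helo()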
.. _`fixture-parametrize`: .. _`fixture-parametrize`:
@ -324,11 +392,9 @@ through the special :py:class:`request <FixtureRequest>` object::
params=["merlinux.eu", "mail.python.org"]) params=["merlinux.eu", "mail.python.org"])
def smtp(request): def smtp(request):
smtp = smtplib.SMTP(request.param) smtp = smtplib.SMTP(request.param)
def fin(): yield smtp
print ("finalizing %s" % smtp) print ("finalizing %s" % smtp)
smtp.close() smtp.close()
request.addfinalizer(fin)
return smtp
The main change is the declaration of ``params`` with
:py:func:`@pytest.fixture <_pytest.python.fixture>`, a list of values
@ -341,7 +407,7 @@ So let's just do another run::

    ================================= FAILURES =================================
    __________________________ test_ehlo[merlinux.eu] __________________________

    smtp = <smtplib.SMTP instance at 0x1b38a28>

    def test_ehlo(smtp):
        response = smtp.ehlo()

@ -353,7 +419,7 @@ So let's just do another run::

    test_module.py:6: AssertionError
    __________________________ test_noop[merlinux.eu] __________________________

    smtp = <smtplib.SMTP instance at 0x1b38a28>

    def test_noop(smtp):
        response = smtp.noop()

@ -364,18 +430,18 @@ So let's just do another run::

    test_module.py:11: AssertionError
    ________________________ test_ehlo[mail.python.org] ________________________

    smtp = <smtplib.SMTP instance at 0x1b496c8>

    def test_ehlo(smtp):
        response = smtp.ehlo()
        assert response[0] == 250
    >   assert "merlinux" in response[1]
    E   assert 'merlinux' in 'mail.python.org\nSIZE 25600000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN'

    test_module.py:5: AssertionError
    ________________________ test_noop[mail.python.org] ________________________

    smtp = <smtplib.SMTP instance at 0x1b496c8>

    def test_noop(smtp):
        response = smtp.noop()

@ -423,13 +489,13 @@ Here we declare an ``app`` fixture which receives the previously defined

    $ py.test -v test_appsetup.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
    collecting ... collected 2 items

    test_appsetup.py:12: test_smtp_exists[merlinux.eu] PASSED
    test_appsetup.py:12: test_smtp_exists[mail.python.org] PASSED

    ========================= 2 passed in 5.38 seconds =========================
Due to the parametrization of ``smtp`` the test will run twice with two
different ``App`` instances and respective smtp servers. There is no
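The ``app`` fixture declaration itself falls in the elided part of this
hunk; a sketch of what such a fixture-receiving fixture looks like,
reusing the names from the surrounding text::

    # a sketch, assuming the App class of the surrounding example
    import pytest

    class App:
        def __init__(self, smtp):
            self.smtp = smtp

    @pytest.fixture(scope="module")
    def app(smtp):
        # app itself requests the (parametrized) smtp fixture
        return App(smtp)

    def test_smtp_exists(app):
        assert app.smtp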
@ -468,10 +534,8 @@ to show the setup/teardown flow::

    def modarg(request):
        param = request.param
        print "create", param
        yield param
        print ("fin %s" % param)

    @pytest.fixture(scope="function", params=[1,2])
    def otherarg(request):
@ -488,7 +552,7 @@ Let's run the tests in verbose mode and with looking at the print-output::

    $ py.test -v -s test_module.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- pytest-2.3.5 -- /home/hpk/p/pytest/.tox/regen/bin/python
    collecting ... collected 8 items

    test_module.py:16: test_0[1] PASSED

@ -518,8 +582,8 @@ You can see that the parametrized module-scoped ``modarg`` resource caused

an ordering of test execution that led to the fewest possible "active"
resources. The finalizer for the ``mod1`` parametrized resource was executed
before the ``mod2`` resource was set up.
.. _`usefixtures`:

using fixtures from classes, modules or projects
----------------------------------------------------------------------

View File

@ -23,7 +23,7 @@ Installation options::
To check your installation has installed the correct version::

    $ py.test --version
    This is py.test version 2.3.5, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.py

If you get an error, check out :ref:`installation issues`.

@ -45,7 +45,7 @@ That's it. You can execute the test function now::

    $ py.test
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
    collected 1 items

    test_sample.py F

@ -122,7 +122,7 @@ run the module by passing its filename::

    ================================= FAILURES =================================
    ____________________________ TestClass.test_two ____________________________

    self = <test_class.TestClass instance at 0x315b488>

    def test_two(self):
        x = "hello"

@ -157,7 +157,7 @@ before performing the test function call. Let's just run it::

    ================================= FAILURES =================================
    _____________________________ test_needsfiles ______________________________

    tmpdir = local('/tmp/pytest-322/test_needsfiles0')

    def test_needsfiles(tmpdir):
        print tmpdir

@ -166,7 +166,7 @@ before performing the test function call. Let's just run it::

    test_tmpdir.py:3: AssertionError
    ----------------------------- Captured stdout ------------------------------
    /tmp/pytest-322/test_needsfiles0

Before the test runs, a unique-per-test-invocation temporary directory
was created. More info at :ref:`tmpdir handling`.

BIN doc/en/img/cramer2.png (new file; 25 KiB)

BIN doc/en/img/gaynor3.png (new file; 22 KiB)

BIN doc/en/img/keleshev.png (new file; 23 KiB)

BIN doc/en/img/theuni.png (new file; 31 KiB)

View File

@ -4,15 +4,15 @@

pytest: helps you write better programs
=============================================

**a mature full-featured Python testing tool**

- runs on Posix/Windows, Python 2.4-3.3, PyPy and Jython-2.5.1
- :ref:`comprehensive online <toc>` and `PDF documentation <pytest.pdf>`_
- many :ref:`third party plugins <extplugins>` and
  :ref:`builtin helpers <pytest helpers>`
- used in :ref:`many projects and organisations <projects>`, in test
  suites with up to twenty thousand tests
- strict policy of remaining backward compatible across releases
- comes with many :ref:`tested examples <examples>`

**provides easy no-boilerplate testing**

@ -26,13 +26,13 @@ pytest: helps you write better programs

**scales from simple unit to complex functional testing**

- :ref:`modular parametrizeable fixtures <fixture>` (new in 2.3,
  improved in 2.4)
- :ref:`parametrized test functions <parametrized test functions>`
- :ref:`mark`
- :ref:`skipping` (improved in 2.4)
- can :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
- can :ref:`continuously re-run failing tests <looponfailing>`
- flexible :ref:`Python test discovery`

**integrates many common testing methods**:

@ -50,8 +50,9 @@ pytest: helps you write better programs

**extensive plugin and customization system**:

- all collection, reporting, running aspects are delegated to hook functions
- customizations can be per-directory, per-project or per PyPI released plugin
- it is easy to add command line options or customize existing behaviour

.. _`Javascript unit- and functional testing`: http://pypi.python.org/pypi/oejskit

View File

@ -30,7 +30,7 @@ pytest supports test parametrization in several well-integrated ways:

.. regendoc: wipe

.. versionadded:: 2.2, improved in 2.4

The builtin ``pytest.mark.parametrize`` decorator enables
parametrization of arguments for a test function. Here is a typical example

@ -39,7 +39,7 @@ to an expected output::

    # content of test_expectation.py
    import pytest
    @pytest.mark.parametrize("input,expected", [
        ("3+5", 8),
        ("2+4", 6),
        ("6*9", 42),
    ])
    def test_eval(input, expected):
        assert eval(input) == expected

Here, the ``@parametrize`` decorator defines three different ``(input, expected)``
tuples so that the ``test_eval`` function will run three times using
them in turn::

    $ py.test
    ============================= test session starts ==============================
    platform linux2 -- Python 2.7.3 -- pytest-2.4.0.dev3
    plugins: xdist, cache, cli, pep8, xprocess, cov, capturelog, bdd-splinter, rerunfailures, instafail, localserver
    collected 3 items

    test_expectation.py ..F

    =================================== FAILURES ===================================
    ______________________________ test_eval[6*9-42] _______________________________

    input = '6*9', expected = 42

        @pytest.mark.parametrize("input,expected", [
            ("3+5", 8),
            ("2+4", 6),
            ("6*9", 42),

@ -74,14 +75,49 @@ which will thus run three times::

    E        +  where 54 = eval('6*9')

    test_expectation.py:8: AssertionError
    ====================== 1 failed, 2 passed in 0.02 seconds ======================

As designed in this example, only one pair of input/output values fails
the simple test function. And as usual with test function arguments,
you can see the ``input`` and ``output`` values in the traceback.

Note that you could also use the parametrize marker on a class or a module
(see :ref:`mark`) which would invoke several functions with the argument sets.
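A minimal sketch of that class-level form (an invented example)::

    import pytest

    @pytest.mark.parametrize("n,expected", [(1, 2), (3, 4)])
    class TestSimpleCases:
        def test_increment(self, n, expected):
            assert n + 1 == expected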
It is also possible to mark individual test instances within parametrize,
for example with the builtin ``mark.xfail``::

    # content of test_expectation.py
    import pytest
    @pytest.mark.parametrize("input,expected", [
        ("3+5", 8),
        ("2+4", 6),
        pytest.mark.xfail(("6*9", 42)),
    ])
    def test_eval(input, expected):
        assert eval(input) == expected

Let's run this::

    $ py.test
    ============================= test session starts ==============================
    platform linux2 -- Python 2.7.3 -- pytest-2.4.0.dev3
    plugins: xdist, cache, cli, pep8, xprocess, cov, capturelog, bdd-splinter, rerunfailures, instafail, localserver
    collected 3 items

    test_expectation.py ..x

    ===================== 2 passed, 1 xfailed in 0.02 seconds ======================

The one parameter set which caused a failure previously now
shows up as an "xfailed (expected to fail)" test.

.. note::

    In versions prior to 2.4 one needed to specify the argument
    names as a tuple. This remains valid but the simpler ``"name1,name2,..."``
    comma-separated-string syntax is now advertised first because
    it is easier to write and produces less line noise.
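The two spellings are interchangeable; a minimal sketch::

    import pytest

    # comma-separated string form (new in 2.4)
    @pytest.mark.parametrize("input,expected", [(1, 2), (2, 3)])
    def test_csv_form(input, expected):
        assert input + 1 == expected

    # tuple form (pre-2.4, still valid)
    @pytest.mark.parametrize(("input", "expected"), [(1, 2), (2, 3)])
    def test_tuple_form(input, expected):
        assert input + 1 == expected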
.. _`pytest_generate_tests`:

@ -128,15 +164,15 @@ Let's also run with a stringinput that will lead to a failing test::

    $ py.test -q --stringinput="!" test_strings.py
    F
    =================================== FAILURES ===================================
    _____________________________ test_valid_string[!] _____________________________

    stringinput = '!'

        def test_valid_string(stringinput):
    >       assert stringinput.isalpha()
    E       assert <built-in method isalpha of str object at 0x7fd657390fd0>()
    E        +  where <built-in method isalpha of str object at 0x7fd657390fd0> = '!'.isalpha

    test_strings.py:3: AssertionError

@ -148,8 +184,8 @@ listlist::

    $ py.test -q -rs test_strings.py
    s
    =========================== short test summary info ============================
    SKIP [1] /home/hpk/p/pytest/_pytest/python.py:999: got empty parameter set, function test_valid_string at /tmp/doc-exec-2/test_strings.py:1
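For orientation, the kind of hook that produces the runs above looks
roughly like this (a sketch; the actual ``conftest.py`` sits in the part
of the file not shown in this hunk)::

    # content of conftest.py -- a sketch, assuming a --stringinput option
    def pytest_addoption(parser):
        parser.addoption("--stringinput", action="append", default=[],
                         help="list of stringinputs to pass to test functions")

    def pytest_generate_tests(metafunc):
        if "stringinput" in metafunc.fixturenames:
            metafunc.parametrize("stringinput",
                                 metafunc.config.option.stringinput)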
For further examples, you might want to look at :ref:`more
parametrization examples <paramexamples>`.

View File

@ -78,12 +78,22 @@ there is no need to activate it. Here is a initial list of known plugins:

* `pytest-capturelog <http://pypi.python.org/pypi/pytest-capturelog>`_:
  to capture and assert about messages from the logging module

* `pytest-cov <http://pypi.python.org/pypi/pytest-cov>`_:
  coverage reporting, compatible with distributed testing

* `pytest-xdist <http://pypi.python.org/pypi/pytest-xdist>`_:
  to distribute tests to CPUs and remote hosts, to run in boxed
  mode which allows to survive segmentation faults, to run in
  looponfailing mode, automatically re-running failing tests
  on file changes, see also :ref:`xdist`

* `pytest-instafail <http://pypi.python.org/pypi/pytest-instafail>`_:
  to report failures while the test run is happening.

* `pytest-bdd <http://pypi.python.org/pypi/pytest-bdd>`_ and
  `pytest-konira <http://pypi.python.org/pypi/pytest-konira>`_
  to write tests using behaviour-driven testing.

* `pytest-timeout <http://pypi.python.org/pypi/pytest-timeout>`_:
  to timeout tests based on function marks or global definitions.

@ -91,9 +101,6 @@ there is no need to activate it. Here is a initial list of known plugins:

  to interactively re-run failing tests and help other plugins to
  store test run information across invocations.

* `pytest-pep8 <http://pypi.python.org/pypi/pytest-pep8>`_:
  a ``--pep8`` option to enable PEP8 compliance checking.
@ -338,15 +345,15 @@ Reporting hooks

Session related reporting hooks:

.. autofunction:: pytest_collectstart
.. autofunction:: pytest_itemcollected
.. autofunction:: pytest_collectreport
.. autofunction:: pytest_deselected

And here is the central hook for reporting about
test execution:

.. autofunction:: pytest_runtest_logreport
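To illustrate, a minimal ``conftest.py`` sketch hooking into it (the
printing behaviour is invented for this example)::

    # content of conftest.py -- a sketch using documented report attributes
    def pytest_runtest_logreport(report):
        # called with the setup/call/teardown report of every test item
        if report.when == "call" and report.failed:
            print ("FAILED: %s" % report.nodeid)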
Reference of objects involved in hooks
===========================================================

View File

@ -1,5 +1,22 @@

.. _projects:

.. image:: img/gaynor3.png
   :width: 400px
   :align: right

.. image:: img/theuni.png
   :width: 400px
   :align: right

.. image:: img/cramer2.png
   :width: 400px
   :align: right

.. image:: img/keleshev.png
   :width: 400px
   :align: right

Project examples
==========================

View File

@ -2,6 +2,8 @@

Asserting deprecation and other warnings
=====================================================

.. _function_argument:

The recwarn function argument
------------------------------------

@ -24,6 +26,9 @@ The ``recwarn`` function argument provides these methods:

* ``pop(category=None)``: return last warning matching the category.
* ``clear()``: clear list of warnings
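A minimal sketch combining both methods (the ``deprecated_helper``
function is invented for this example)::

    import warnings

    def deprecated_helper():  # hypothetical function under test
        warnings.warn("use new_helper() instead", DeprecationWarning)

    def test_deprecation_recorded(recwarn):
        deprecated_helper()
        w = recwarn.pop(DeprecationWarning)  # last warning matching the category
        assert issubclass(w.category, DeprecationWarning)
        recwarn.clear()                      # list of warnings is empty again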
.. _ensuring_function_triggers:

Ensuring a function triggers a deprecation warning
-------------------------------------------------------

View File

@ -9,86 +9,110 @@ If you have test functions that cannot be run on certain platforms

or that you expect to fail you can mark them accordingly or you
may call helper functions during execution of setup or test functions.

A *skip* means that you expect your test to pass unless the environment
(e.g. wrong Python interpreter, missing dependency) prevents it from running.
And *xfail* means that your test can run but you expect it to fail
because there is an implementation problem.

py.test counts and lists *skip* and *xfail* tests separately. Detailed
information about skipped/xfailed tests is not shown by default to avoid
cluttering the output. You can use the ``-r`` option to see details
corresponding to the "short" letters shown in the test progress::

    py.test -rxs  # show extra info on skips and xfails

(See :ref:`how to change command line options defaults`)
.. _skipif:
.. _`condition booleans`:

Marking a test function to be skipped
-------------------------------------------

.. versionadded:: 2.4

Here is an example of marking a test function to be skipped
when run on a Python3.3 interpreter::

    import sys
    @pytest.mark.skipif(sys.version_info >= (3,3),
                        reason="requires python3.3")
    def test_function():
        ...

During test function setup the condition ("sys.version_info >= (3,3)") is
checked. If it evaluates to True, the test function will be skipped
with the specified reason. Note that pytest enforces specifying a reason
in order to report meaningful "skip reasons" (e.g. when using ``-rs``).

You can share skipif markers between modules. Consider this test module::

    # content of test_mymodule.py
    import mymodule

    minversion = pytest.mark.skipif(mymodule.__versioninfo__ < (1,1),
                                    reason="at least mymodule-1.1 required")

    @minversion
    def test_function():
        ...

You can import it from another test module::

    # test_myothermodule.py
    from test_mymodule import minversion

    @minversion
    def test_anotherfunction():
        ...

For larger test suites it's usually a good idea to have one file
where you define the markers which you then consistently apply
throughout your test suite.
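For instance, a hypothetical ``markers.py`` collecting such project-wide
markers (file name and conditions invented for illustration)::

    # content of markers.py -- a hypothetical shared-markers file
    import sys
    import pytest

    win32only = pytest.mark.skipif(sys.platform != 'win32',
                                   reason="requires win32")
    py33 = pytest.mark.skipif(sys.version_info < (3,3),
                              reason="requires python3.3")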
Alternatively, the pre pytest-2.4 way of specifying `condition strings
<string conditions>`_ instead of booleans will remain fully supported in
future versions of pytest. It couldn't be easily used for importing markers
between test modules so it's no longer advertised as the primary method.
Skip all test functions of a class or module
---------------------------------------------

As with all function :ref:`marking <mark>` you can skip test functions at the
`whole class- or module level`_. If your code targets python2.6 or above you
can use the skipif decorator (and any other marker) on classes::

    @pytest.mark.skipif(sys.platform == 'win32',
                        reason="requires windows")
    class TestPosixCalls:

        def test_function(self):
            "will not be setup or run under 'win32' platform"

If the condition is true, this marker will produce a skip result for
each of the test methods.

If your code targets python2.5 where class-decorators are not available,
you can set the ``pytestmark`` attribute of a class::

    class TestPosixCalls:
        pytestmark = pytest.mark.skipif(sys.platform == 'win32',
                                        reason="requires Windows")

        def test_function(self):
            "will not be setup or run under 'win32' platform"

As with the class-decorator, the ``pytestmark`` special name tells
py.test to apply it to each test function in the class.

If you want to skip all test functions of a module, you must use
the ``pytestmark`` name on the global level::

    # test_module.py
    pytestmark = pytest.mark.skipif(...)

If multiple "skipif" decorators are applied to a test function, it
will be skipped if any of the skip conditions is true.
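A short sketch with two stacked conditions (both invented for
illustration)::

    import sys
    import pytest

    @pytest.mark.skipif(sys.platform == 'win32', reason="posix only")
    @pytest.mark.skipif(sys.version_info < (2,6), reason="requires python2.6")
    def test_function():
        pass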
.. _`whole class- or module level`: mark.html#scoped-marking
@ -118,7 +142,8 @@ as if it weren't marked at all.

As with skipif_ you can also mark your expectation of a failure
on a particular platform::

    @pytest.mark.xfail(sys.version_info >= (3,3),
                       reason="python3.3 api changes")
    def test_function():
        ...

@ -132,7 +157,7 @@ Running it with the report-on-xfail option gives this output::

    example $ py.test -rx xfail_demo.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
    collected 6 items

    xfail_demo.py xxxxxx

@ -151,41 +176,41 @@ Running it with the report-on-xfail option gives this output::

    ======================== 6 xfailed in 0.05 seconds =========================
.. _`skip/xfail with parametrize`:

Skip/xfail with parametrize
---------------------------

It is possible to apply markers like skip and xfail to individual
test instances when using parametrize::

    import pytest

    @pytest.mark.parametrize(("n", "expected"), [
        (1, 2),
        pytest.mark.xfail((1, 0)),
        pytest.mark.xfail(reason="some bug")((1, 3)),
        (2, 3),
        (3, 4),
        (4, 5),
        pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
    ])
    def test_increment(n, expected):
        assert n + 1 == expected
Imperative xfail from within a test or setup function
------------------------------------------------------

If you cannot declare xfail or skipif conditions at import
time you can also produce the corresponding outcome imperatively,
in test or setup code::

    def test_function():
        if not valid_config():
            pytest.xfail("failing configuration (but should work)")
            # or
            pytest.skip("unsupported configuration")
Skipping on a missing import dependency
------------------------------------------------------

@ -202,16 +227,61 @@ version number of a library::

    docutils = pytest.importorskip("docutils", minversion="0.3")

The version will be read from the specified
module's ``__version__`` attribute.

.. _`string conditions`:

specifying conditions as strings versus booleans
----------------------------------------------------------

Prior to pytest-2.4 the only way to specify skipif/xfail conditions was
to use strings::

    import sys

    @pytest.mark.skipif("sys.version_info >= (3,3)")
    def test_function():
        ...
During test function setup the skipif condition is evaluated by calling
``eval('sys.version_info >= (3,3)', namespace)``. The namespace contains
all the module globals, and ``os`` and ``sys`` as a minimum.

Since pytest-2.4 `condition booleans`_ are considered preferable
because markers can then be freely imported between test modules.
With strings you need to import not only the marker but also
everything used by the marker, which violates encapsulation.

The reason for specifying the condition as a string was that py.test can
report a summary of skip conditions based purely on the condition string.
With conditions as booleans you are required to specify a ``reason`` string.

Note that string conditions will remain fully supported and you are free
to use them if you have no need for cross-importing markers.
The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)``
or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace
dictionary which is constructed as follows:

* the namespace is initialized by putting the ``sys`` and ``os`` modules
  and the pytest ``config`` object into it.

* updated with the module globals of the test function for which the
  expression is applied.

The pytest ``config`` object allows you to skip based on a test
configuration value which you might have added::

    @pytest.mark.skipif("not config.getvalue('db')")
    def test_function(...):
        ...

The equivalent with "boolean conditions" is::

    @pytest.mark.skipif(not pytest.config.getvalue("db"),
                        reason="--db was not specified")
    def test_function(...):
        pass

View File

@ -4,8 +4,6 @@ Talks and Tutorials

.. _`funcargs`: funcargs.html

Tutorial examples and blog postings
---------------------------------------------

View File

@ -29,7 +29,7 @@ Running this would result in a passed test except for the last

    $ py.test test_tmpdir.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
    collected 1 items

    test_tmpdir.py F

@ -37,7 +37,7 @@ Running this would result in a passed test except for the last

    ================================= FAILURES =================================
    _____________________________ test_create_file _____________________________

    tmpdir = local('/tmp/pytest-323/test_create_file0')

    def test_create_file(tmpdir):
        p = tmpdir.mkdir("sub").join("hello.txt")

View File

@ -88,7 +88,7 @@ the ``self.db`` values in the traceback::

    $ py.test test_unittest_db.py
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- pytest-2.3.5
    collected 2 items

    test_unittest_db.py FF

@ -101,7 +101,7 @@ the ``self.db`` values in the traceback::

    def test_method1(self):
        assert hasattr(self, "db")
    >   assert 0, self.db # fail for demo purposes
    E   AssertionError: <conftest.DummyDB instance at 0x19fdf38>

    test_unittest_db.py:9: AssertionError
    ___________________________ MyTest.test_method2 ____________________________

@ -110,7 +110,7 @@ the ``self.db`` values in the traceback::

    def test_method2(self):
    >   assert 0, self.db # fail for demo purposes
    E   AssertionError: <conftest.DummyDB instance at 0x19fdf38>

    test_unittest_db.py:12: AssertionError
    ========================= 2 failed in 0.02 seconds =========================

View File

@ -12,7 +12,7 @@ def main():

        name='pytest',
        description='py.test: simple powerful testing with Python',
        long_description = long_description,
        version='2.4.0.dev5',
        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],

@ -21,7 +21,7 @@ def main():

        entry_points= make_entry_points(),
        cmdclass = {'test': PyTest},
        # the following should be enabled for release
        install_requires=['py>=1.4.14'],
        classifiers=['Development Status :: 6 - Mature',
                     'Intended Audience :: Developers',
                     'License :: OSI Approved :: MIT License',

View File

@ -1712,6 +1712,20 @@ class TestFixtureMarker:

        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=6)

    def test_fixture_marked_function_not_collected_as_test(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def test_app():
                return 1

            def test_something(test_app):
                assert test_app == 1
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

class TestRequestScopeAccess:
    pytestmark = pytest.mark.parametrize(("scope", "ok", "error"),[
        ["session", "", "fspath class function module"],

@ -1852,3 +1866,101 @@ class TestShowFixtures:

            *hello world*
        """)
class TestContextManagerFixtureFuncs:
    def test_simple(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def arg1():
                print ("setup")
                yield 1
                print ("teardown")
            def test_1(arg1):
                print ("test1 %s" % arg1)
            def test_2(arg1):
                print ("test2 %s" % arg1)
                assert 0
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            setup
            test1 1
            teardown
            setup
            test2 1
            teardown
        """)

    def test_scoped(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="module")
            def arg1():
                print ("setup")
                yield 1
                print ("teardown")
            def test_1(arg1):
                print ("test1 %s" % arg1)
            def test_2(arg1):
                print ("test2 %s" % arg1)
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            setup
            test1 1
            test2 1
            teardown
        """)

    def test_setup_exception(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="module")
            def arg1():
                pytest.fail("setup")
                yield 1
            def test_1(arg1):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *pytest.fail*setup*
            *1 error*
        """)

    def test_teardown_exception(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="module")
            def arg1():
                yield 1
                pytest.fail("teardown")
            def test_1(arg1):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *pytest.fail*teardown*
            *1 passed*1 error*
        """)

    def test_yields_more_than_one(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="module")
            def arg1():
                yield 1
                yield 2
            def test_1(arg1):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *fixture function*
            *test_yields*:2*
        """)

View File

@ -117,3 +117,35 @@ class TestMockDecoration:

        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)

class TestReRunTests:
    def test_rerun(self, testdir):
        testdir.makeconftest("""
            from _pytest.runner import runtestprotocol
            def pytest_runtest_protocol(item, nextitem):
                runtestprotocol(item, log=False, nextitem=nextitem)
                runtestprotocol(item, log=True, nextitem=nextitem)
        """)
        testdir.makepyfile("""
            import pytest
            count = 0
            req = None
            @pytest.fixture
            def fix(request):
                global count, req
                assert request != req
                req = request
                print ("fix count %s" % count)
                count += 1
            def test_fix(fix):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *fix count 0*
            *fix count 1*
        """)
        result.stdout.fnmatch_lines("""
            *2 passed*
        """)

View File

@ -221,6 +221,16 @@ class TestMetafunc:

            "*6 fail*",
        ])

    def test_parametrize_CSV(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.parametrize("x, y,", [(1,2), (2,3)])
            def test_func(x, y):
                assert x+1 == y
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)

    def test_parametrize_class_scenarios(self, testdir):
        testdir.makepyfile("""
            # same as doc/en/example/parametrize scenario example

@ -545,6 +555,20 @@ class TestMetafuncFunctional:

        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=5)
    def test_parametrize_issue323(self, testdir):
        testdir.makepyfile("""
            import pytest

            @pytest.fixture(scope='module', params=range(966))
            def foo(request):
                return request.param

            def test_it(foo):
                pass
        """)
        reprec = testdir.inline_run("--collectonly")
        assert not reprec.getcalls("pytest_internalerror")

    def test_usefixtures_seen_in_generate_tests(self, testdir):
        testdir.makepyfile("""
            import pytest

@ -578,3 +602,186 @@ class TestMetafuncFunctional:

        ])
class TestMarkersWithParametrization:
    pytestmark = pytest.mark.issue308

    def test_simple_mark(self, testdir):
        s = """
            import pytest

            @pytest.mark.foo
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.bar((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        items = testdir.getitems(s)
        assert len(items) == 3
        for item in items:
            assert 'foo' in item.keywords
        assert 'bar' not in items[0].keywords
        assert 'bar' in items[1].keywords
        assert 'bar' not in items[2].keywords

    def test_select_based_on_mark(self, testdir):
        s = """
            import pytest

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.foo((2, 3)),
                (3, 4),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        rec = testdir.inline_run("-m", 'foo')
        passed, skipped, fail = rec.listoutcomes()
        assert len(passed) == 1
        assert len(skipped) == 0
        assert len(fail) == 0

    @pytest.mark.xfail(reason="is this important to support??")
    def test_nested_marks(self, testdir):
        s = """
            import pytest
            mastermark = pytest.mark.foo(pytest.mark.bar)

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                mastermark((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        items = testdir.getitems(s)
        assert len(items) == 3
        for mark in ['foo', 'bar']:
            assert mark not in items[0].keywords
            assert mark in items[1].keywords
            assert mark not in items[2].keywords

    def test_simple_xfail(self, testdir):
        s = """
            import pytest

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        # xfail is skip??
        reprec.assertoutcome(passed=2, skipped=1)

    def test_simple_xfail_single_argname(self, testdir):
        s = """
            import pytest

            @pytest.mark.parametrize("n", [
                2,
                pytest.mark.xfail(3),
                4,
            ])
            def test_isEven(n):
                assert n % 2 == 0
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_arg(self, testdir):
        s = """
            import pytest

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail("True")((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_kwarg(self, testdir):
        s = """
            import pytest

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail(reason="some bug")((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_arg_and_kwarg(self, testdir):
        s = """
            import pytest

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail("True", reason="some bug")((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_passing_is_xpass(self, testdir):
        s = """
            import pytest

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
                (3, 4),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        # xpass is fail, obviously :)
        reprec.assertoutcome(passed=2, failed=1)

    def test_parametrize_called_in_generate_tests(self, testdir):
        s = """
            import pytest

            def pytest_generate_tests(metafunc):
                passingTestData = [(1, 2),
                                   (2, 3)]
                failingTestData = [(1, 3),
                                   (2, 2)]
                testData = passingTestData + [pytest.mark.xfail(d)
                                              for d in failingTestData]
                metafunc.parametrize(("n", "expected"), testData)

            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=2)

View File

@ -99,6 +99,11 @@ class TestAssert_reprcompare:

        expl = callequal(set([0, 1]), set([0, 2]))
        assert len(expl) > 1

    def test_frozenzet(self):
        expl = callequal(frozenset([0, 1]), set([0, 2]))
        print (expl)
        assert len(expl) > 1

    def test_list_tuples(self):
        expl = callequal([], [(1,2)])
        assert len(expl) > 1

@ -315,3 +320,17 @@ def test_warn_missing(testdir):

    result.stderr.fnmatch_lines([
        "*WARNING*assert statements are not executed*",
    ])
def test_recursion_source_decode(testdir):
    testdir.makepyfile("""
        def test_something():
            pass
    """)
    testdir.makeini("""
        [pytest]
        python_files = *.py
    """)
    result = testdir.runpytest("--collectonly")
    result.stdout.fnmatch_lines("""
        <Module*>
    """)

View File

@ -1,4 +1,5 @@

import os
import stat
import sys
import zipfile
import py

@ -319,10 +320,22 @@ class TestRewriteOnImport:

    def test_pycache_is_a_file(self, testdir):
        testdir.tmpdir.join("__pycache__").write("Hello")
        testdir.makepyfile("""
            def test_rewritten():
                assert "@py_builtins" in globals()""")
        assert testdir.runpytest().ret == 0
    def test_pycache_is_readonly(self, testdir):
        cache = testdir.tmpdir.mkdir("__pycache__")
        old_mode = cache.stat().mode
        cache.chmod(old_mode ^ stat.S_IWRITE)
        testdir.makepyfile("""
            def test_rewritten():
                assert "@py_builtins" in globals()""")
        try:
            assert testdir.runpytest().ret == 0
        finally:
            cache.chmod(old_mode)
    def test_zipfile(self, testdir):
        z = testdir.tmpdir.join("myzip.zip")
        z_fn = str(z)

@ -334,9 +347,9 @@ def test_rewritten():

        f.close()
        z.chmod(256)
        testdir.makepyfile("""
            import sys
            sys.path.append(%r)
            import test_gum.test_lizard""" % (z_fn,))
        assert testdir.runpytest().ret == 0

    def test_readonly(self, testdir):

@ -345,17 +358,21 @@ import test_gum.test_lizard""" % (z_fn,))

            py.builtin._totext("""
                def test_rewritten():
                    assert "@py_builtins" in globals()
                """).encode("utf-8"), "wb")
        old_mode = sub.stat().mode
        sub.chmod(320)
        try:
            assert testdir.runpytest().ret == 0
        finally:
            sub.chmod(old_mode)

    def test_dont_write_bytecode(self, testdir, monkeypatch):
        testdir.makepyfile("""
            import os

            def test_no_bytecode():
                assert "__pycache__" in __cached__
                assert not os.path.exists(__cached__)
                assert not os.path.exists(os.path.dirname(__cached__))""")
        monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
        assert testdir.runpytest().ret == 0

View File

@ -1,4 +1,4 @@

from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
import py, pytest

class TestDoctests:

@ -19,13 +19,61 @@ class TestDoctests:

        items, reprec = testdir.inline_genitems(w)
        assert len(items) == 1

    def test_collect_module_empty(self, testdir):
        path = testdir.makepyfile(whatever="#")
        for p in (path, testdir.tmpdir):
            items, reprec = testdir.inline_genitems(p,
                '--doctest-modules')
            assert len(items) == 0

    def test_collect_module_single_modulelevel_doctest(self, testdir):
        path = testdir.makepyfile(whatever='""">>> pass"""')
        for p in (path, testdir.tmpdir):
            items, reprec = testdir.inline_genitems(p,
                '--doctest-modules')
            assert len(items) == 1
            assert isinstance(items[0], DoctestItem)
            assert isinstance(items[0].parent, DoctestModule)
    def test_collect_module_two_doctest_one_modulelevel(self, testdir):
        path = testdir.makepyfile(whatever="""
            '>>> x = None'
            def my_func():
                ">>> magic = 42 "
        """)
        for p in (path, testdir.tmpdir):
            items, reprec = testdir.inline_genitems(p,
                '--doctest-modules')
            assert len(items) == 2
            assert isinstance(items[0], DoctestItem)
            assert isinstance(items[1], DoctestItem)
            assert isinstance(items[0].parent, DoctestModule)
            assert items[0].parent is items[1].parent

    def test_collect_module_two_doctest_no_modulelevel(self, testdir):
        path = testdir.makepyfile(whatever="""
            '# Empty'
            def my_func():
                ">>> magic = 42 "
            def unuseful():
                '''
                # This is a function
                # >>> # it doesn't have any doctest
                '''
            def another():
                '''
                # This is another function
                >>> import os # this one does have a doctest
                '''
        """)
        for p in (path, testdir.tmpdir):
            items, reprec = testdir.inline_genitems(p,
                '--doctest-modules')
            assert len(items) == 2
            assert isinstance(items[0], DoctestItem)
            assert isinstance(items[1], DoctestItem)
            assert isinstance(items[0].parent, DoctestModule)
            assert items[0].parent is items[1].parent
    def test_simple_doctestfile(self, testdir):
        p = testdir.maketxtfile(test_doc="""

@ -164,3 +212,47 @@ class TestDoctests:

        """)
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(passed=1)
    def test_doctestmodule_three_tests(self, testdir):
        p = testdir.makepyfile("""
            '''
            >>> dir = getfixture('tmpdir')
            >>> type(dir).__name__
            'LocalPath'
            '''
            def my_func():
                '''
                >>> magic = 42
                >>> magic - 42
                0
                '''
            def unuseful():
                pass
            def another():
                '''
                >>> import os
                >>> os is os
                True
                '''
        """)
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(passed=3)

    def test_doctestmodule_two_tests_one_fail(self, testdir):
        p = testdir.makepyfile("""
            class MyClass:
                def bad_meth(self):
                    '''
                    >>> magic = 42
                    >>> magic
                    0
                    '''
                def nice_meth(self):
                    '''
                    >>> magic = 42
                    >>> magic - 42
                    0
                    '''
        """)
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(failed=1, passed=1)

View File

@ -450,3 +450,16 @@ def test_logxml_changingdir(testdir):

    assert result.ret == 0
    assert testdir.tmpdir.join("a/x.xml").check()

def test_escaped_parametrized_names_xml(testdir):
    testdir.makepyfile("""
        import pytest
        @pytest.mark.parametrize('char', ["\\x00"])
        def test_func(char):
            assert char
    """)
    result, dom = runandparse(testdir)
    assert result.ret == 0
    node = dom.getElementsByTagName("testcase")[0]
    assert_attr(node,
        name="test_func[#x00]")

View File

@ -345,6 +345,24 @@ class TestFunctional:

        assert l[0].args == ("pos0",)
        assert l[1].args == ("pos1",)

    def test_no_marker_match_on_unmarked_names(self, testdir):
        p = testdir.makepyfile("""
            import pytest
            @pytest.mark.shouldmatch
            def test_marked():
                assert 1

            def test_unmarked():
                assert 1
        """)
        reprec = testdir.inline_run("-m", "test_unmarked", p)
        passed, skipped, failed = reprec.listoutcomes()
        assert len(passed) + len(skipped) + len(failed) == 0
        dlist = reprec.getcalls("pytest_deselected")
        deselected_tests = dlist[0].items
        assert len(deselected_tests) == 2
    def test_keywords_at_node_level(self, testdir):
        p = testdir.makepyfile("""
            import pytest

@ -382,7 +400,6 @@ class TestKeywordSelection:

        assert len(reprec.getcalls('pytest_deselected')) == 1
        for keyword in ['test_one', 'est_on']:
            check(keyword, 'test_one')

        check('TestClass and test', 'test_method_one')

@ -401,7 +418,7 @@ class TestKeywordSelection:

            def pytest_pycollect_makeitem(__multicall__, name):
                if name == "TestClass":
                    item = __multicall__.execute()
                    item.extra_keyword_matches.add("xxx")
                    return item
        """)
        reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword)

@ -440,3 +457,22 @@ class TestKeywordSelection:

        reprec = testdir.inline_run("-k", "mykeyword", p)
        passed, skipped, failed = reprec.countoutcomes()
        assert failed == 1
def test_no_magic_values(self, testdir):
"""Make sure the tests do not match on magic values,
no double underscored values, like '__dict__',
and no instance values, like '()'.
"""
p = testdir.makepyfile("""
def test_one(): assert 1
""")
def assert_test_is_not_selected(keyword):
reprec = testdir.inline_run("-k", keyword, p)
passed, skipped, failed = reprec.countoutcomes()
dlist = reprec.getcalls("pytest_deselected")
assert passed + skipped + failed == 0
deselected_tests = dlist[0].items
assert len(deselected_tests) == 1
assert_test_is_not_selected("__")
assert_test_is_not_selected("()")
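
As a usage-level illustration of the stricter matching asserted above (file and marker names are made up): -m matches marker names only, while -k matches test names and keywords:

    # test_selection_sketch.py -- hypothetical
    import pytest

    @pytest.mark.slow
    def test_marked():
        assert 1

    def test_unmarked():
        assert 1

    # py.test -m slow            -> selects test_marked only
    # py.test -k unmarked        -> selects test_unmarked only
    # py.test -m test_unmarked   -> selects nothing; a test name is not a marker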
View File
@ -226,3 +226,17 @@ def test_exclude(testdir):
    assert result.ret == 0
    result.stdout.fnmatch_lines(["*1 passed*"])

def test_sessionfinish_with_start(testdir):
    testdir.makeconftest("""
        import os
        l = []
        def pytest_sessionstart():
            l.append(os.getcwd())
            os.chdir("..")
        def pytest_sessionfinish():
            assert l[0] == os.getcwd()
    """)
    res = testdir.runpytest("--collectonly")
    assert res.ret == 0
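
A sketch of the plugin pattern this protects (file names illustrative): a hook that writes a relative-path report at session end can rely on the cwd being the one from session start, even if something chdir'd during the run:

    # conftest.py -- illustrative
    import os

    def pytest_sessionfinish():
        # executed with the same cwd as at session start, so the relative
        # path below lands in the invocation directory (the pytest-cov case)
        with open("session-report.txt", "w") as f:
            f.write("finished in %s\n" % os.getcwd())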
View File
@ -569,7 +569,6 @@ def test_default_markers(testdir):
        "*xfail(*condition, reason=None, run=True)*expected failure*",
    ])

def test_xfail_test_setup_exception(testdir):
    testdir.makeconftest("""
            def pytest_runtest_setup():
@ -610,3 +609,44 @@ def test_imperativeskip_on_xfail_test(testdir):
    """)

class TestBooleanCondition:
    def test_skipif(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skipif(True, reason="True123")
            def test_func1():
                pass
            @pytest.mark.skipif(False, reason="True123")
            def test_func2():
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("""
            *1 passed*1 skipped*
        """)

    def test_skipif_noreason(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.skipif(True)
            def test_func():
                pass
        """)
        result = testdir.runpytest("-rs")
        result.stdout.fnmatch_lines("""
            *1 error*
        """)

    def test_xfail(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail(True, reason="True123")
            def test_func():
                assert 0
        """)
        result = testdir.runpytest("-rxs")
        result.stdout.fnmatch_lines("""
            *XFAIL*
            *True123*
            *1 xfail*
        """)
View File
@ -665,3 +665,19 @@ def test_fdopen_kept_alive_issue124(testdir):
    result.stdout.fnmatch_lines([
        "*2 passed*"
    ])

def test_tbstyle_native_setup_error(testdir):
    p = testdir.makepyfile("""
        import pytest
        @pytest.fixture
        def setup_error_fixture():
            raise Exception("error in exception")
        def test_error_fixture(setup_error_fixture):
            pass
    """)
    result = testdir.runpytest("--tb=native")
    result.stdout.fnmatch_lines([
        '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*'
    ])
View File
@ -16,6 +16,7 @@ def test_funcarg(testdir):
    # pytest_unconfigure has deleted the TempdirHandler already
    config = item.config
    config._tmpdirhandler = TempdirHandler(config)
    item._initrequest()
    p = tmpdir(item._request)
    assert p.check()
    bn = p.basename.strip("0123456789")
View File
@ -65,6 +65,28 @@ def test_setup(testdir):
    rep = reprec.matchreport("test_both", when="teardown")
    assert rep.failed and '42' in str(rep.longrepr)

def test_unittest_style_setup_teardown(testdir):
    testpath = testdir.makepyfile("""
        l = []
        def setUpModule():
            l.append(1)
        def tearDownModule():
            del l[0]
        def test_hello():
            assert l == [1]
        def test_world():
            assert l == [1]
    """)
    result = testdir.runpytest(testpath)
    result.stdout.fnmatch_lines([
        "*2 passed*",
    ])

def test_new_instances(testdir):
    testpath = testdir.makepyfile("""
        import unittest
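
For comparison, the unittest-spelled module fixtures detected above work in a plain test module without importing unittest; a minimal sketch (module name and contents illustrative):

    # test_module_fixture_sketch.py -- hypothetical
    state = []

    def setUpModule():        # unittest spelling, now detected by pytest
        state.append("ready")

    def tearDownModule():
        state.remove("ready")

    def test_state_prepared():
        assert state == ["ready"]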
tox.ini
View File
@ -1,17 +1,13 @@
[tox]
distshare={homedir}/.tox/distshare
envlist=py25,py26,py27,py27-nobyte,py32,py33,py27-xdist,trial
-indexserver=
-    pypi = https://pypi.python.org/simple
-    testrun = http://pypi.testrun.org
-    default = http://pypi.testrun.org

[testenv]
changedir=testing
commands= py.test --lsof -rfsxX --junitxml={envlogdir}/junit-{envname}.xml []
deps=
-    :pypi:pexpect
-    :pypi:nose
+    pexpect
+    nose

[testenv:genscript]
changedir=.
@ -21,8 +17,8 @@ commands= py.test --genscript=pytest1
changedir=.
basepython=python2.7
deps=pytest-xdist
-    :pypi:mock
-    :pypi:nose
+    mock
+    nose
commands=
    py.test -n3 -rfsxX \
        --junitxml={envlogdir}/junit-{envname}.xml testing
@ -35,12 +31,12 @@ setenv=
    PYTHONDONTWRITEBYTECODE=1
commands=
    py.test -n3 -rfsxX \
-        --junitxml={envlogdir}/junit-{envname}.xml []
+        --junitxml={envlogdir}/junit-{envname}.xml {posargs:testing}

[testenv:trial]
changedir=.
-deps=:pypi:twisted
-    :pypi:pexpect
+deps=twisted
+    pexpect
commands=
    py.test -rsxf testing/test_unittest.py \
        --junitxml={envlogdir}/junit-{envname}.xml {posargs:testing/test_unittest.py}
@ -51,17 +47,17 @@ deps=
[testenv:py32]
deps=
-    :pypi:nose
+    nose

[testenv:py33]
deps=
-    :pypi:nose
+    nose

[testenv:doc]
basepython=python
changedir=doc/en
-deps=:pypi:sphinx
-    :pypi:PyYAML
+deps=sphinx
+    PyYAML
commands=
    make clean
@ -70,15 +66,15 @@ commands=
[testenv:regen]
basepython=python
changedir=doc/en
-deps=:pypi:sphinx
-    :pypi:PyYAML
+deps=sphinx
+    PyYAML
commands=
    rm -rf /tmp/doc-exec*
    #pip install pytest==2.3.4
    make regen

[testenv:py31]
-deps=:pypi:nose>=1.0
+deps=nose>=1.0

[testenv:py31-xdist]
deps=pytest-xdist
@ -102,4 +98,4 @@ python_files=test_*.py *_test.py testing/*/*.py
python_classes=Test Acceptance
python_functions=test
pep8ignore = E401 E225 E261 E128 E124 E302
-norecursedirs = .tox ja
+norecursedirs = .tox ja .hg
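
A note on the {posargs:...} substitution introduced above: tox replaces the placeholder with whatever follows "--" on its own command line, falling back to the given default, so an invocation like "tox -e py27-nobyte -- -x testing/test_config.py" (paths illustrative) forwards those arguments to py.test instead of the default "testing" target.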