run black

parent 3e1590bcfc
commit 703e4b11ba
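The commit message does not record how the reformatting was produced. A formatting-only change like this is typically made either by running black directly over the source tree or by applying the pre-commit hook with `pre-commit run --all-files`; the first hunk below touches exactly that hook. As a rough sketch, the black entry in .pre-commit-config.yaml after this commit would look like the following (the black repository URL and the YAML indentation are assumptions; only the rev, hook id, and args values are visible in the hunk):

    repos:
    -   repo: https://github.com/ambv/black   # URL assumed, not visible in the hunk shown below
        rev: 18.4a4
        hooks:
        -   id: black
            args: [--safe, --quiet]           # --check removed in this commit, so the hook now rewrites files

The hunks that follow show the resulting quote-style and line-wrapping changes across the pytest sources.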
@@ -4,7 +4,7 @@ repos:
rev: 18.4a4
hooks:
- id: black
args: [--safe, --quiet, --check]
args: [--safe, --quiet]
python_version: python3.6
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v1.2.3
@@ -1,8 +1,8 @@
__all__ = ['__version__']
__all__ = ["__version__"]
try:
from ._version import version as __version__
except ImportError:
# broken installation, we don't even try
# unknown only works because we do poor mans version compare
__version__ = 'unknown'
__version__ = "unknown"
@@ -61,7 +61,7 @@ from glob import glob
class FastFilesCompleter(object):
'Fast file completer class'
"Fast file completer class"
def __init__(self, directories=True):
self.directories = directories

@@ -74,21 +74,21 @@ class FastFilesCompleter(object):
prefix_dir = 0
completion = []
globbed = []
if '*' not in prefix and '?' not in prefix:
if "*" not in prefix and "?" not in prefix:
# we are on unix, otherwise no bash
if not prefix or prefix[-1] == os.path.sep:
globbed.extend(glob(prefix + '.*'))
prefix += '*'
globbed.extend(glob(prefix + ".*"))
prefix += "*"
globbed.extend(glob(prefix))
for x in sorted(globbed):
if os.path.isdir(x):
x += '/'
x += "/"
# append stripping the prefix (like bash, not like compgen)
completion.append(x[prefix_dir:])
return completion
if os.environ.get('_ARGCOMPLETE'):
if os.environ.get("_ARGCOMPLETE"):
try:
import argcomplete.completers
except ImportError:

@@ -97,7 +97,11 @@ if os.environ.get('_ARGCOMPLETE'):
def try_argcomplete(parser):
argcomplete.autocomplete(parser, always_complete_options=False)
else:
def try_argcomplete(parser):
pass
filescompleter = None
@@ -29,9 +29,12 @@ def format_exception_only(etype, value):
#
# Clear these out first because issubtype(string1, SyntaxError)
# would throw another exception and mask the original problem.
if (isinstance(etype, BaseException) or
isinstance(etype, types.InstanceType) or
etype is None or type(etype) is str):
if (
isinstance(etype, BaseException)
or isinstance(etype, types.InstanceType)
or etype is None
or type(etype) is str
):
return [_format_final_exc_line(etype, value)]
stype = etype.__name__

@@ -50,14 +53,14 @@ def format_exception_only(etype, value):
lines.append(' File "%s", line %d\n' % (filename, lineno))
if badline is not None:
if isinstance(badline, bytes): # python 2 only
badline = badline.decode('utf-8', 'replace')
lines.append(u' %s\n' % badline.strip())
badline = badline.decode("utf-8", "replace")
lines.append(u" %s\n" % badline.strip())
if offset is not None:
caretspace = badline.rstrip('\n')[:offset].lstrip()
caretspace = badline.rstrip("\n")[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
caretspace = ((c.isspace() and c or " ") for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(' %s^\n' % ''.join(caretspace))
lines.append(" %s^\n" % "".join(caretspace))
value = msg
lines.append(_format_final_exc_line(stype, value))

@@ -82,4 +85,4 @@ def _some_str(value):
return str(value)
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
return "<unprintable %s object>" % type(value).__name__
@@ -10,6 +10,7 @@ from weakref import ref
from _pytest.compat import _PY2, _PY3, PY35, safe_str
import py
builtin_repr = repr
if _PY3:

@@ -61,6 +62,7 @@ class Code(object):
""" return a _pytest._code.Source object for the full source file of the code
"""
from _pytest._code import source
full, _ = source.findsource(self.raw)
return full

@@ -69,6 +71,7 @@ class Code(object):
"""
# return source only for that part of code
import _pytest._code
return _pytest._code.Source(self.raw)
def getargs(self, var=False):

@@ -101,6 +104,7 @@ class Frame(object):
def statement(self):
""" statement this frame is at """
import _pytest._code
if self.code.fullsource is None:
return _pytest._code.Source("")
return self.code.fullsource.getstatement(self.lineno)

@@ -144,7 +148,7 @@ class Frame(object):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
pass # this can occur when using Psyco
return retval
@@ -166,6 +170,7 @@ class TracebackEntry(object):
@property
def frame(self):
import _pytest._code
return _pytest._code.Frame(self._rawentry.tb_frame)
@property

@@ -188,6 +193,7 @@ class TracebackEntry(object):
def getlocals(self):
return self.frame.f_locals
locals = property(getlocals, None, None, "locals of underlaying frame")
def getfirstlinesource(self):

@@ -199,6 +205,7 @@ class TracebackEntry(object):
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from _pytest._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None

@@ -209,8 +216,9 @@ class TracebackEntry(object):
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(self.lineno, source,
astnode=astnode)
astnode, _, end = getstatementrange_ast(
self.lineno, source, astnode=astnode
)
except SyntaxError:
end = self.lineno + 1
else:

@@ -230,10 +238,10 @@ class TracebackEntry(object):
mostly for internal use
"""
try:
tbh = self.frame.f_locals['__tracebackhide__']
tbh = self.frame.f_locals["__tracebackhide__"]
except KeyError:
try:
tbh = self.frame.f_globals['__tracebackhide__']
tbh = self.frame.f_globals["__tracebackhide__"]
except KeyError:
return False
@@ -246,7 +254,7 @@ class TracebackEntry(object):
try:
fn = str(self.path)
except py.error.Error:
fn = '???'
fn = "???"
name = self.frame.code.name
try:
line = str(self.statement).lstrip()

@@ -258,6 +266,7 @@ class TracebackEntry(object):
def name(self):
return self.frame.code.raw.co_name
name = property(name, None, None, "co_name of underlaying code")

@@ -270,11 +279,13 @@ class Traceback(list):
def __init__(self, tb, excinfo=None):
""" initialize from given python traceback object and ExceptionInfo """
self._excinfo = excinfo
if hasattr(tb, 'tb_next'):
if hasattr(tb, "tb_next"):
def f(cur):
while cur is not None:
yield self.Entry(cur, excinfo=excinfo)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)

@@ -292,11 +303,16 @@ class Traceback(list):
for x in self:
code = x.frame.code
codepath = code.path
if ((path is None or codepath == path) and
(excludepath is None or not hasattr(codepath, 'relto') or
not codepath.relto(excludepath)) and
(lineno is None or x.lineno == lineno) and
(firstlineno is None or x.frame.code.firstlineno == firstlineno)):
if (
(path is None or codepath == path)
and (
excludepath is None
or not hasattr(codepath, "relto")
or not codepath.relto(excludepath)
)
and (lineno is None or x.lineno == lineno)
and (firstlineno is None or x.frame.code.firstlineno == firstlineno)
):
return Traceback(x._rawentry, self._excinfo)
return self
@@ -345,35 +361,41 @@ class Traceback(list):
f = entry.frame
loc = f.f_locals
for otherloc in values:
if f.is_true(f.eval(co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc)):
if f.is_true(
f.eval(
co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc,
)
):
return i
values.append(entry.frame.f_locals)
return None
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
'?', 'eval')
co_equal = compile(
"__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval"
)
class ExceptionInfo(object):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_striptext = ''
_assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert "
_striptext = ""
_assert_start_repr = "AssertionError(u'assert " if _PY2 else "AssertionError('assert "
def __init__(self, tup=None, exprinfo=None):
import _pytest._code
if tup is None:
tup = sys.exc_info()
if exprinfo is None and isinstance(tup[1], AssertionError):
exprinfo = getattr(tup[1], 'msg', None)
exprinfo = getattr(tup[1], "msg", None)
if exprinfo is None:
exprinfo = py.io.saferepr(tup[1])
if exprinfo and exprinfo.startswith(self._assert_start_repr):
self._striptext = 'AssertionError: '
self._striptext = "AssertionError: "
self._excinfo = tup
#: the exception class
self.type = tup[0]

@@ -398,7 +420,7 @@ class ExceptionInfo(object):
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = ''.join(lines)
text = "".join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
@@ -415,8 +437,14 @@ class ExceptionInfo(object):
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno + 1, exconly)
def getrepr(self, showlocals=False, style="long",
abspath=False, tbfilter=True, funcargs=False):
def getrepr(
self,
showlocals=False,
style="long",
abspath=False,
tbfilter=True,
funcargs=False,
):
""" return str()able representation of this exception info.
showlocals: show locals per traceback entry
style: long|short|no|native traceback style

@@ -424,16 +452,23 @@ class ExceptionInfo(object):
in case of style==native, tbfilter and showlocals is ignored.
"""
if style == 'native':
return ReprExceptionInfo(ReprTracebackNative(
traceback.format_exception(
self.type,
self.value,
self.traceback[0]._rawentry,
)), self._getreprcrash())
if style == "native":
return ReprExceptionInfo(
ReprTracebackNative(
traceback.format_exception(
self.type, self.value, self.traceback[0]._rawentry
)
),
self._getreprcrash(),
)
fmt = FormattedExcinfo(showlocals=showlocals, style=style,
abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
fmt = FormattedExcinfo(
showlocals=showlocals,
style=style,
abspath=abspath,
tbfilter=tbfilter,
funcargs=funcargs,
)
return fmt.repr_excinfo(self)
def __str__(self):

@@ -455,8 +490,7 @@ class ExceptionInfo(object):
"""
__tracebackhide__ = True
if not re.search(regexp, str(self.value)):
assert 0, "Pattern '{!s}' not found in '{!s}'".format(
regexp, self.value)
assert 0, "Pattern '{!s}' not found in '{!s}'".format(regexp, self.value)
return True
@ -508,6 +542,7 @@ class FormattedExcinfo(object):
|
|||
def get_source(self, source, line_index=-1, excinfo=None, short=False):
|
||||
""" return formatted and marked up source lines. """
|
||||
import _pytest._code
|
||||
|
||||
lines = []
|
||||
if source is None or line_index >= len(source.lines):
|
||||
source = _pytest._code.Source("???")
|
||||
|
@ -532,7 +567,7 @@ class FormattedExcinfo(object):
|
|||
lines = []
|
||||
indent = " " * indent
|
||||
# get the real exception information out
|
||||
exlines = excinfo.exconly(tryshort=True).split('\n')
|
||||
exlines = excinfo.exconly(tryshort=True).split("\n")
|
||||
failindent = self.fail_marker + indent[1:]
|
||||
for line in exlines:
|
||||
lines.append(failindent + line)
|
||||
|
@ -547,7 +582,7 @@ class FormattedExcinfo(object):
|
|||
keys.sort()
|
||||
for name in keys:
|
||||
value = locals[name]
|
||||
if name == '__builtins__':
|
||||
if name == "__builtins__":
|
||||
lines.append("__builtins__ = <builtins>")
|
||||
else:
|
||||
# This formatting could all be handled by the
|
||||
|
@ -565,6 +600,7 @@ class FormattedExcinfo(object):
|
|||
|
||||
def repr_traceback_entry(self, entry, excinfo=None):
|
||||
import _pytest._code
|
||||
|
||||
source = self._getentrysource(entry)
|
||||
if source is None:
|
||||
source = _pytest._code.Source("???")
|
||||
|
@ -641,11 +677,16 @@ class FormattedExcinfo(object):
|
|||
except Exception as e:
|
||||
max_frames = 10
|
||||
extraline = (
|
||||
'!!! Recursion error detected, but an error occurred locating the origin of recursion.\n'
|
||||
' The following exception happened when comparing locals in the stack frame:\n'
|
||||
' {exc_type}: {exc_msg}\n'
|
||||
' Displaying first and last {max_frames} stack frames out of {total}.'
|
||||
).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback))
|
||||
"!!! Recursion error detected, but an error occurred locating the origin of recursion.\n"
|
||||
" The following exception happened when comparing locals in the stack frame:\n"
|
||||
" {exc_type}: {exc_msg}\n"
|
||||
" Displaying first and last {max_frames} stack frames out of {total}."
|
||||
).format(
|
||||
exc_type=type(e).__name__,
|
||||
exc_msg=safe_str(e),
|
||||
max_frames=max_frames,
|
||||
total=len(traceback),
|
||||
)
|
||||
traceback = traceback[:max_frames] + traceback[-max_frames:]
|
||||
else:
|
||||
if recursionindex is not None:
|
||||
|
@ -673,18 +714,24 @@ class FormattedExcinfo(object):
|
|||
else:
|
||||
# fallback to native repr if the exception doesn't have a traceback:
|
||||
# ExceptionInfo objects require a full traceback to work
|
||||
reprtraceback = ReprTracebackNative(traceback.format_exception(type(e), e, None))
|
||||
reprtraceback = ReprTracebackNative(
|
||||
traceback.format_exception(type(e), e, None)
|
||||
)
|
||||
reprcrash = None
|
||||
|
||||
repr_chain += [(reprtraceback, reprcrash, descr)]
|
||||
if e.__cause__ is not None:
|
||||
e = e.__cause__
|
||||
excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
|
||||
descr = 'The above exception was the direct cause of the following exception:'
|
||||
excinfo = ExceptionInfo(
|
||||
(type(e), e, e.__traceback__)
|
||||
) if e.__traceback__ else None
|
||||
descr = "The above exception was the direct cause of the following exception:"
|
||||
elif (e.__context__ is not None and not e.__suppress_context__):
|
||||
e = e.__context__
|
||||
excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
|
||||
descr = 'During handling of the above exception, another exception occurred:'
|
||||
excinfo = ExceptionInfo(
|
||||
(type(e), e, e.__traceback__)
|
||||
) if e.__traceback__ else None
|
||||
descr = "During handling of the above exception, another exception occurred:"
|
||||
else:
|
||||
e = None
|
||||
repr_chain.reverse()
|
||||
|
@ -692,10 +739,11 @@ class FormattedExcinfo(object):
|
|||
|
||||
|
||||
class TerminalRepr(object):
|
||||
|
||||
def __str__(self):
|
||||
s = self.__unicode__()
|
||||
if _PY2:
|
||||
s = s.encode('utf-8')
|
||||
s = s.encode("utf-8")
|
||||
return s
|
||||
|
||||
def __unicode__(self):
|
||||
|
@ -711,6 +759,7 @@ class TerminalRepr(object):
|
|||
|
||||
|
||||
class ExceptionRepr(TerminalRepr):
|
||||
|
||||
def __init__(self):
|
||||
self.sections = []
|
||||
|
||||
|
@ -724,6 +773,7 @@ class ExceptionRepr(TerminalRepr):
|
|||
|
||||
|
||||
class ExceptionChainRepr(ExceptionRepr):
|
||||
|
||||
def __init__(self, chain):
|
||||
super(ExceptionChainRepr, self).__init__()
|
||||
self.chain = chain
|
||||
|
@ -742,6 +792,7 @@ class ExceptionChainRepr(ExceptionRepr):
|
|||
|
||||
|
||||
class ReprExceptionInfo(ExceptionRepr):
|
||||
|
||||
def __init__(self, reprtraceback, reprcrash):
|
||||
super(ReprExceptionInfo, self).__init__()
|
||||
self.reprtraceback = reprtraceback
|
||||
|
@ -768,8 +819,11 @@ class ReprTraceback(TerminalRepr):
|
|||
entry.toterminal(tw)
|
||||
if i < len(self.reprentries) - 1:
|
||||
next_entry = self.reprentries[i + 1]
|
||||
if entry.style == "long" or \
|
||||
entry.style == "short" and next_entry.style == "long":
|
||||
if (
|
||||
entry.style == "long"
|
||||
or entry.style == "short"
|
||||
and next_entry.style == "long"
|
||||
):
|
||||
tw.sep(self.entrysep)
|
||||
|
||||
if self.extraline:
|
||||
|
@ -777,6 +831,7 @@ class ReprTraceback(TerminalRepr):
|
|||
|
||||
|
||||
class ReprTracebackNative(ReprTraceback):
|
||||
|
||||
def __init__(self, tblines):
|
||||
self.style = "native"
|
||||
self.reprentries = [ReprEntryNative(tblines)]
|
||||
|
@ -826,12 +881,11 @@ class ReprEntry(TerminalRepr):
|
|||
self.reprfileloc.toterminal(tw)
|
||||
|
||||
def __str__(self):
|
||||
return "%s\n%s\n%s" % ("\n".join(self.lines),
|
||||
self.reprlocals,
|
||||
self.reprfileloc)
|
||||
return "%s\n%s\n%s" % ("\n".join(self.lines), self.reprlocals, self.reprfileloc)
|
||||
|
||||
|
||||
class ReprFileLocation(TerminalRepr):
|
||||
|
||||
def __init__(self, path, lineno, message):
|
||||
self.path = str(path)
|
||||
self.lineno = lineno
|
||||
|
@ -849,6 +903,7 @@ class ReprFileLocation(TerminalRepr):
|
|||
|
||||
|
||||
class ReprLocals(TerminalRepr):
|
||||
|
||||
def __init__(self, lines):
|
||||
self.lines = lines
|
||||
|
||||
|
@ -858,6 +913,7 @@ class ReprLocals(TerminalRepr):
|
|||
|
||||
|
||||
class ReprFuncArgs(TerminalRepr):
|
||||
|
||||
def __init__(self, args):
|
||||
self.args = args
|
||||
|
||||
|
@ -885,22 +941,26 @@ def getrawcode(obj, trycall=True):
|
|||
try:
|
||||
return obj.__code__
|
||||
except AttributeError:
|
||||
obj = getattr(obj, 'im_func', obj)
|
||||
obj = getattr(obj, 'func_code', obj)
|
||||
obj = getattr(obj, 'f_code', obj)
|
||||
obj = getattr(obj, '__code__', obj)
|
||||
if trycall and not hasattr(obj, 'co_firstlineno'):
|
||||
if hasattr(obj, '__call__') and not inspect.isclass(obj):
|
||||
obj = getattr(obj, "im_func", obj)
|
||||
obj = getattr(obj, "func_code", obj)
|
||||
obj = getattr(obj, "f_code", obj)
|
||||
obj = getattr(obj, "__code__", obj)
|
||||
if trycall and not hasattr(obj, "co_firstlineno"):
|
||||
if hasattr(obj, "__call__") and not inspect.isclass(obj):
|
||||
x = getrawcode(obj.__call__, trycall=False)
|
||||
if hasattr(x, 'co_firstlineno'):
|
||||
if hasattr(x, "co_firstlineno"):
|
||||
return x
|
||||
return obj
|
||||
|
||||
|
||||
if PY35: # RecursionError introduced in 3.5
|
||||
|
||||
def is_recursion_error(excinfo):
|
||||
return excinfo.errisinstance(RecursionError) # noqa
|
||||
|
||||
|
||||
else:
|
||||
|
||||
def is_recursion_error(excinfo):
|
||||
if not excinfo.errisinstance(RuntimeError):
|
||||
return False
|
||||
|
|
|
@ -21,8 +21,8 @@ class Source(object):
|
|||
|
||||
def __init__(self, *parts, **kwargs):
|
||||
self.lines = lines = []
|
||||
de = kwargs.get('deindent', True)
|
||||
rstrip = kwargs.get('rstrip', True)
|
||||
de = kwargs.get("deindent", True)
|
||||
rstrip = kwargs.get("rstrip", True)
|
||||
for part in parts:
|
||||
if not part:
|
||||
partlines = []
|
||||
|
@ -31,7 +31,7 @@ class Source(object):
|
|||
elif isinstance(part, (tuple, list)):
|
||||
partlines = [x.rstrip("\n") for x in part]
|
||||
elif isinstance(part, six.string_types):
|
||||
partlines = part.split('\n')
|
||||
partlines = part.split("\n")
|
||||
if rstrip:
|
||||
while partlines:
|
||||
if partlines[-1].strip():
|
||||
|
@ -79,7 +79,7 @@ class Source(object):
|
|||
source.lines[:] = self.lines[start:end]
|
||||
return source
|
||||
|
||||
def putaround(self, before='', after='', indent=' ' * 4):
|
||||
def putaround(self, before="", after="", indent=" " * 4):
|
||||
""" return a copy of the source object with
|
||||
'before' and 'after' wrapped around it.
|
||||
"""
|
||||
|
@ -90,7 +90,7 @@ class Source(object):
|
|||
newsource.lines = before.lines + lines + after.lines
|
||||
return newsource
|
||||
|
||||
def indent(self, indent=' ' * 4):
|
||||
def indent(self, indent=" " * 4):
|
||||
""" return a copy of the source object with
|
||||
all lines indented by the given indent-string.
|
||||
"""
|
||||
|
@ -139,7 +139,7 @@ class Source(object):
|
|||
source = str(self)
|
||||
try:
|
||||
# compile(source+'\n', "x", "exec")
|
||||
syntax_checker(source + '\n')
|
||||
syntax_checker(source + "\n")
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except Exception:
|
||||
|
@ -150,9 +150,14 @@ class Source(object):
|
|||
def __str__(self):
|
||||
return "\n".join(self.lines)
|
||||
|
||||
def compile(self, filename=None, mode='exec',
|
||||
flag=generators.compiler_flag,
|
||||
dont_inherit=0, _genframe=None):
|
||||
def compile(
|
||||
self,
|
||||
filename=None,
|
||||
mode="exec",
|
||||
flag=generators.compiler_flag,
|
||||
dont_inherit=0,
|
||||
_genframe=None,
|
||||
):
|
||||
""" return compiled code object. if filename is None
|
||||
invent an artificial filename which displays
|
||||
the source/line position of the caller frame.
|
||||
|
@ -164,10 +169,10 @@ class Source(object):
|
|||
base = "<%d-codegen " % self._compilecounter
|
||||
self.__class__._compilecounter += 1
|
||||
if not filename:
|
||||
filename = base + '%s:%d>' % (fn, lineno)
|
||||
filename = base + "%s:%d>" % (fn, lineno)
|
||||
else:
|
||||
filename = base + '%r %s:%d>' % (filename, fn, lineno)
|
||||
source = "\n".join(self.lines) + '\n'
|
||||
filename = base + "%r %s:%d>" % (filename, fn, lineno)
|
||||
source = "\n".join(self.lines) + "\n"
|
||||
try:
|
||||
co = cpy_compile(source, filename, mode, flag)
|
||||
except SyntaxError:
|
||||
|
@ -175,9 +180,9 @@ class Source(object):
|
|||
# re-represent syntax errors from parsing python strings
|
||||
msglines = self.lines[:ex.lineno]
|
||||
if ex.offset:
|
||||
msglines.append(" " * ex.offset + '^')
|
||||
msglines.append(" " * ex.offset + "^")
|
||||
msglines.append("(code was compiled probably from here: %s)" % filename)
|
||||
newex = SyntaxError('\n'.join(msglines))
|
||||
newex = SyntaxError("\n".join(msglines))
|
||||
newex.offset = ex.offset
|
||||
newex.lineno = ex.lineno
|
||||
newex.text = ex.text
|
||||
|
@ -189,12 +194,15 @@ class Source(object):
|
|||
linecache.cache[filename] = (1, None, lines, filename)
|
||||
return co
|
||||
|
||||
|
||||
#
|
||||
# public API shortcut functions
|
||||
#
|
||||
|
||||
|
||||
def compile_(source, filename=None, mode='exec', flags=generators.compiler_flag, dont_inherit=0):
|
||||
def compile_(
|
||||
source, filename=None, mode="exec", flags=generators.compiler_flag, dont_inherit=0
|
||||
):
|
||||
""" compile the given source to a raw code object,
|
||||
and maintain an internal cache which allows later
|
||||
retrieval of the source code for the code object
|
||||
|
@ -214,6 +222,7 @@ def getfslineno(obj):
|
|||
If the source cannot be determined return ("", -1)
|
||||
"""
|
||||
from .code import Code
|
||||
|
||||
try:
|
||||
code = Code(obj)
|
||||
except TypeError:
|
||||
|
@ -235,6 +244,7 @@ def getfslineno(obj):
|
|||
assert isinstance(lineno, int)
|
||||
return fspath, lineno
|
||||
|
||||
|
||||
#
|
||||
# helper functions
|
||||
#
|
||||
|
@ -254,11 +264,12 @@ def findsource(obj):
|
|||
|
||||
def getsource(obj, **kwargs):
|
||||
from .code import getrawcode
|
||||
|
||||
obj = getrawcode(obj)
|
||||
try:
|
||||
strsrc = inspect.getsource(obj)
|
||||
except IndentationError:
|
||||
strsrc = "\"Buggy python version consider upgrading, cannot get source\""
|
||||
strsrc = '"Buggy python version consider upgrading, cannot get source"'
|
||||
assert isinstance(strsrc, str)
|
||||
return Source(strsrc, **kwargs)
|
||||
|
||||
|
@ -279,12 +290,14 @@ def deindent(lines, offset=None):
|
|||
|
||||
def readline_generator(lines):
|
||||
for line in lines:
|
||||
yield line + '\n'
|
||||
yield line + "\n"
|
||||
|
||||
it = readline_generator(lines)
|
||||
|
||||
try:
|
||||
for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
|
||||
for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(
|
||||
lambda: next(it)
|
||||
):
|
||||
if sline > len(lines):
|
||||
break # End of input reached
|
||||
if sline > len(newlines):
|
||||
|
@ -306,6 +319,7 @@ def deindent(lines, offset=None):
|
|||
|
||||
def get_statement_startend2(lineno, node):
|
||||
import ast
|
||||
|
||||
# flatten all statements and except handlers into one lineno-list
|
||||
# AST's line numbers start indexing at 1
|
||||
values = []
|
||||
|
|
|
@ -12,17 +12,19 @@ from _pytest.assertion import truncate
|
|||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("debugconfig")
|
||||
group.addoption('--assert',
|
||||
action="store",
|
||||
dest="assertmode",
|
||||
choices=("rewrite", "plain",),
|
||||
default="rewrite",
|
||||
metavar="MODE",
|
||||
help="""Control assertion debugging tools. 'plain'
|
||||
group.addoption(
|
||||
"--assert",
|
||||
action="store",
|
||||
dest="assertmode",
|
||||
choices=("rewrite", "plain"),
|
||||
default="rewrite",
|
||||
metavar="MODE",
|
||||
help="""Control assertion debugging tools. 'plain'
|
||||
performs no assertion debugging. 'rewrite'
|
||||
(the default) rewrites assert statements in
|
||||
test modules on import to provide assert
|
||||
expression information.""")
|
||||
expression information.""",
|
||||
)
|
||||
|
||||
|
||||
def register_assert_rewrite(*names):
|
||||
|
@ -38,7 +40,7 @@ def register_assert_rewrite(*names):
|
|||
"""
|
||||
for name in names:
|
||||
if not isinstance(name, str):
|
||||
msg = 'expected module names as *args, got {0} instead'
|
||||
msg = "expected module names as *args, got {0} instead"
|
||||
raise TypeError(msg.format(repr(names)))
|
||||
for hook in sys.meta_path:
|
||||
if isinstance(hook, rewrite.AssertionRewritingHook):
|
||||
|
@ -68,13 +70,13 @@ class AssertionState(object):
|
|||
def install_importhook(config):
|
||||
"""Try to install the rewrite hook, raise SystemError if it fails."""
|
||||
# Jython has an AST bug that make the assertion rewriting hook malfunction.
|
||||
if (sys.platform.startswith('java')):
|
||||
raise SystemError('rewrite not supported')
|
||||
if sys.platform.startswith("java"):
|
||||
raise SystemError("rewrite not supported")
|
||||
|
||||
config._assertstate = AssertionState(config, 'rewrite')
|
||||
config._assertstate = AssertionState(config, "rewrite")
|
||||
config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config)
|
||||
sys.meta_path.insert(0, hook)
|
||||
config._assertstate.trace('installed rewrite import hook')
|
||||
config._assertstate.trace("installed rewrite import hook")
|
||||
|
||||
def undo():
|
||||
hook = config._assertstate.hook
|
||||
|
@ -89,7 +91,7 @@ def pytest_collection(session):
|
|||
# this hook is only called when test modules are collected
|
||||
# so for example not in the master process of pytest-xdist
|
||||
# (which does not collect test modules)
|
||||
assertstate = getattr(session.config, '_assertstate', None)
|
||||
assertstate = getattr(session.config, "_assertstate", None)
|
||||
if assertstate:
|
||||
if assertstate.hook is not None:
|
||||
assertstate.hook.set_session(session)
|
||||
|
@ -103,6 +105,7 @@ def pytest_runtest_setup(item):
|
|||
pytest_assertrepr_compare hook. This sets up this custom
|
||||
comparison for the test.
|
||||
"""
|
||||
|
||||
def callbinrepr(op, left, right):
|
||||
"""Call the pytest_assertrepr_compare hook and prepare the result
|
||||
|
||||
|
@ -119,7 +122,8 @@ def pytest_runtest_setup(item):
|
|||
pretty printing.
|
||||
"""
|
||||
hook_result = item.ihook.pytest_assertrepr_compare(
|
||||
config=item.config, op=op, left=left, right=right)
|
||||
config=item.config, op=op, left=left, right=right
|
||||
)
|
||||
for new_expl in hook_result:
|
||||
if new_expl:
|
||||
new_expl = truncate.truncate_if_required(new_expl, item)
|
||||
|
@ -128,6 +132,7 @@ def pytest_runtest_setup(item):
|
|||
if item.config.getvalue("assertmode") == "rewrite":
|
||||
res = res.replace("%", "%%")
|
||||
return res
|
||||
|
||||
util._reprcompare = callbinrepr
|
||||
|
||||
|
||||
|
@ -136,7 +141,7 @@ def pytest_runtest_teardown(item):
|
|||
|
||||
|
||||
def pytest_sessionfinish(session):
|
||||
assertstate = getattr(session.config, '_assertstate', None)
|
||||
assertstate = getattr(session.config, "_assertstate", None)
|
||||
if assertstate:
|
||||
if assertstate.hook is not None:
|
||||
assertstate.hook.set_session(None)
|
||||
|
|
|
@ -40,6 +40,7 @@ ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
|
|||
if sys.version_info >= (3, 5):
|
||||
ast_Call = ast.Call
|
||||
else:
|
||||
|
||||
def ast_Call(a, b, c):
|
||||
return ast.Call(a, b, c, None, None)
|
||||
|
||||
|
@ -151,14 +152,13 @@ class AssertionRewritingHook(object):
|
|||
def _should_rewrite(self, name, fn_pypath, state):
|
||||
# always rewrite conftest files
|
||||
fn = str(fn_pypath)
|
||||
if fn_pypath.basename == 'conftest.py':
|
||||
if fn_pypath.basename == "conftest.py":
|
||||
state.trace("rewriting conftest file: %r" % (fn,))
|
||||
return True
|
||||
|
||||
if self.session is not None:
|
||||
if self.session.isinitpath(fn):
|
||||
state.trace("matched test file (was specified on cmdline): %r" %
|
||||
(fn,))
|
||||
state.trace("matched test file (was specified on cmdline): %r" % (fn,))
|
||||
return True
|
||||
|
||||
# modules not passed explicitly on the command line are only
|
||||
|
@ -169,7 +169,7 @@ class AssertionRewritingHook(object):
|
|||
return True
|
||||
|
||||
for marked in self._must_rewrite:
|
||||
if name == marked or name.startswith(marked + '.'):
|
||||
if name == marked or name.startswith(marked + "."):
|
||||
state.trace("matched marked file %r (from %r)" % (name, marked))
|
||||
return True
|
||||
|
||||
|
@ -181,19 +181,20 @@ class AssertionRewritingHook(object):
|
|||
The named module or package as well as any nested modules will
|
||||
be rewritten on import.
|
||||
"""
|
||||
already_imported = (set(names)
|
||||
.intersection(sys.modules)
|
||||
.difference(self._rewritten_names))
|
||||
already_imported = (
|
||||
set(names).intersection(sys.modules).difference(self._rewritten_names)
|
||||
)
|
||||
for name in already_imported:
|
||||
if not AssertionRewriter.is_rewrite_disabled(
|
||||
sys.modules[name].__doc__ or ""):
|
||||
sys.modules[name].__doc__ or ""
|
||||
):
|
||||
self._warn_already_imported(name)
|
||||
self._must_rewrite.update(names)
|
||||
|
||||
def _warn_already_imported(self, name):
|
||||
self.config.warn(
|
||||
'P1',
|
||||
'Module already imported so cannot be rewritten: %s' % name)
|
||||
"P1", "Module already imported so cannot be rewritten: %s" % name
|
||||
)
|
||||
|
||||
def load_module(self, name):
|
||||
# If there is an existing module object named 'fullname' in
|
||||
|
@ -237,6 +238,7 @@ class AssertionRewritingHook(object):
|
|||
"""
|
||||
try:
|
||||
import pkg_resources
|
||||
|
||||
# access an attribute in case a deferred importer is present
|
||||
pkg_resources.__name__
|
||||
except ImportError:
|
||||
|
@ -249,7 +251,7 @@ class AssertionRewritingHook(object):
|
|||
def get_data(self, pathname):
|
||||
"""Optional PEP302 get_data API.
|
||||
"""
|
||||
with open(pathname, 'rb') as f:
|
||||
with open(pathname, "rb") as f:
|
||||
return f.read()
|
||||
|
||||
|
||||
|
@ -282,7 +284,7 @@ RN = "\r\n".encode("utf-8")
|
|||
N = "\n".encode("utf-8")
|
||||
|
||||
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
|
||||
BOM_UTF8 = '\xef\xbb\xbf'
|
||||
BOM_UTF8 = "\xef\xbb\xbf"
|
||||
|
||||
|
||||
def _rewrite_test(config, fn):
|
||||
|
@ -307,9 +309,11 @@ def _rewrite_test(config, fn):
|
|||
# gets this right.
|
||||
end1 = source.find("\n")
|
||||
end2 = source.find("\n", end1 + 1)
|
||||
if (not source.startswith(BOM_UTF8) and
|
||||
cookie_re.match(source[0:end1]) is None and
|
||||
cookie_re.match(source[end1 + 1:end2]) is None):
|
||||
if (
|
||||
not source.startswith(BOM_UTF8)
|
||||
and cookie_re.match(source[0:end1]) is None
|
||||
and cookie_re.match(source[end1 + 1:end2]) is None
|
||||
):
|
||||
if hasattr(state, "_indecode"):
|
||||
# encodings imported us again, so don't rewrite.
|
||||
return None, None
|
||||
|
@ -354,20 +358,23 @@ def _read_pyc(source, pyc, trace=lambda x: None):
|
|||
size = source.size()
|
||||
data = fp.read(12)
|
||||
except EnvironmentError as e:
|
||||
trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
|
||||
trace("_read_pyc(%s): EnvironmentError %s" % (source, e))
|
||||
return None
|
||||
# Check for invalid or out of date pyc file.
|
||||
if (len(data) != 12 or data[:4] != imp.get_magic() or
|
||||
struct.unpack("<ll", data[4:]) != (mtime, size)):
|
||||
trace('_read_pyc(%s): invalid or out of date pyc' % source)
|
||||
if (
|
||||
len(data) != 12
|
||||
or data[:4] != imp.get_magic()
|
||||
or struct.unpack("<ll", data[4:]) != (mtime, size)
|
||||
):
|
||||
trace("_read_pyc(%s): invalid or out of date pyc" % source)
|
||||
return None
|
||||
try:
|
||||
co = marshal.load(fp)
|
||||
except Exception as e:
|
||||
trace('_read_pyc(%s): marshal.load error %s' % (source, e))
|
||||
trace("_read_pyc(%s): marshal.load error %s" % (source, e))
|
||||
return None
|
||||
if not isinstance(co, types.CodeType):
|
||||
trace('_read_pyc(%s): not a code object' % source)
|
||||
trace("_read_pyc(%s): not a code object" % source)
|
||||
return None
|
||||
return co
|
||||
|
||||
|
@ -437,7 +444,7 @@ def _format_boolop(explanations, is_or):
|
|||
t = six.text_type
|
||||
else:
|
||||
t = six.binary_type
|
||||
return explanation.replace(t('%'), t('%%'))
|
||||
return explanation.replace(t("%"), t("%%"))
|
||||
|
||||
|
||||
def _call_reprcompare(ops, results, expls, each_obj):
|
||||
|
@ -455,12 +462,7 @@ def _call_reprcompare(ops, results, expls, each_obj):
|
|||
return expl
|
||||
|
||||
|
||||
unary_map = {
|
||||
ast.Not: "not %s",
|
||||
ast.Invert: "~%s",
|
||||
ast.USub: "-%s",
|
||||
ast.UAdd: "+%s"
|
||||
}
|
||||
unary_map = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"}
|
||||
|
||||
binop_map = {
|
||||
ast.BitOr: "|",
|
||||
|
@ -484,7 +486,7 @@ binop_map = {
|
|||
ast.Is: "is",
|
||||
ast.IsNot: "is not",
|
||||
ast.In: "in",
|
||||
ast.NotIn: "not in"
|
||||
ast.NotIn: "not in",
|
||||
}
|
||||
# Python 3.5+ compatibility
|
||||
try:
|
||||
|
@ -496,12 +498,14 @@ except AttributeError:
|
|||
if hasattr(ast, "NameConstant"):
|
||||
_NameConstant = ast.NameConstant
|
||||
else:
|
||||
|
||||
def _NameConstant(c):
|
||||
return ast.Name(str(c), ast.Load())
|
||||
|
||||
|
||||
def set_location(node, lineno, col_offset):
|
||||
"""Set node location information recursively."""
|
||||
|
||||
def _fix(node, lineno, col_offset):
|
||||
if "lineno" in node._attributes:
|
||||
node.lineno = lineno
|
||||
|
@ -509,6 +513,7 @@ def set_location(node, lineno, col_offset):
|
|||
node.col_offset = col_offset
|
||||
for child in ast.iter_child_nodes(node):
|
||||
_fix(child, lineno, col_offset)
|
||||
|
||||
_fix(node, lineno, col_offset)
|
||||
return node
|
||||
|
||||
|
@ -577,8 +582,10 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
return
|
||||
# Insert some special imports at the top of the module but after any
|
||||
# docstrings and __future__ imports.
|
||||
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
|
||||
ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
|
||||
aliases = [
|
||||
ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
|
||||
ast.alias("_pytest.assertion.rewrite", "@pytest_ar"),
|
||||
]
|
||||
doc = getattr(mod, "docstring", None)
|
||||
expect_docstring = doc is None
|
||||
if doc is not None and self.is_rewrite_disabled(doc):
|
||||
|
@ -586,21 +593,28 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
pos = 0
|
||||
lineno = 1
|
||||
for item in mod.body:
|
||||
if (expect_docstring and isinstance(item, ast.Expr) and
|
||||
isinstance(item.value, ast.Str)):
|
||||
if (
|
||||
expect_docstring
|
||||
and isinstance(item, ast.Expr)
|
||||
and isinstance(item.value, ast.Str)
|
||||
):
|
||||
doc = item.value.s
|
||||
if self.is_rewrite_disabled(doc):
|
||||
return
|
||||
expect_docstring = False
|
||||
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
|
||||
item.module != "__future__"):
|
||||
elif (
|
||||
not isinstance(item, ast.ImportFrom)
|
||||
or item.level > 0
|
||||
or item.module != "__future__"
|
||||
):
|
||||
lineno = item.lineno
|
||||
break
|
||||
pos += 1
|
||||
else:
|
||||
lineno = item.lineno
|
||||
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
|
||||
for alias in aliases]
|
||||
imports = [
|
||||
ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases
|
||||
]
|
||||
mod.body[pos:pos] = imports
|
||||
# Collect asserts.
|
||||
nodes = [mod]
|
||||
|
@ -618,10 +632,13 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
if isinstance(child, ast.AST):
|
||||
nodes.append(child)
|
||||
setattr(node, name, new)
|
||||
elif (isinstance(field, ast.AST) and
|
||||
# Don't recurse into expressions as they can't contain
|
||||
# asserts.
|
||||
not isinstance(field, ast.expr)):
|
||||
elif (
|
||||
isinstance(field, ast.AST)
|
||||
and
|
||||
# Don't recurse into expressions as they can't contain
|
||||
# asserts.
|
||||
not isinstance(field, ast.expr)
|
||||
):
|
||||
nodes.append(field)
|
||||
|
||||
@staticmethod
|
||||
|
@ -719,8 +736,11 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
"""
|
||||
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
|
||||
fslocation = (self.module_path, assert_.lineno)
|
||||
self.config.warn('R1', 'assertion is always true, perhaps '
|
||||
'remove parentheses?', fslocation=fslocation)
|
||||
self.config.warn(
|
||||
"R1",
|
||||
"assertion is always true, perhaps " "remove parentheses?",
|
||||
fslocation=fslocation,
|
||||
)
|
||||
self.statements = []
|
||||
self.variables = []
|
||||
self.variable_counter = itertools.count()
|
||||
|
@ -734,7 +754,7 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
negation = ast.UnaryOp(ast.Not(), top_condition)
|
||||
self.statements.append(ast.If(negation, body, []))
|
||||
if assert_.msg:
|
||||
assertmsg = self.helper('format_assertmsg', assert_.msg)
|
||||
assertmsg = self.helper("format_assertmsg", assert_.msg)
|
||||
explanation = "\n>assert " + explanation
|
||||
else:
|
||||
assertmsg = ast.Str("")
|
||||
|
@ -751,8 +771,7 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
body.append(raise_)
|
||||
# Clear temporary variables by setting them to None.
|
||||
if self.variables:
|
||||
variables = [ast.Name(name, ast.Store())
|
||||
for name in self.variables]
|
||||
variables = [ast.Name(name, ast.Store()) for name in self.variables]
|
||||
clear = ast.Assign(variables, _NameConstant(None))
|
||||
self.statements.append(clear)
|
||||
# Fix line numbers.
|
||||
|
@ -839,7 +858,7 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
else: # **args have `arg` keywords with an .arg of None
|
||||
arg_expls.append("**" + expl)
|
||||
|
||||
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
|
||||
expl = "%s(%s)" % (func_expl, ", ".join(arg_expls))
|
||||
new_call = ast.Call(new_func, new_args, new_kwargs)
|
||||
res = self.assign(new_call)
|
||||
res_expl = self.explanation_param(self.display(res))
|
||||
|
@ -849,7 +868,7 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
def visit_Starred(self, starred):
|
||||
# From Python 3.5, a Starred node can appear in a function call
|
||||
res, expl = self.visit(starred.value)
|
||||
return starred, '*' + expl
|
||||
return starred, "*" + expl
|
||||
|
||||
def visit_Call_legacy(self, call):
|
||||
"""
|
||||
|
@ -874,9 +893,8 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
if call.kwargs:
|
||||
new_kwarg, expl = self.visit(call.kwargs)
|
||||
arg_expls.append("**" + expl)
|
||||
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
|
||||
new_call = ast.Call(new_func, new_args, new_kwargs,
|
||||
new_star, new_kwarg)
|
||||
expl = "%s(%s)" % (func_expl, ", ".join(arg_expls))
|
||||
new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
|
||||
res = self.assign(new_call)
|
||||
res_expl = self.explanation_param(self.display(res))
|
||||
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
|
||||
|
@ -925,11 +943,13 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
self.statements.append(ast.Assign([store_names[i]], res_expr))
|
||||
left_res, left_expl = next_res, next_expl
|
||||
# Use pytest.assertion.util._reprcompare if that's available.
|
||||
expl_call = self.helper("call_reprcompare",
|
||||
ast.Tuple(syms, ast.Load()),
|
||||
ast.Tuple(load_names, ast.Load()),
|
||||
ast.Tuple(expls, ast.Load()),
|
||||
ast.Tuple(results, ast.Load()))
|
||||
expl_call = self.helper(
|
||||
"call_reprcompare",
|
||||
ast.Tuple(syms, ast.Load()),
|
||||
ast.Tuple(load_names, ast.Load()),
|
||||
ast.Tuple(expls, ast.Load()),
|
||||
ast.Tuple(results, ast.Load()),
|
||||
)
|
||||
if len(comp.ops) > 1:
|
||||
res = ast.BoolOp(ast.And(), load_names)
|
||||
else:
|
||||
|
|
|
@ -34,7 +34,7 @@ def _should_truncate_item(item):
|
|||
|
||||
def _running_on_ci():
|
||||
"""Check if we're currently running on a CI system."""
|
||||
env_vars = ['CI', 'BUILD_NUMBER']
|
||||
env_vars = ["CI", "BUILD_NUMBER"]
|
||||
return any(var in os.environ for var in env_vars)
|
||||
|
||||
|
||||
|
@ -67,16 +67,13 @@ def _truncate_explanation(input_lines, max_lines=None, max_chars=None):
|
|||
# Append useful message to explanation
|
||||
truncated_line_count = len(input_lines) - len(truncated_explanation)
|
||||
truncated_line_count += 1 # Account for the part-truncated final line
|
||||
msg = '...Full output truncated'
|
||||
msg = "...Full output truncated"
|
||||
if truncated_line_count == 1:
|
||||
msg += ' ({} line hidden)'.format(truncated_line_count)
|
||||
msg += " ({} line hidden)".format(truncated_line_count)
|
||||
else:
|
||||
msg += ' ({} lines hidden)'.format(truncated_line_count)
|
||||
msg += ", {}" .format(USAGE_MSG)
|
||||
truncated_explanation.extend([
|
||||
six.text_type(""),
|
||||
six.text_type(msg),
|
||||
])
|
||||
msg += " ({} lines hidden)".format(truncated_line_count)
|
||||
msg += ", {}".format(USAGE_MSG)
|
||||
truncated_explanation.extend([six.text_type(""), six.text_type(msg)])
|
||||
return truncated_explanation
|
||||
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ _reprcompare = None
|
|||
# with non-ascii characters (see issue 877 and 1379)
|
||||
def ecu(s):
|
||||
try:
|
||||
return u(s, 'utf-8', 'replace')
|
||||
return u(s, "utf-8", "replace")
|
||||
except TypeError:
|
||||
return s
|
||||
|
||||
|
@ -38,7 +38,7 @@ def format_explanation(explanation):
|
|||
explanation = ecu(explanation)
|
||||
lines = _split_explanation(explanation)
|
||||
result = _format_lines(lines)
|
||||
return u('\n').join(result)
|
||||
return u("\n").join(result)
|
||||
|
||||
|
||||
def _split_explanation(explanation):
|
||||
|
@ -48,13 +48,13 @@ def _split_explanation(explanation):
|
|||
Any other newlines will be escaped and appear in the line as the
|
||||
literal '\n' characters.
|
||||
"""
|
||||
raw_lines = (explanation or u('')).split('\n')
|
||||
raw_lines = (explanation or u("")).split("\n")
|
||||
lines = [raw_lines[0]]
|
||||
for values in raw_lines[1:]:
|
||||
if values and values[0] in ['{', '}', '~', '>']:
|
||||
if values and values[0] in ["{", "}", "~", ">"]:
|
||||
lines.append(values)
|
||||
else:
|
||||
lines[-1] += '\\n' + values
|
||||
lines[-1] += "\\n" + values
|
||||
return lines
|
||||
|
||||
|
||||
|
@ -71,24 +71,24 @@ def _format_lines(lines):
|
|||
stack = [0]
|
||||
stackcnt = [0]
|
||||
for line in lines[1:]:
|
||||
if line.startswith('{'):
|
||||
if line.startswith("{"):
|
||||
if stackcnt[-1]:
|
||||
s = u('and ')
|
||||
s = u("and ")
|
||||
else:
|
||||
s = u('where ')
|
||||
s = u("where ")
|
||||
stack.append(len(result))
|
||||
stackcnt[-1] += 1
|
||||
stackcnt.append(0)
|
||||
result.append(u(' +') + u(' ') * (len(stack) - 1) + s + line[1:])
|
||||
elif line.startswith('}'):
|
||||
result.append(u(" +") + u(" ") * (len(stack) - 1) + s + line[1:])
|
||||
elif line.startswith("}"):
|
||||
stack.pop()
|
||||
stackcnt.pop()
|
||||
result[stack[-1]] += line[1:]
|
||||
else:
|
||||
assert line[0] in ['~', '>']
|
||||
assert line[0] in ["~", ">"]
|
||||
stack[-1] += 1
|
||||
indent = len(stack) if line.startswith('~') else len(stack) - 1
|
||||
result.append(u(' ') * indent + line[1:])
|
||||
indent = len(stack) if line.startswith("~") else len(stack) - 1
|
||||
result.append(u(" ") * indent + line[1:])
|
||||
assert len(stack) == 1
|
||||
return result
|
||||
|
||||
|
@ -106,7 +106,7 @@ def assertrepr_compare(config, op, left, right):
|
|||
left_repr = py.io.saferepr(left, maxsize=int(width // 2))
|
||||
right_repr = py.io.saferepr(right, maxsize=width - len(left_repr))
|
||||
|
||||
summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
|
||||
summary = u("%s %s %s") % (ecu(left_repr), op, ecu(right_repr))
|
||||
|
||||
def issequence(x):
|
||||
return isinstance(x, Sequence) and not isinstance(x, basestring)
|
||||
|
@ -127,10 +127,10 @@ def assertrepr_compare(config, op, left, right):
|
|||
except TypeError:
|
||||
return False
|
||||
|
||||
verbose = config.getoption('verbose')
|
||||
verbose = config.getoption("verbose")
|
||||
explanation = None
|
||||
try:
|
||||
if op == '==':
|
||||
if op == "==":
|
||||
if istext(left) and istext(right):
|
||||
explanation = _diff_text(left, right, verbose)
|
||||
else:
|
||||
|
@ -146,14 +146,17 @@ def assertrepr_compare(config, op, left, right):
|
|||
explanation.extend(expl)
|
||||
else:
|
||||
explanation = expl
|
||||
elif op == 'not in':
|
||||
elif op == "not in":
|
||||
if istext(left) and istext(right):
|
||||
explanation = _notin_text(left, right, verbose)
|
||||
except Exception:
|
||||
explanation = [
|
||||
u('(pytest_assertion plugin: representation of details failed. '
|
||||
'Probably an object has a faulty __repr__.)'),
|
||||
u(_pytest._code.ExceptionInfo())]
|
||||
u(
|
||||
"(pytest_assertion plugin: representation of details failed. "
|
||||
"Probably an object has a faulty __repr__.)"
|
||||
),
|
||||
u(_pytest._code.ExceptionInfo()),
|
||||
]
|
||||
|
||||
if not explanation:
|
||||
return None
|
||||
|
@ -170,6 +173,7 @@ def _diff_text(left, right, verbose=False):
|
|||
If the input are bytes they will be safely converted to text.
|
||||
"""
|
||||
from difflib import ndiff
|
||||
|
||||
explanation = []
|
||||
|
||||
def escape_for_readable_diff(binary_text):
|
||||
|
@ -179,8 +183,8 @@ def _diff_text(left, right, verbose=False):
|
|||
newlines and carriage returns (#429).
|
||||
"""
|
||||
r = six.text_type(repr(binary_text)[1:-1])
|
||||
r = r.replace(r'\n', '\n')
|
||||
r = r.replace(r'\r', '\r')
|
||||
r = r.replace(r"\n", "\n")
|
||||
r = r.replace(r"\r", "\r")
|
||||
return r
|
||||
|
||||
if isinstance(left, six.binary_type):
|
||||
|
@ -193,9 +197,11 @@ def _diff_text(left, right, verbose=False):
|
|||
if left[i] != right[i]:
|
||||
break
|
||||
if i > 42:
|
||||
i -= 10 # Provide some context
|
||||
explanation = [u('Skipping %s identical leading '
|
||||
'characters in diff, use -v to show') % i]
|
||||
i -= 10 # Provide some context
|
||||
explanation = [
|
||||
u("Skipping %s identical leading " "characters in diff, use -v to show")
|
||||
% i
|
||||
]
|
||||
left = left[i:]
|
||||
right = right[i:]
|
||||
if len(left) == len(right):
|
||||
|
@ -203,40 +209,48 @@ def _diff_text(left, right, verbose=False):
|
|||
if left[-i] != right[-i]:
|
||||
break
|
||||
if i > 42:
|
||||
i -= 10 # Provide some context
|
||||
explanation += [u('Skipping %s identical trailing '
|
||||
'characters in diff, use -v to show') % i]
|
||||
i -= 10 # Provide some context
|
||||
explanation += [
|
||||
u(
|
||||
"Skipping %s identical trailing "
|
||||
"characters in diff, use -v to show"
|
||||
)
|
||||
% i
|
||||
]
|
||||
left = left[:-i]
|
||||
right = right[:-i]
|
||||
keepends = True
|
||||
if left.isspace() or right.isspace():
|
||||
left = repr(str(left))
|
||||
right = repr(str(right))
|
||||
explanation += [u'Strings contain only whitespace, escaping them using repr()']
|
||||
explanation += [line.strip('\n')
|
||||
for line in ndiff(left.splitlines(keepends),
|
||||
right.splitlines(keepends))]
|
||||
explanation += [u"Strings contain only whitespace, escaping them using repr()"]
|
||||
explanation += [
|
||||
line.strip("\n")
|
||||
for line in ndiff(left.splitlines(keepends), right.splitlines(keepends))
|
||||
]
|
||||
return explanation
|
||||
|
||||
|
||||
def _compare_eq_iterable(left, right, verbose=False):
|
||||
if not verbose:
|
||||
return [u('Use -v to get the full diff')]
|
||||
return [u("Use -v to get the full diff")]
|
||||
# dynamic import to speedup pytest
|
||||
import difflib
|
||||
|
||||
try:
|
||||
left_formatting = pprint.pformat(left).splitlines()
|
||||
right_formatting = pprint.pformat(right).splitlines()
|
||||
explanation = [u('Full diff:')]
|
||||
explanation = [u("Full diff:")]
|
||||
except Exception:
|
||||
# hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
|
||||
# sorted() on a list would raise. See issue #718.
|
||||
# As a workaround, the full diff is generated by using the repr() string of each item of each container.
|
||||
left_formatting = sorted(repr(x) for x in left)
|
||||
right_formatting = sorted(repr(x) for x in right)
|
||||
explanation = [u('Full diff (fallback to calling repr on each item):')]
|
||||
explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
|
||||
explanation = [u("Full diff (fallback to calling repr on each item):")]
|
||||
explanation.extend(
|
||||
line.strip() for line in difflib.ndiff(left_formatting, right_formatting)
|
||||
)
|
||||
return explanation
|
||||
|
||||
|
||||
|
@ -244,16 +258,18 @@ def _compare_eq_sequence(left, right, verbose=False):
|
|||
explanation = []
|
||||
for i in range(min(len(left), len(right))):
|
||||
if left[i] != right[i]:
|
||||
explanation += [u('At index %s diff: %r != %r')
|
||||
% (i, left[i], right[i])]
|
||||
explanation += [u("At index %s diff: %r != %r") % (i, left[i], right[i])]
|
||||
break
|
||||
if len(left) > len(right):
|
||||
explanation += [u('Left contains more items, first extra item: %s')
|
||||
% py.io.saferepr(left[len(right)],)]
|
||||
explanation += [
|
||||
u("Left contains more items, first extra item: %s")
|
||||
% py.io.saferepr(left[len(right)])
|
||||
]
|
||||
elif len(left) < len(right):
|
||||
explanation += [
|
||||
u('Right contains more items, first extra item: %s') %
|
||||
py.io.saferepr(right[len(left)],)]
|
||||
u("Right contains more items, first extra item: %s")
|
||||
% py.io.saferepr(right[len(left)])
|
||||
]
|
||||
return explanation
|
||||
|
||||
|
||||
|
@ -262,11 +278,11 @@ def _compare_eq_set(left, right, verbose=False):
|
|||
diff_left = left - right
|
||||
diff_right = right - left
|
||||
if diff_left:
|
||||
explanation.append(u('Extra items in the left set:'))
|
||||
explanation.append(u("Extra items in the left set:"))
|
||||
for item in diff_left:
|
||||
explanation.append(py.io.saferepr(item))
|
||||
if diff_right:
|
||||
explanation.append(u('Extra items in the right set:'))
|
||||
explanation.append(u("Extra items in the right set:"))
|
||||
for item in diff_right:
|
||||
explanation.append(py.io.saferepr(item))
|
||||
return explanation
|
||||
|
@ -277,27 +293,29 @@ def _compare_eq_dict(left, right, verbose=False):
|
|||
common = set(left).intersection(set(right))
|
||||
same = {k: left[k] for k in common if left[k] == right[k]}
|
||||
if same and verbose < 2:
|
||||
explanation += [u('Omitting %s identical items, use -vv to show') %
|
||||
len(same)]
|
||||
explanation += [u("Omitting %s identical items, use -vv to show") % len(same)]
|
||||
elif same:
|
||||
explanation += [u('Common items:')]
|
||||
explanation += [u("Common items:")]
|
||||
explanation += pprint.pformat(same).splitlines()
|
||||
diff = {k for k in common if left[k] != right[k]}
|
||||
if diff:
|
||||
explanation += [u('Differing items:')]
|
||||
explanation += [u("Differing items:")]
|
||||
for k in diff:
|
||||
explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
|
||||
py.io.saferepr({k: right[k]})]
|
||||
explanation += [
|
||||
py.io.saferepr({k: left[k]}) + " != " + py.io.saferepr({k: right[k]})
|
||||
]
|
||||
extra_left = set(left) - set(right)
|
||||
if extra_left:
|
||||
explanation.append(u('Left contains more items:'))
|
||||
explanation.extend(pprint.pformat(
|
||||
{k: left[k] for k in extra_left}).splitlines())
|
||||
explanation.append(u("Left contains more items:"))
|
||||
explanation.extend(
|
||||
pprint.pformat({k: left[k] for k in extra_left}).splitlines()
|
||||
)
|
||||
extra_right = set(right) - set(left)
|
||||
if extra_right:
|
||||
explanation.append(u('Right contains more items:'))
|
||||
explanation.extend(pprint.pformat(
|
||||
{k: right[k] for k in extra_right}).splitlines())
|
||||
explanation.append(u("Right contains more items:"))
|
||||
explanation.extend(
|
||||
pprint.pformat({k: right[k] for k in extra_right}).splitlines()
|
||||
)
|
||||
return explanation
|
||||
|
||||
|
||||
|
@ -307,14 +325,14 @@ def _notin_text(term, text, verbose=False):
|
|||
tail = text[index + len(term):]
|
||||
correct_text = head + tail
|
||||
diff = _diff_text(correct_text, text, verbose)
|
||||
newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
|
||||
newdiff = [u("%s is contained here:") % py.io.saferepr(term, maxsize=42)]
|
||||
for line in diff:
|
||||
if line.startswith(u('Skipping')):
|
||||
if line.startswith(u("Skipping")):
|
||||
continue
|
||||
if line.startswith(u('- ')):
|
||||
if line.startswith(u("- ")):
|
||||
continue
|
||||
if line.startswith(u('+ ')):
|
||||
newdiff.append(u(' ') + line[2:])
|
||||
if line.startswith(u("+ ")):
|
||||
newdiff.append(u(" ") + line[2:])
|
||||
else:
|
||||
newdiff.append(line)
|
||||
return newdiff
|
||||
|
|
|
@ -18,6 +18,7 @@ from os.path import sep as _sep, altsep as _altsep
|
|||
|
||||
|
||||
class Cache(object):
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self._cachedir = Cache.cache_dir_from_config(config)
|
||||
|
@ -53,7 +54,7 @@ class Cache(object):
|
|||
return self._cachedir.ensure_dir("d", name)
|
||||
|
||||
def _getvaluepath(self, key):
|
||||
return self._cachedir.join('v', *key.split('/'))
|
||||
return self._cachedir.join("v", *key.split("/"))
|
||||
|
||||
def get(self, key, default):
|
||||
""" return cached value for the given key. If no value
|
||||
|
@@ -89,17 +90,18 @@ class Cache(object):
path.dirpath().ensure_dir()
except (py.error.EEXIST, py.error.EACCES):
self.config.warn(
code='I9', message='could not create cache path %s' % (path,)
code="I9", message="could not create cache path %s" % (path,)
)
return
try:
f = path.open('w')
f = path.open("w")
except py.error.ENOTDIR:
self.config.warn(
code='I9', message='cache could not write path %s' % (path,))
code="I9", message="cache could not write path %s" % (path,)
)
else:
with f:
self.trace("cache-write %s: %r" % (key, value,))
self.trace("cache-write %s: %r" % (key, value))
json.dump(value, f, indent=2, sort_keys=True)
@@ -108,39 +110,38 @@ class LFPlugin(object):
def __init__(self, config):
self.config = config
active_keys = 'lf', 'failedfirst'
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
self._no_failures_behavior = self.config.getoption('last_failed_no_failures')
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
def pytest_report_collectionfinish(self):
if self.active:
if not self._previously_failed_count:
mode = "run {} (no recorded failures)".format(self._no_failures_behavior)
mode = "run {} (no recorded failures)".format(
self._no_failures_behavior
)
else:
noun = 'failure' if self._previously_failed_count == 1 else 'failures'
suffix = " first" if self.config.getoption(
"failedfirst") else ""
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == 'call' and report.passed) or report.skipped:
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ('passed', 'skipped')
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update(
(item.nodeid, True)
for item in report.result)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
@@ -164,7 +165,7 @@ class LFPlugin(object):
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
elif self._no_failures_behavior == 'none':
elif self._no_failures_behavior == "none":
config.hook.pytest_deselected(items=items)
items[:] = []
@@ -196,8 +197,11 @@ class NFPlugin(object):
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(six.itervalues(new_items)) + \
self._get_increasing_order(six.itervalues(other_items))
items[:] = self._get_increasing_order(
six.itervalues(new_items)
) + self._get_increasing_order(
six.itervalues(other_items)
)
self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items):
@@ -214,38 +218,59 @@ class NFPlugin(object):
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
'--lf', '--last-failed', action='store_true', dest="lf",
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)")
"at the last run (or all if none failed)",
)
group.addoption(
'--ff', '--failed-first', action='store_true', dest="failedfirst",
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown",
)
group.addoption(
'--nf', '--new-first', action='store_true', dest="newfirst",
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime")
"sorted by file mtime",
)
group.addoption(
'--cache-show', action='store_true', dest="cacheshow",
help="show cache contents, don't perform collection or tests")
"--cache-show",
action="store_true",
dest="cacheshow",
help="show cache contents, don't perform collection or tests",
)
group.addoption(
'--cache-clear', action='store_true', dest="cacheclear",
help="remove all cache contents at start of test run.")
parser.addini(
"cache_dir", default='.pytest_cache',
help="cache directory path.")
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.")
group.addoption(
'--lfnf', '--last-failed-no-failures', action='store',
dest='last_failed_no_failures', choices=('all', 'none'), default='all',
help='change the behavior when no test failed in the last run or no '
'information about the last failures was found in the cache'
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="change the behavior when no test failed in the last run or no "
"information about the last failures was found in the cache",
)
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@@ -280,6 +305,7 @@ def pytest_report_header(config):
def cacheshow(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.check():
@@ -293,8 +319,7 @@ def cacheshow(config, session):
key = valpath.relto(vdir).replace(valpath.sep, "/")
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, "
"will be ignored" % key)
tw.line("%s contains unreadable content, " "will be ignored" % key)
else:
tw.line("%s contains:" % key)
stream = py.io.TextIO()
@@ -310,6 +335,5 @@ def cacheshow(config, session):
# print("%s/" % p.relto(basedir))
if p.isfile():
key = p.relto(basedir)
tw.line("%s is a file of length %d" % (
key, p.size()))
tw.line("%s is a file of length %d" % (key, p.size()))
return 0
@@ -17,19 +17,26 @@ import pytest
from _pytest.compat import CaptureIO
patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"}
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
'--capture', action="store",
"--capture",
action="store",
default="fd" if hasattr(os, "dup") else "sys",
metavar="method", choices=['fd', 'sys', 'no'],
help="per-test capturing method: one of fd|sys|no.")
metavar="method",
choices=["fd", "sys", "no"],
help="per-test capturing method: one of fd|sys|no.",
)
group._addoption(
'-s', action="store_const", const="no", dest="capture",
help="shortcut for --capture=no.")
"-s",
action="store_const",
const="no",
dest="capture",
help="shortcut for --capture=no.",
)
@pytest.hookimpl(hookwrapper=True)
@@ -50,6 +57,7 @@ def pytest_load_initial_conftests(early_config, parser, args):
def silence_logging_at_shutdown():
if "logging" in sys.modules:
sys.modules["logging"].raiseExceptions = False
early_config.add_cleanup(silence_logging_at_shutdown)
# finally trigger conftest loading but while capturing (issue93)
@ -180,7 +188,7 @@ class CaptureManager(object):
|
|||
item.add_report_section(when, "stderr", err)
|
||||
|
||||
|
||||
capture_fixtures = {'capfd', 'capfdbinary', 'capsys', 'capsysbinary'}
|
||||
capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"}
|
||||
|
||||
|
||||
def _ensure_only_one_capture_fixture(request, name):
|
||||
|
@ -189,9 +197,7 @@ def _ensure_only_one_capture_fixture(request, name):
|
|||
fixtures = sorted(fixtures)
|
||||
fixtures = fixtures[0] if len(fixtures) == 1 else fixtures
|
||||
raise request.raiseerror(
|
||||
"cannot use {} and {} at the same time".format(
|
||||
fixtures, name,
|
||||
),
|
||||
"cannot use {} and {} at the same time".format(fixtures, name)
|
||||
)
|
||||
|
||||
|
||||
|
@ -202,7 +208,7 @@ def capsys(request):
|
|||
which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text``
|
||||
objects.
|
||||
"""
|
||||
_ensure_only_one_capture_fixture(request, 'capsys')
|
||||
_ensure_only_one_capture_fixture(request, "capsys")
|
||||
with _install_capture_fixture_on_item(request, SysCapture) as fixture:
|
||||
yield fixture
|
||||
|
||||
|
@ -214,11 +220,11 @@ def capsysbinary(request):
|
|||
which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes``
|
||||
objects.
|
||||
"""
|
||||
_ensure_only_one_capture_fixture(request, 'capsysbinary')
|
||||
_ensure_only_one_capture_fixture(request, "capsysbinary")
|
||||
# Currently, the implementation uses the python3 specific `.buffer`
|
||||
# property of CaptureIO.
|
||||
if sys.version_info < (3,):
|
||||
raise request.raiseerror('capsysbinary is only supported on python 3')
|
||||
raise request.raiseerror("capsysbinary is only supported on python 3")
|
||||
with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture:
|
||||
yield fixture
|
||||
|
||||
|
@ -230,9 +236,11 @@ def capfd(request):
|
|||
which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text``
|
||||
objects.
|
||||
"""
|
||||
_ensure_only_one_capture_fixture(request, 'capfd')
|
||||
if not hasattr(os, 'dup'):
|
||||
pytest.skip("capfd fixture needs os.dup function which is not available in this system")
|
||||
_ensure_only_one_capture_fixture(request, "capfd")
|
||||
if not hasattr(os, "dup"):
|
||||
pytest.skip(
|
||||
"capfd fixture needs os.dup function which is not available in this system"
|
||||
)
|
||||
with _install_capture_fixture_on_item(request, FDCapture) as fixture:
|
||||
yield fixture
|
||||
|
||||
|
@ -244,9 +252,11 @@ def capfdbinary(request):
|
|||
which return a ``(out, err)`` tuple. ``out`` and ``err`` will be
|
||||
``bytes`` objects.
|
||||
"""
|
||||
_ensure_only_one_capture_fixture(request, 'capfdbinary')
|
||||
if not hasattr(os, 'dup'):
|
||||
pytest.skip("capfdbinary fixture needs os.dup function which is not available in this system")
|
||||
_ensure_only_one_capture_fixture(request, "capfdbinary")
|
||||
if not hasattr(os, "dup"):
|
||||
pytest.skip(
|
||||
"capfdbinary fixture needs os.dup function which is not available in this system"
|
||||
)
|
||||
with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture:
|
||||
yield fixture
|
||||
|
||||
|
@ -261,7 +271,7 @@ def _install_capture_fixture_on_item(request, capture_class):
|
|||
by ``CaptureManager`` during its ``pytest_runtest_*`` hooks.
|
||||
"""
|
||||
request.node._capture_fixture = fixture = CaptureFixture(capture_class, request)
|
||||
capmanager = request.config.pluginmanager.getplugin('capturemanager')
|
||||
capmanager = request.config.pluginmanager.getplugin("capturemanager")
|
||||
# need to active this fixture right away in case it is being used by another fixture (setup phase)
|
||||
# if this fixture is being used only by a test function (call phase), then we wouldn't need this
|
||||
# activation, but it doesn't hurt
|
||||
|
@ -276,13 +286,15 @@ class CaptureFixture(object):
|
|||
Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary`
|
||||
fixtures.
|
||||
"""
|
||||
|
||||
def __init__(self, captureclass, request):
|
||||
self.captureclass = captureclass
|
||||
self.request = request
|
||||
|
||||
def _start(self):
|
||||
self._capture = MultiCapture(out=True, err=True, in_=False,
|
||||
Capture=self.captureclass)
|
||||
self._capture = MultiCapture(
|
||||
out=True, err=True, in_=False, Capture=self.captureclass
|
||||
)
|
||||
self._capture.start_capturing()
|
||||
|
||||
def close(self):
|
||||
|
@ -305,7 +317,7 @@ class CaptureFixture(object):
|
|||
def disabled(self):
|
||||
"""Temporarily disables capture while inside the 'with' block."""
|
||||
self._capture.suspend_capturing()
|
||||
capmanager = self.request.config.pluginmanager.getplugin('capturemanager')
|
||||
capmanager = self.request.config.pluginmanager.getplugin("capturemanager")
|
||||
capmanager.suspend_global_capture(item=None, in_=False)
|
||||
try:
|
||||
yield
|
||||
|
@ -346,7 +358,7 @@ class EncodedFile(object):
|
|||
self.buffer.write(obj)
|
||||
|
||||
def writelines(self, linelist):
|
||||
data = ''.join(linelist)
|
||||
data = "".join(linelist)
|
||||
self.write(data)
|
||||
|
||||
@property
|
||||
|
@ -409,7 +421,7 @@ class MultiCapture(object):
|
|||
|
||||
def stop_capturing(self):
|
||||
""" stop capturing and reset capturing streams """
|
||||
if hasattr(self, '_reset'):
|
||||
if hasattr(self, "_reset"):
|
||||
raise ValueError("was already stopped")
|
||||
self._reset = True
|
||||
if self.out:
|
||||
|
@ -421,8 +433,10 @@ class MultiCapture(object):
|
|||
|
||||
def readouterr(self):
|
||||
""" return snapshot unicode value of stdout/stderr capturings. """
|
||||
return CaptureResult(self.out.snap() if self.out is not None else "",
|
||||
self.err.snap() if self.err is not None else "")
|
||||
return CaptureResult(
|
||||
self.out.snap() if self.out is not None else "",
|
||||
self.err.snap() if self.err is not None else "",
|
||||
)
|
||||
|
||||
|
||||
class NoCapture(object):
|
||||
|
@ -507,6 +521,7 @@ class FDCapture(FDCaptureBinary):
|
|||
|
||||
snap() produces text
|
||||
"""
|
||||
|
||||
def snap(self):
|
||||
res = FDCaptureBinary.snap(self)
|
||||
enc = getattr(self.tmpfile, "encoding", None)
|
||||
|
@ -516,6 +531,7 @@ class FDCapture(FDCaptureBinary):
|
|||
|
||||
|
||||
class SysCapture(object):
|
||||
|
||||
def __init__(self, fd, tmpfile=None):
|
||||
name = patchsysdict[fd]
|
||||
self._old = getattr(sys, name)
|
||||
|
@ -553,6 +569,7 @@ class SysCapture(object):
|
|||
|
||||
|
||||
class SysCaptureBinary(SysCapture):
|
||||
|
||||
def snap(self):
|
||||
res = self.tmpfile.buffer.getvalue()
|
||||
self.tmpfile.seek(0)
|
||||
|
@ -572,6 +589,7 @@ class DontReadFromInput(six.Iterator):
|
|||
|
||||
def read(self, *args):
|
||||
raise IOError("reading from stdin while output is captured")
|
||||
|
||||
readline = read
|
||||
readlines = read
|
||||
__next__ = read
|
||||
|
@ -580,8 +598,7 @@ class DontReadFromInput(six.Iterator):
|
|||
return self
|
||||
|
||||
def fileno(self):
|
||||
raise UnsupportedOperation("redirected stdin is pseudofile, "
|
||||
"has no fileno()")
|
||||
raise UnsupportedOperation("redirected stdin is pseudofile, " "has no fileno()")
|
||||
|
||||
def isatty(self):
|
||||
return False
|
||||
|
@ -594,7 +611,7 @@ class DontReadFromInput(six.Iterator):
|
|||
if sys.version_info >= (3, 0):
|
||||
return self
|
||||
else:
|
||||
raise AttributeError('redirected stdin has no attribute buffer')
|
||||
raise AttributeError("redirected stdin has no attribute buffer")
|
||||
|
||||
|
||||
def _colorama_workaround():
|
||||
|
@ -607,7 +624,7 @@ def _colorama_workaround():
|
|||
fail in various ways.
|
||||
"""
|
||||
|
||||
if not sys.platform.startswith('win32'):
|
||||
if not sys.platform.startswith("win32"):
|
||||
return
|
||||
try:
|
||||
import colorama # noqa
|
||||
|
@ -634,7 +651,7 @@ def _readline_workaround():
|
|||
See https://github.com/pytest-dev/pytest/pull/1281
|
||||
"""
|
||||
|
||||
if not sys.platform.startswith('win32'):
|
||||
if not sys.platform.startswith("win32"):
|
||||
return
|
||||
try:
|
||||
import readline # noqa
|
||||
|
@ -664,21 +681,21 @@ def _py36_windowsconsoleio_workaround(stream):
|
|||
|
||||
See https://github.com/pytest-dev/py/issues/103
|
||||
"""
|
||||
if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6):
|
||||
if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6):
|
||||
return
|
||||
|
||||
# bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
|
||||
if not hasattr(stream, 'buffer'):
|
||||
if not hasattr(stream, "buffer"):
|
||||
return
|
||||
|
||||
buffered = hasattr(stream.buffer, 'raw')
|
||||
buffered = hasattr(stream.buffer, "raw")
|
||||
raw_stdout = stream.buffer.raw if buffered else stream.buffer
|
||||
|
||||
if not isinstance(raw_stdout, io._WindowsConsoleIO):
|
||||
return
|
||||
|
||||
def _reopen_stdio(f, mode):
|
||||
if not buffered and mode[0] == 'w':
|
||||
if not buffered and mode[0] == "w":
|
||||
buffering = 0
|
||||
else:
|
||||
buffering = -1
|
||||
|
@ -688,11 +705,12 @@ def _py36_windowsconsoleio_workaround(stream):
|
|||
f.encoding,
|
||||
f.errors,
|
||||
f.newlines,
|
||||
f.line_buffering)
|
||||
f.line_buffering,
|
||||
)
|
||||
|
||||
sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, 'rb')
|
||||
sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, 'wb')
|
||||
sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, 'wb')
|
||||
sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, "rb")
|
||||
sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, "wb")
|
||||
sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, "wb")
|
||||
|
||||
|
||||
def _attempt_to_close_capture_file(f):
|
||||
|
|
|
@ -36,7 +36,7 @@ NOTSET = object()
|
|||
|
||||
PY35 = sys.version_info[:2] >= (3, 5)
|
||||
PY36 = sys.version_info[:2] >= (3, 6)
|
||||
MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError'
|
||||
MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError"
|
||||
|
||||
if _PY3:
|
||||
from collections.abc import MutableMapping as MappingMixin # noqa
|
||||
|
@ -54,9 +54,9 @@ def _format_args(func):
|
|||
isfunction = inspect.isfunction
|
||||
isclass = inspect.isclass
|
||||
# used to work around a python2 exception info leak
|
||||
exc_clear = getattr(sys, 'exc_clear', lambda: None)
|
||||
exc_clear = getattr(sys, "exc_clear", lambda: None)
|
||||
# The type of re.compile objects is not exposed in Python.
|
||||
REGEX_TYPE = type(re.compile(''))
|
||||
REGEX_TYPE = type(re.compile(""))
|
||||
|
||||
|
||||
def is_generator(func):
|
||||
|
@ -70,8 +70,13 @@ def iscoroutinefunction(func):
|
|||
Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly,
|
||||
which in turns also initializes the "logging" module as side-effect (see issue #8).
|
||||
"""
|
||||
return (getattr(func, '_is_coroutine', False) or
|
||||
(hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func)))
|
||||
return (
|
||||
getattr(func, "_is_coroutine", False)
|
||||
or (
|
||||
hasattr(inspect, "iscoroutinefunction")
|
||||
and inspect.iscoroutinefunction(func)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def getlocation(function, curdir):
|
||||
|
@ -90,8 +95,9 @@ def num_mock_patch_args(function):
|
|||
mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")]
|
||||
if any(mock_modules):
|
||||
sentinels = [m.DEFAULT for m in mock_modules if m is not None]
|
||||
return len([p for p in patchings
|
||||
if not p.attribute_name and p.new in sentinels])
|
||||
return len(
|
||||
[p for p in patchings if not p.attribute_name and p.new in sentinels]
|
||||
)
|
||||
return len(patchings)
|
||||
|
||||
|
||||
|
@ -118,16 +124,25 @@ def getfuncargnames(function, is_method=False, cls=None):
|
|||
# ordered mapping of parameter names to Parameter instances. This
|
||||
# creates a tuple of the names of the parameters that don't have
|
||||
# defaults.
|
||||
arg_names = tuple(p.name for p in signature(function).parameters.values()
|
||||
if (p.kind is Parameter.POSITIONAL_OR_KEYWORD or
|
||||
p.kind is Parameter.KEYWORD_ONLY) and
|
||||
p.default is Parameter.empty)
|
||||
arg_names = tuple(
|
||||
p.name
|
||||
for p in signature(function).parameters.values()
|
||||
if (
|
||||
p.kind is Parameter.POSITIONAL_OR_KEYWORD
|
||||
or p.kind is Parameter.KEYWORD_ONLY
|
||||
)
|
||||
and p.default is Parameter.empty
|
||||
)
|
||||
# If this function should be treated as a bound method even though
|
||||
# it's passed as an unbound method or function, remove the first
|
||||
# parameter name.
|
||||
if (is_method or
|
||||
(cls and not isinstance(cls.__dict__.get(function.__name__, None),
|
||||
staticmethod))):
|
||||
if (
|
||||
is_method
|
||||
or (
|
||||
cls
|
||||
and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod)
|
||||
)
|
||||
):
|
||||
arg_names = arg_names[1:]
|
||||
# Remove any names that will be replaced with mocks.
|
||||
if hasattr(function, "__wrapped__"):
|
||||
|
@ -138,9 +153,12 @@ def getfuncargnames(function, is_method=False, cls=None):
|
|||
def get_default_arg_names(function):
|
||||
# Note: this code intentionally mirrors the code at the beginning of getfuncargnames,
|
||||
# to get the arguments which were excluded from its result because they had default values
|
||||
return tuple(p.name for p in signature(function).parameters.values()
|
||||
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) and
|
||||
p.default is not Parameter.empty)
|
||||
return tuple(
|
||||
p.name
|
||||
for p in signature(function).parameters.values()
|
||||
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
|
||||
and p.default is not Parameter.empty
|
||||
)
|
||||
|
||||
|
||||
if _PY3:
|
||||
|
@ -148,17 +166,20 @@ if _PY3:
|
|||
UNICODE_TYPES = str,
|
||||
|
||||
if PY35:
|
||||
|
||||
def _bytes_to_ascii(val):
|
||||
return val.decode('ascii', 'backslashreplace')
|
||||
return val.decode("ascii", "backslashreplace")
|
||||
|
||||
else:
|
||||
|
||||
def _bytes_to_ascii(val):
|
||||
if val:
|
||||
# source: http://goo.gl/bGsnwC
|
||||
encoded_bytes, _ = codecs.escape_encode(val)
|
||||
return encoded_bytes.decode('ascii')
|
||||
return encoded_bytes.decode("ascii")
|
||||
else:
|
||||
# empty bytes crashes codecs.escape_encode (#1087)
|
||||
return ''
|
||||
return ""
|
||||
|
||||
def ascii_escaped(val):
|
||||
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
|
||||
|
@ -181,7 +202,9 @@ if _PY3:
|
|||
if isinstance(val, bytes):
|
||||
return _bytes_to_ascii(val)
|
||||
else:
|
||||
return val.encode('unicode_escape').decode('ascii')
|
||||
return val.encode("unicode_escape").decode("ascii")
|
||||
|
||||
|
||||
else:
|
||||
STRING_TYPES = bytes, str, unicode
|
||||
UNICODE_TYPES = unicode,
|
||||
|
@ -197,11 +220,11 @@ else:
|
|||
"""
|
||||
if isinstance(val, bytes):
|
||||
try:
|
||||
return val.encode('ascii')
|
||||
return val.encode("ascii")
|
||||
except UnicodeDecodeError:
|
||||
return val.encode('string-escape')
|
||||
return val.encode("string-escape")
|
||||
else:
|
||||
return val.encode('unicode-escape')
|
||||
return val.encode("unicode-escape")
|
||||
|
||||
|
||||
def get_real_func(obj):
|
||||
|
@ -210,16 +233,16 @@ def get_real_func(obj):
|
|||
"""
|
||||
start_obj = obj
|
||||
for i in range(100):
|
||||
new_obj = getattr(obj, '__wrapped__', None)
|
||||
new_obj = getattr(obj, "__wrapped__", None)
|
||||
if new_obj is None:
|
||||
break
|
||||
obj = new_obj
|
||||
else:
|
||||
raise ValueError(
|
||||
("could not find real function of {start}"
|
||||
"\nstopped at {current}").format(
|
||||
start=py.io.saferepr(start_obj),
|
||||
current=py.io.saferepr(obj)))
|
||||
("could not find real function of {start}" "\nstopped at {current}").format(
|
||||
start=py.io.saferepr(start_obj), current=py.io.saferepr(obj)
|
||||
)
|
||||
)
|
||||
if isinstance(obj, functools.partial):
|
||||
obj = obj.func
|
||||
return obj
|
||||
|
@ -228,7 +251,7 @@ def get_real_func(obj):
|
|||
def getfslineno(obj):
|
||||
# xxx let decorators etc specify a sane ordering
|
||||
obj = get_real_func(obj)
|
||||
if hasattr(obj, 'place_as'):
|
||||
if hasattr(obj, "place_as"):
|
||||
obj = obj.place_as
|
||||
fslineno = _pytest._code.getfslineno(obj)
|
||||
assert isinstance(fslineno[1], int), obj
|
||||
|
@ -267,10 +290,14 @@ def _is_unittest_unexpected_success_a_failure():
|
|||
|
||||
|
||||
if _PY3:
|
||||
|
||||
def safe_str(v):
|
||||
"""returns v as string"""
|
||||
return str(v)
|
||||
|
||||
|
||||
else:
|
||||
|
||||
def safe_str(v):
|
||||
"""returns v as string, converting to ascii if necessary"""
|
||||
try:
|
||||
|
@ -278,28 +305,29 @@ else:
|
|||
except UnicodeError:
|
||||
if not isinstance(v, unicode):
|
||||
v = unicode(v)
|
||||
errors = 'replace'
|
||||
return v.encode('utf-8', errors)
|
||||
errors = "replace"
|
||||
return v.encode("utf-8", errors)
|
||||
|
||||
|
||||
COLLECT_FAKEMODULE_ATTRIBUTES = (
|
||||
'Collector',
|
||||
'Module',
|
||||
'Generator',
|
||||
'Function',
|
||||
'Instance',
|
||||
'Session',
|
||||
'Item',
|
||||
'Class',
|
||||
'File',
|
||||
'_fillfuncargs',
|
||||
"Collector",
|
||||
"Module",
|
||||
"Generator",
|
||||
"Function",
|
||||
"Instance",
|
||||
"Session",
|
||||
"Item",
|
||||
"Class",
|
||||
"File",
|
||||
"_fillfuncargs",
|
||||
)
|
||||
|
||||
|
||||
def _setup_collect_fakemodule():
|
||||
from types import ModuleType
|
||||
import pytest
|
||||
pytest.collect = ModuleType('pytest.collect')
|
||||
|
||||
pytest.collect = ModuleType("pytest.collect")
|
||||
pytest.collect.__all__ = [] # used for setns
|
||||
for attr in COLLECT_FAKEMODULE_ATTRIBUTES:
|
||||
setattr(pytest.collect, attr, getattr(pytest, attr))
|
||||
|
@ -313,26 +341,28 @@ if _PY2:
|
|||
|
||||
@property
|
||||
def encoding(self):
|
||||
return getattr(self, '_encoding', 'UTF-8')
|
||||
return getattr(self, "_encoding", "UTF-8")
|
||||
|
||||
|
||||
else:
|
||||
import io
|
||||
|
||||
class CaptureIO(io.TextIOWrapper):
|
||||
|
||||
def __init__(self):
|
||||
super(CaptureIO, self).__init__(
|
||||
io.BytesIO(),
|
||||
encoding='UTF-8', newline='', write_through=True,
|
||||
io.BytesIO(), encoding="UTF-8", newline="", write_through=True
|
||||
)
|
||||
|
||||
def getvalue(self):
|
||||
return self.buffer.getvalue().decode('UTF-8')
|
||||
return self.buffer.getvalue().decode("UTF-8")
|
||||
|
||||
|
||||
class FuncargnamesCompatAttr(object):
|
||||
""" helper class so that Metafunc, Function and FixtureRequest
|
||||
don't need to each define the "funcargnames" compatibility attribute.
|
||||
"""
|
||||
|
||||
@property
|
||||
def funcargnames(self):
|
||||
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
|
||||
|
|
|
@ -8,6 +8,7 @@ import warnings
|
|||
import copy
|
||||
import six
|
||||
import py
|
||||
|
||||
# DON't import pytest here because it causes import cycle troubles
|
||||
import sys
|
||||
import os
|
||||
|
@ -27,6 +28,7 @@ hookspec = HookspecMarker("pytest")
|
|||
|
||||
|
||||
class ConftestImportFailure(Exception):
|
||||
|
||||
def __init__(self, path, excinfo):
|
||||
Exception.__init__(self, path, excinfo)
|
||||
self.path = path
|
||||
|
@ -36,7 +38,7 @@ class ConftestImportFailure(Exception):
|
|||
etype, evalue, etb = self.excinfo
|
||||
formatted = traceback.format_tb(etb)
|
||||
# The level of the tracebacks we want to print is hand crafted :(
|
||||
return repr(evalue) + '\n' + ''.join(formatted[2:])
|
||||
return repr(evalue) + "\n" + "".join(formatted[2:])
|
||||
|
||||
|
||||
def main(args=None, plugins=None):
|
||||
|
@ -108,7 +110,8 @@ default_plugins = (
|
|||
"mark main terminal runner python fixtures debugging unittest capture skipping "
|
||||
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion "
|
||||
"junitxml resultlog doctest cacheprovider freeze_support "
|
||||
"setuponly setupplan warnings logging").split()
|
||||
"setuponly setupplan warnings logging"
|
||||
).split()
|
||||
|
||||
|
||||
builtin_plugins = set(default_plugins)
|
||||
|
@ -147,6 +150,7 @@ def _prepareconfig(args=None, plugins=None):
|
|||
raise ValueError("not a string or argument list: %r" % (args,))
|
||||
args = shlex.split(args, posix=sys.platform != "win32")
|
||||
from _pytest import deprecated
|
||||
|
||||
warning = deprecated.MAIN_STR_ARGS
|
||||
config = get_config()
|
||||
pluginmanager = config.pluginmanager
|
||||
|
@ -158,9 +162,10 @@ def _prepareconfig(args=None, plugins=None):
|
|||
else:
|
||||
pluginmanager.register(plugin)
|
||||
if warning:
|
||||
config.warn('C1', warning)
|
||||
config.warn("C1", warning)
|
||||
return pluginmanager.hook.pytest_cmdline_parse(
|
||||
pluginmanager=pluginmanager, args=args)
|
||||
pluginmanager=pluginmanager, args=args
|
||||
)
|
||||
except BaseException:
|
||||
config._ensure_unconfigure()
|
||||
raise
|
||||
|
@ -189,9 +194,9 @@ class PytestPluginManager(PluginManager):
|
|||
|
||||
self.add_hookspecs(_pytest.hookspec)
|
||||
self.register(self)
|
||||
if os.environ.get('PYTEST_DEBUG'):
|
||||
if os.environ.get("PYTEST_DEBUG"):
|
||||
err = sys.stderr
|
||||
encoding = getattr(err, 'encoding', 'utf8')
|
||||
encoding = getattr(err, "encoding", "utf8")
|
||||
try:
|
||||
err = py.io.dupfile(err, encoding=encoding)
|
||||
except Exception:
|
||||
|
@ -211,11 +216,13 @@ class PytestPluginManager(PluginManager):
|
|||
Use :py:meth:`pluggy.PluginManager.add_hookspecs <PluginManager.add_hookspecs>`
|
||||
instead.
|
||||
"""
|
||||
warning = dict(code="I2",
|
||||
fslocation=_pytest._code.getfslineno(sys._getframe(1)),
|
||||
nodeid=None,
|
||||
message="use pluginmanager.add_hookspecs instead of "
|
||||
"deprecated addhooks() method.")
|
||||
warning = dict(
|
||||
code="I2",
|
||||
fslocation=_pytest._code.getfslineno(sys._getframe(1)),
|
||||
nodeid=None,
|
||||
message="use pluginmanager.add_hookspecs instead of "
|
||||
"deprecated addhooks() method.",
|
||||
)
|
||||
self._warn(warning)
|
||||
return self.add_hookspecs(module_or_class)
|
||||
|
||||
|
@ -243,24 +250,31 @@ class PytestPluginManager(PluginManager):
|
|||
|
||||
def parse_hookspec_opts(self, module_or_class, name):
|
||||
opts = super(PytestPluginManager, self).parse_hookspec_opts(
|
||||
module_or_class, name)
|
||||
module_or_class, name
|
||||
)
|
||||
if opts is None:
|
||||
method = getattr(module_or_class, name)
|
||||
if name.startswith("pytest_"):
|
||||
opts = {"firstresult": hasattr(method, "firstresult"),
|
||||
"historic": hasattr(method, "historic")}
|
||||
opts = {
|
||||
"firstresult": hasattr(method, "firstresult"),
|
||||
"historic": hasattr(method, "historic"),
|
||||
}
|
||||
return opts
|
||||
|
||||
def register(self, plugin, name=None):
|
||||
if name in ['pytest_catchlog', 'pytest_capturelog']:
|
||||
self._warn('{} plugin has been merged into the core, '
|
||||
'please remove it from your requirements.'.format(
|
||||
name.replace('_', '-')))
|
||||
if name in ["pytest_catchlog", "pytest_capturelog"]:
|
||||
self._warn(
|
||||
"{} plugin has been merged into the core, "
|
||||
"please remove it from your requirements.".format(
|
||||
name.replace("_", "-")
|
||||
)
|
||||
)
|
||||
return
|
||||
ret = super(PytestPluginManager, self).register(plugin, name)
|
||||
if ret:
|
||||
self.hook.pytest_plugin_registered.call_historic(
|
||||
kwargs=dict(plugin=plugin, manager=self))
|
||||
kwargs=dict(plugin=plugin, manager=self)
|
||||
)
|
||||
|
||||
if isinstance(plugin, types.ModuleType):
|
||||
self.consider_module(plugin)
|
||||
|
@ -277,20 +291,21 @@ class PytestPluginManager(PluginManager):
|
|||
def pytest_configure(self, config):
|
||||
# XXX now that the pluginmanager exposes hookimpl(tryfirst...)
|
||||
# we should remove tryfirst/trylast as markers
|
||||
config.addinivalue_line("markers",
|
||||
"tryfirst: mark a hook implementation function such that the "
|
||||
"plugin machinery will try to call it first/as early as possible.")
|
||||
config.addinivalue_line("markers",
|
||||
"trylast: mark a hook implementation function such that the "
|
||||
"plugin machinery will try to call it last/as late as possible.")
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"tryfirst: mark a hook implementation function such that the "
|
||||
"plugin machinery will try to call it first/as early as possible.",
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"trylast: mark a hook implementation function such that the "
|
||||
"plugin machinery will try to call it last/as late as possible.",
|
||||
)
|
||||
self._configured = True
|
||||
|
||||
def _warn(self, message):
|
||||
kwargs = message if isinstance(message, dict) else {
|
||||
'code': 'I1',
|
||||
'message': message,
|
||||
'fslocation': None,
|
||||
'nodeid': None,
|
||||
"code": "I1", "message": message, "fslocation": None, "nodeid": None
|
||||
}
|
||||
self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
|
||||
|
||||
|
@ -306,8 +321,9 @@ class PytestPluginManager(PluginManager):
|
|||
here.
|
||||
"""
|
||||
current = py.path.local()
|
||||
self._confcutdir = current.join(namespace.confcutdir, abs=True) \
|
||||
if namespace.confcutdir else None
|
||||
self._confcutdir = current.join(
|
||||
namespace.confcutdir, abs=True
|
||||
) if namespace.confcutdir else None
|
||||
self._noconftest = namespace.noconftest
|
||||
testpaths = namespace.file_or_dir
|
||||
foundanchor = False
|
||||
|
@ -374,8 +390,9 @@ class PytestPluginManager(PluginManager):
|
|||
_ensure_removed_sysmodule(conftestpath.purebasename)
|
||||
try:
|
||||
mod = conftestpath.pyimport()
|
||||
if hasattr(mod, 'pytest_plugins') and self._configured:
|
||||
if hasattr(mod, "pytest_plugins") and self._configured:
|
||||
from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
|
||||
|
||||
warnings.warn(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST)
|
||||
except Exception:
|
||||
raise ConftestImportFailure(conftestpath, sys.exc_info())
|
||||
|
@ -418,7 +435,7 @@ class PytestPluginManager(PluginManager):
|
|||
self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
|
||||
|
||||
def consider_module(self, mod):
|
||||
self._import_plugin_specs(getattr(mod, 'pytest_plugins', []))
|
||||
self._import_plugin_specs(getattr(mod, "pytest_plugins", []))
|
||||
|
||||
def _import_plugin_specs(self, spec):
|
||||
plugins = _get_plugin_specs_as_list(spec)
|
||||
|
@ -430,7 +447,9 @@ class PytestPluginManager(PluginManager):
|
|||
# "terminal" or "capture". Those plugins are registered under their
|
||||
# basename for historic purposes but must be imported with the
|
||||
# _pytest prefix.
|
||||
assert isinstance(modname, (six.text_type, str)), "module name as text required, got %r" % modname
|
||||
assert isinstance(modname, (six.text_type, str)), (
|
||||
"module name as text required, got %r" % modname
|
||||
)
|
||||
modname = str(modname)
|
||||
if self.is_blocked(modname) or self.get_plugin(modname) is not None:
|
||||
return
|
||||
|
@ -443,7 +462,9 @@ class PytestPluginManager(PluginManager):
|
|||
__import__(importspec)
|
||||
except ImportError as e:
|
||||
new_exc_type = ImportError
|
||||
new_exc_message = 'Error importing plugin "%s": %s' % (modname, safe_str(e.args[0]))
|
||||
new_exc_message = 'Error importing plugin "%s": %s' % (
|
||||
modname, safe_str(e.args[0])
|
||||
)
|
||||
new_exc = new_exc_type(new_exc_message)
|
||||
|
||||
six.reraise(new_exc_type, new_exc, sys.exc_info()[2])
|
||||
|
@ -465,10 +486,12 @@ def _get_plugin_specs_as_list(specs):
|
|||
"""
|
||||
if specs is not None:
|
||||
if isinstance(specs, str):
|
||||
specs = specs.split(',') if specs else []
|
||||
specs = specs.split(",") if specs else []
|
||||
if not isinstance(specs, (list, tuple)):
|
||||
raise UsageError("Plugin specs must be a ','-separated string or a "
|
||||
"list/tuple of strings for plugin names. Given: %r" % specs)
|
||||
raise UsageError(
|
||||
"Plugin specs must be a ','-separated string or a "
|
||||
"list/tuple of strings for plugin names. Given: %r" % specs
|
||||
)
|
||||
return list(specs)
|
||||
return []
|
||||
|
||||
|
@ -535,12 +558,14 @@ class Parser(object):
|
|||
|
||||
def parse(self, args, namespace=None):
|
||||
from _pytest._argcomplete import try_argcomplete
|
||||
|
||||
self.optparser = self._getparser()
|
||||
try_argcomplete(self.optparser)
|
||||
return self.optparser.parse_args([str(x) for x in args], namespace=namespace)
|
||||
|
||||
def _getparser(self):
|
||||
from _pytest._argcomplete import filescompleter
|
||||
|
||||
optparser = MyOptionParser(self, self.extra_info)
|
||||
groups = self._groups + [self._anonymous]
|
||||
for group in groups:
|
||||
|
@ -552,7 +577,7 @@ class Parser(object):
|
|||
a = option.attrs()
|
||||
arggroup.add_argument(*n, **a)
|
||||
# bash like autocompletion for dirs (appending '/')
|
||||
optparser.add_argument(FILE_OR_DIR, nargs='*').completer = filescompleter
|
||||
optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter
|
||||
return optparser
|
||||
|
||||
def parse_setoption(self, args, option, namespace=None):
|
||||
|
@ -615,77 +640,74 @@ class Argument(object):
|
|||
and ignoring choices and integer prefixes
|
||||
https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
|
||||
"""
|
||||
_typ_map = {
|
||||
'int': int,
|
||||
'string': str,
|
||||
'float': float,
|
||||
'complex': complex,
|
||||
}
|
||||
_typ_map = {"int": int, "string": str, "float": float, "complex": complex}
|
||||
|
||||
def __init__(self, *names, **attrs):
|
||||
"""store parms in private vars for use in add_argument"""
|
||||
self._attrs = attrs
|
||||
self._short_opts = []
|
||||
self._long_opts = []
|
||||
self.dest = attrs.get('dest')
|
||||
if '%default' in (attrs.get('help') or ''):
|
||||
self.dest = attrs.get("dest")
|
||||
if "%default" in (attrs.get("help") or ""):
|
||||
warnings.warn(
|
||||
'pytest now uses argparse. "%default" should be'
|
||||
' changed to "%(default)s" ',
|
||||
DeprecationWarning,
|
||||
stacklevel=3)
|
||||
stacklevel=3,
|
||||
)
|
||||
try:
|
||||
typ = attrs['type']
|
||||
typ = attrs["type"]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
# this might raise a keyerror as well, don't want to catch that
|
||||
if isinstance(typ, six.string_types):
|
||||
if typ == 'choice':
|
||||
if typ == "choice":
|
||||
warnings.warn(
|
||||
'type argument to addoption() is a string %r.'
|
||||
' For parsearg this is optional and when supplied'
|
||||
' should be a type.'
|
||||
' (options: %s)' % (typ, names),
|
||||
"type argument to addoption() is a string %r."
|
||||
" For parsearg this is optional and when supplied"
|
||||
" should be a type."
|
||||
" (options: %s)" % (typ, names),
|
||||
DeprecationWarning,
|
||||
stacklevel=3)
|
||||
stacklevel=3,
|
||||
)
|
||||
# argparse expects a type here take it from
|
||||
# the type of the first element
|
||||
attrs['type'] = type(attrs['choices'][0])
|
||||
attrs["type"] = type(attrs["choices"][0])
|
||||
else:
|
||||
warnings.warn(
|
||||
'type argument to addoption() is a string %r.'
|
||||
' For parsearg this should be a type.'
|
||||
' (options: %s)' % (typ, names),
|
||||
"type argument to addoption() is a string %r."
|
||||
" For parsearg this should be a type."
|
||||
" (options: %s)" % (typ, names),
|
||||
DeprecationWarning,
|
||||
stacklevel=3)
|
||||
attrs['type'] = Argument._typ_map[typ]
|
||||
stacklevel=3,
|
||||
)
|
||||
attrs["type"] = Argument._typ_map[typ]
|
||||
# used in test_parseopt -> test_parse_defaultgetter
|
||||
self.type = attrs['type']
|
||||
self.type = attrs["type"]
|
||||
else:
|
||||
self.type = typ
|
||||
try:
|
||||
# attribute existence is tested in Config._processopt
|
||||
self.default = attrs['default']
|
||||
self.default = attrs["default"]
|
||||
except KeyError:
|
||||
pass
|
||||
self._set_opt_strings(names)
|
||||
if not self.dest:
|
||||
if self._long_opts:
|
||||
self.dest = self._long_opts[0][2:].replace('-', '_')
|
||||
self.dest = self._long_opts[0][2:].replace("-", "_")
|
||||
else:
|
||||
try:
|
||||
self.dest = self._short_opts[0][1:]
|
||||
except IndexError:
|
||||
raise ArgumentError(
|
||||
'need a long or short option', self)
|
||||
raise ArgumentError("need a long or short option", self)
|
||||
|
||||
def names(self):
|
||||
return self._short_opts + self._long_opts
|
||||
|
||||
def attrs(self):
|
||||
# update any attributes set by processopt
|
||||
attrs = 'default dest help'.split()
|
||||
attrs = "default dest help".split()
|
||||
if self.dest:
|
||||
attrs.append(self.dest)
|
||||
for attr in attrs:
|
||||
|
@ -693,11 +715,11 @@ class Argument(object):
|
|||
self._attrs[attr] = getattr(self, attr)
|
||||
except AttributeError:
|
||||
pass
|
||||
if self._attrs.get('help'):
|
||||
a = self._attrs['help']
|
||||
a = a.replace('%default', '%(default)s')
|
||||
if self._attrs.get("help"):
|
||||
a = self._attrs["help"]
|
||||
a = a.replace("%default", "%(default)s")
|
||||
# a = a.replace('%prog', '%(prog)s')
|
||||
self._attrs['help'] = a
|
||||
self._attrs["help"] = a
|
||||
return self._attrs
|
||||
|
||||
def _set_opt_strings(self, opts):
|
||||
|
@ -708,37 +730,42 @@ class Argument(object):
|
|||
if len(opt) < 2:
|
||||
raise ArgumentError(
|
||||
"invalid option string %r: "
|
||||
"must be at least two characters long" % opt, self)
|
||||
"must be at least two characters long" % opt,
|
||||
self,
|
||||
)
|
||||
elif len(opt) == 2:
|
||||
if not (opt[0] == "-" and opt[1] != "-"):
|
||||
raise ArgumentError(
|
||||
"invalid short option string %r: "
|
||||
"must be of the form -x, (x any non-dash char)" % opt,
|
||||
self)
|
||||
self,
|
||||
)
|
||||
self._short_opts.append(opt)
|
||||
else:
|
||||
if not (opt[0:2] == "--" and opt[2] != "-"):
|
||||
raise ArgumentError(
|
||||
"invalid long option string %r: "
|
||||
"must start with --, followed by non-dash" % opt,
|
||||
self)
|
||||
self,
|
||||
)
|
||||
self._long_opts.append(opt)
|
||||
|
||||
def __repr__(self):
|
||||
args = []
|
||||
if self._short_opts:
|
||||
args += ['_short_opts: ' + repr(self._short_opts)]
|
||||
args += ["_short_opts: " + repr(self._short_opts)]
|
||||
if self._long_opts:
|
||||
args += ['_long_opts: ' + repr(self._long_opts)]
|
||||
args += ['dest: ' + repr(self.dest)]
|
||||
if hasattr(self, 'type'):
|
||||
args += ['type: ' + repr(self.type)]
|
||||
if hasattr(self, 'default'):
|
||||
args += ['default: ' + repr(self.default)]
|
||||
return 'Argument({})'.format(', '.join(args))
|
||||
args += ["_long_opts: " + repr(self._long_opts)]
|
||||
args += ["dest: " + repr(self.dest)]
|
||||
if hasattr(self, "type"):
|
||||
args += ["type: " + repr(self.type)]
|
||||
if hasattr(self, "default"):
|
||||
args += ["default: " + repr(self.default)]
|
||||
return "Argument({})".format(", ".join(args))
|
||||
|
||||
|
||||
class OptionGroup(object):
|
||||
|
||||
def __init__(self, name, description="", parser=None):
|
||||
self.name = name
|
||||
self.description = description
|
||||
|
@ -754,7 +781,8 @@ class OptionGroup(object):
|
|||
accepted **and** the automatic destination is in args.twowords
|
||||
"""
|
||||
conflict = set(optnames).intersection(
|
||||
name for opt in self.options for name in opt.names())
|
||||
name for opt in self.options for name in opt.names()
|
||||
)
|
||||
if conflict:
|
||||
raise ValueError("option names %s already added" % conflict)
|
||||
option = Argument(*optnames, **attrs)
|
||||
|
@ -767,7 +795,7 @@ class OptionGroup(object):
|
|||
def _addoption_instance(self, option, shortupper=False):
|
||||
if not shortupper:
|
||||
for opt in option._short_opts:
|
||||
if opt[0] == '-' and opt[1].islower():
|
||||
if opt[0] == "-" and opt[1].islower():
|
||||
raise ValueError("lowercase shortoptions reserved")
|
||||
if self.parser:
|
||||
self.parser.processoption(option)
|
||||
|
@ -775,12 +803,17 @@ class OptionGroup(object):
|
|||
|
||||
|
||||
class MyOptionParser(argparse.ArgumentParser):
|
||||
|
||||
def __init__(self, parser, extra_info=None):
|
||||
if not extra_info:
|
||||
extra_info = {}
|
||||
self._parser = parser
|
||||
argparse.ArgumentParser.__init__(self, usage=parser._usage,
|
||||
add_help=False, formatter_class=DropShorterLongHelpFormatter)
|
||||
argparse.ArgumentParser.__init__(
|
||||
self,
|
||||
usage=parser._usage,
|
||||
add_help=False,
|
||||
formatter_class=DropShorterLongHelpFormatter,
|
||||
)
|
||||
# extra_info is a dict of (param -> value) to display if there's
|
||||
# an usage error to provide more contextual information to the user
|
||||
self.extra_info = extra_info
|
||||
|
@ -790,11 +823,11 @@ class MyOptionParser(argparse.ArgumentParser):
|
|||
args, argv = self.parse_known_args(args, namespace)
|
||||
if argv:
|
||||
for arg in argv:
|
||||
if arg and arg[0] == '-':
|
||||
lines = ['unrecognized arguments: %s' % (' '.join(argv))]
|
||||
if arg and arg[0] == "-":
|
||||
lines = ["unrecognized arguments: %s" % (" ".join(argv))]
|
||||
for k, v in sorted(self.extra_info.items()):
|
||||
lines.append(' %s: %s' % (k, v))
|
||||
self.error('\n'.join(lines))
|
||||
lines.append(" %s: %s" % (k, v))
|
||||
self.error("\n".join(lines))
|
||||
getattr(args, FILE_OR_DIR).extend(argv)
|
||||
return args
|
||||
|
||||
|
@ -811,41 +844,44 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter):
|
|||
|
||||
def _format_action_invocation(self, action):
|
||||
orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
|
||||
if orgstr and orgstr[0] != '-': # only optional arguments
|
||||
if orgstr and orgstr[0] != "-": # only optional arguments
|
||||
return orgstr
|
||||
res = getattr(action, '_formatted_action_invocation', None)
|
||||
res = getattr(action, "_formatted_action_invocation", None)
|
||||
if res:
|
||||
return res
|
||||
options = orgstr.split(', ')
|
||||
options = orgstr.split(", ")
|
||||
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
|
||||
# a shortcut for '-h, --help' or '--abc', '-a'
|
||||
action._formatted_action_invocation = orgstr
|
||||
return orgstr
|
||||
return_list = []
|
||||
option_map = getattr(action, 'map_long_option', {})
|
||||
option_map = getattr(action, "map_long_option", {})
|
||||
if option_map is None:
|
||||
option_map = {}
|
||||
short_long = {}
|
||||
for option in options:
|
||||
if len(option) == 2 or option[2] == ' ':
|
||||
if len(option) == 2 or option[2] == " ":
|
||||
continue
|
||||
if not option.startswith('--'):
|
||||
raise ArgumentError('long optional argument without "--": [%s]'
|
||||
% (option), self)
|
||||
if not option.startswith("--"):
|
||||
raise ArgumentError(
|
||||
'long optional argument without "--": [%s]' % (option), self
|
||||
)
|
||||
xxoption = option[2:]
|
||||
if xxoption.split()[0] not in option_map:
|
||||
shortened = xxoption.replace('-', '')
|
||||
if shortened not in short_long or \
|
||||
len(short_long[shortened]) < len(xxoption):
|
||||
shortened = xxoption.replace("-", "")
|
||||
if (
|
||||
shortened not in short_long
|
||||
or len(short_long[shortened]) < len(xxoption)
|
||||
):
|
||||
short_long[shortened] = xxoption
|
||||
# now short_long has been filled out to the longest with dashes
|
||||
# **and** we keep the right option ordering from add_argument
|
||||
for option in options:
|
||||
if len(option) == 2 or option[2] == ' ':
|
||||
if len(option) == 2 or option[2] == " ":
|
||||
return_list.append(option)
|
||||
if option[2:] == short_long.get(option.replace('-', '')):
|
||||
return_list.append(option.replace(' ', '=', 1))
|
||||
action._formatted_action_invocation = ', '.join(return_list)
|
||||
if option[2:] == short_long.get(option.replace("-", "")):
|
||||
return_list.append(option.replace(" ", "=", 1))
|
||||
action._formatted_action_invocation = ", ".join(return_list)
|
||||
return action._formatted_action_invocation
|
||||
|
||||
|
||||
|
@ -857,18 +893,19 @@ def _ensure_removed_sysmodule(modname):
|
|||
|
||||
|
||||
class Notset(object):
|
||||
|
||||
def __repr__(self):
|
||||
return "<NOTSET>"
|
||||
|
||||
|
||||
notset = Notset()
|
||||
FILE_OR_DIR = 'file_or_dir'
|
||||
FILE_OR_DIR = "file_or_dir"
|
||||
|
||||
|
||||
def _iter_rewritable_modules(package_files):
|
||||
for fn in package_files:
|
||||
is_simple_module = '/' not in fn and fn.endswith('.py')
|
||||
is_package = fn.count('/') == 1 and fn.endswith('__init__.py')
|
||||
is_simple_module = "/" not in fn and fn.endswith(".py")
|
||||
is_package = fn.count("/") == 1 and fn.endswith("__init__.py")
|
||||
if is_simple_module:
|
||||
module_name, _ = os.path.splitext(fn)
|
||||
yield module_name
|
||||
|
@ -903,6 +940,7 @@ class Config(object):
|
|||
|
||||
def do_setns(dic):
|
||||
import pytest
|
||||
|
||||
setns(pytest, dic)
|
||||
|
||||
self.hook.pytest_namespace.call_historic(do_setns, {})
|
||||
|
@ -929,9 +967,11 @@ class Config(object):
|
|||
|
||||
def warn(self, code, message, fslocation=None, nodeid=None):
|
||||
""" generate a warning for this test session. """
|
||||
self.hook.pytest_logwarning.call_historic(kwargs=dict(
|
||||
code=code, message=message,
|
||||
fslocation=fslocation, nodeid=nodeid))
|
||||
self.hook.pytest_logwarning.call_historic(
|
||||
kwargs=dict(
|
||||
code=code, message=message, fslocation=fslocation, nodeid=nodeid
|
||||
)
|
||||
)
|
||||
|
||||
def get_terminal_writer(self):
|
||||
return self.pluginmanager.get_plugin("terminalreporter")._tw
|
||||
|
@ -946,12 +986,10 @@ class Config(object):
|
|||
style = "long"
|
||||
else:
|
||||
style = "native"
|
||||
excrepr = excinfo.getrepr(funcargs=True,
|
||||
showlocals=getattr(option, 'showlocals', False),
|
||||
style=style,
|
||||
)
|
||||
res = self.hook.pytest_internalerror(excrepr=excrepr,
|
||||
excinfo=excinfo)
|
||||
excrepr = excinfo.getrepr(
|
||||
funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
|
||||
)
|
||||
res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
|
||||
if not any(res):
|
||||
for line in str(excrepr).split("\n"):
|
||||
sys.stderr.write("INTERNALERROR> %s\n" % line)
|
||||
|
@ -978,7 +1016,7 @@ class Config(object):
|
|||
for name in opt._short_opts + opt._long_opts:
|
||||
self._opt2dest[name] = opt.dest
|
||||
|
||||
if hasattr(opt, 'default') and opt.dest:
|
||||
if hasattr(opt, "default") and opt.dest:
|
||||
if not hasattr(self.option, opt.dest):
|
||||
setattr(self.option, opt.dest, opt.default)
|
||||
|
||||
|
@ -987,15 +1025,21 @@ class Config(object):
|
|||
self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
|
||||
|
||||
def _initini(self, args):
|
||||
ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=copy.copy(self.option))
|
||||
r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn,
|
||||
rootdir_cmd_arg=ns.rootdir or None)
|
||||
ns, unknown_args = self._parser.parse_known_and_unknown_args(
|
||||
args, namespace=copy.copy(self.option)
|
||||
)
|
||||
r = determine_setup(
|
||||
ns.inifilename,
|
||||
ns.file_or_dir + unknown_args,
|
||||
warnfunc=self.warn,
|
||||
rootdir_cmd_arg=ns.rootdir or None,
|
||||
)
|
||||
self.rootdir, self.inifile, self.inicfg = r
|
||||
self._parser.extra_info['rootdir'] = self.rootdir
|
||||
self._parser.extra_info['inifile'] = self.inifile
|
||||
self._parser.extra_info["rootdir"] = self.rootdir
|
||||
self._parser.extra_info["inifile"] = self.inifile
|
||||
self.invocation_dir = py.path.local()
|
||||
self._parser.addini('addopts', 'extra command line options', 'args')
|
||||
self._parser.addini('minversion', 'minimally required pytest version')
|
||||
self._parser.addini("addopts", "extra command line options", "args")
|
||||
self._parser.addini("minversion", "minimally required pytest version")
|
||||
self._override_ini = ns.override_ini or ()
|
||||
|
||||
def _consider_importhook(self, args):
|
||||
|
@ -1007,11 +1051,11 @@ class Config(object):
|
|||
"""
|
||||
ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
|
||||
mode = ns.assertmode
|
||||
if mode == 'rewrite':
|
||||
if mode == "rewrite":
|
||||
try:
|
||||
hook = _pytest.assertion.install_importhook(self)
|
||||
except SystemError:
|
||||
mode = 'plain'
|
||||
mode = "plain"
|
||||
else:
|
||||
self._mark_plugins_for_rewrite(hook)
|
||||
_warn_about_missing_assertion(mode)
|
||||
|
@ -1023,17 +1067,18 @@ class Config(object):
|
|||
all pytest plugins.
|
||||
"""
|
||||
import pkg_resources
|
||||
|
||||
self.pluginmanager.rewrite_hook = hook
|
||||
|
||||
# 'RECORD' available for plugins installed normally (pip install)
|
||||
# 'SOURCES.txt' available for plugins installed in dev mode (pip install -e)
|
||||
# for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa
|
||||
# so it shouldn't be an issue
|
||||
metadata_files = 'RECORD', 'SOURCES.txt'
|
||||
metadata_files = "RECORD", "SOURCES.txt"
|
||||
|
||||
package_files = (
|
||||
entry.split(',')[0]
|
||||
for entrypoint in pkg_resources.iter_entry_points('pytest11')
|
||||
entry.split(",")[0]
|
||||
for entrypoint in pkg_resources.iter_entry_points("pytest11")
|
||||
for metadata in metadata_files
|
||||
for entry in entrypoint.dist._get_metadata(metadata)
|
||||
)
|
||||
|
@ -1043,23 +1088,25 @@ class Config(object):
|
|||
|
||||
def _preparse(self, args, addopts=True):
|
||||
if addopts:
|
||||
args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
|
||||
args[:] = shlex.split(os.environ.get("PYTEST_ADDOPTS", "")) + args
|
||||
self._initini(args)
|
||||
if addopts:
|
||||
args[:] = self.getini("addopts") + args
|
||||
self._checkversion()
|
||||
self._consider_importhook(args)
|
||||
self.pluginmanager.consider_preparse(args)
|
||||
self.pluginmanager.load_setuptools_entrypoints('pytest11')
|
||||
self.pluginmanager.load_setuptools_entrypoints("pytest11")
|
||||
self.pluginmanager.consider_env()
|
||||
self.known_args_namespace = ns = self._parser.parse_known_args(
|
||||
args, namespace=copy.copy(self.option))
|
||||
args, namespace=copy.copy(self.option)
|
||||
)
|
||||
if self.known_args_namespace.confcutdir is None and self.inifile:
|
||||
confcutdir = py.path.local(self.inifile).dirname
|
||||
self.known_args_namespace.confcutdir = confcutdir
|
||||
try:
|
||||
self.hook.pytest_load_initial_conftests(early_config=self,
|
||||
args=args, parser=self._parser)
|
||||
self.hook.pytest_load_initial_conftests(
|
||||
early_config=self, args=args, parser=self._parser
|
||||
)
|
||||
except ConftestImportFailure:
|
||||
e = sys.exc_info()[1]
|
||||
if ns.help or ns.version:
|
||||
|
@ -1071,33 +1118,43 @@ class Config(object):
|
|||
|
||||
def _checkversion(self):
import pytest
minver = self.inicfg.get('minversion', None)

minver = self.inicfg.get("minversion", None)
if minver:
ver = minver.split(".")
myver = pytest.__version__.split(".")
if myver < ver:
raise pytest.UsageError(
"%s:%d: requires pytest-%s, actual pytest-%s'" % (
self.inicfg.config.path, self.inicfg.lineof('minversion'),
minver, pytest.__version__))
"%s:%d: requires pytest-%s, actual pytest-%s'"
% (
self.inicfg.config.path,
self.inicfg.lineof("minversion"),
minver,
pytest.__version__,
)
)

def parse(self, args, addopts=True):
# parse given cmdline arguments into this config object.
assert not hasattr(self, 'args'), (
"can only parse cmdline args at most once per Config object")
assert not hasattr(
self, "args"
), "can only parse cmdline args at most once per Config object"
self._origargs = args
self.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=self.pluginmanager))
kwargs=dict(pluginmanager=self.pluginmanager)
)
self._preparse(args, addopts=addopts)
# XXX deprecated hook:
self.hook.pytest_cmdline_preparse(config=self, args=args)
self._parser.after_preparse = True
try:
args = self._parser.parse_setoption(args, self.option, namespace=self.option)
args = self._parser.parse_setoption(
args, self.option, namespace=self.option
)
if not args:
cwd = os.getcwd()
if cwd == self.rootdir:
args = self.getini('testpaths')
args = self.getini("testpaths")
if not args:
args = [cwd]
self.args = args
@@ -1136,7 +1193,7 @@ class Config(object):
if default is not None:
return default
if type is None:
return ''
return ""
return []
if type == "pathlist":
dp = py.path.local(self.inicfg.config.path).dirpath()
@@ -1203,6 +1260,7 @@ class Config(object):
return default
if skip:
import pytest

pytest.skip("no %r option found" % (name,))
raise ValueError("no option named %r" % (name,))
@@ -1226,16 +1284,20 @@ def _assertion_supported():
def _warn_about_missing_assertion(mode):
if not _assertion_supported():
if mode == 'plain':
sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED"
" and FAILING TESTS WILL PASS. Are you"
" using python -O?")
if mode == "plain":
sys.stderr.write(
"WARNING: ASSERTIONS ARE NOT EXECUTED"
" and FAILING TESTS WILL PASS. Are you"
" using python -O?"
)
else:
sys.stderr.write("WARNING: assertions not in test modules or"
" plugins will be ignored"
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n")
sys.stderr.write(
"WARNING: assertions not in test modules or"
" plugins will be ignored"
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n"
)


def exists(path, ignore=EnvironmentError):
@@ -1256,6 +1318,7 @@ def getcfg(args, warnfunc=None):
adopts standard deprecation warnings (#1804).
"""
from _pytest.deprecated import CFG_PYTEST_SECTION

inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
args = [x for x in args if not str(x).startswith("-")]
if not args:
@@ -1267,12 +1330,17 @@ def getcfg(args, warnfunc=None):
p = base.join(inibasename)
if exists(p):
iniconfig = py.iniconfig.IniConfig(p)
if 'pytest' in iniconfig.sections:
if inibasename == 'setup.cfg' and warnfunc:
warnfunc('C1', CFG_PYTEST_SECTION.format(filename=inibasename))
return base, p, iniconfig['pytest']
if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections:
return base, p, iniconfig['tool:pytest']
if "pytest" in iniconfig.sections:
if inibasename == "setup.cfg" and warnfunc:
warnfunc(
"C1", CFG_PYTEST_SECTION.format(filename=inibasename)
)
return base, p, iniconfig["pytest"]
if (
inibasename == "setup.cfg"
and "tool:pytest" in iniconfig.sections
):
return base, p, iniconfig["tool:pytest"]
elif inibasename == "pytest.ini":
# allowed to be empty
return base, p, {}
@@ -1303,11 +1371,12 @@ def get_common_ancestor(paths):
def get_dirs_from_args(args):

def is_option(x):
return str(x).startswith('-')
return str(x).startswith("-")

def get_file_part_from_node_id(x):
return str(x).split('::')[0]
return str(x).split("::")[0]

def get_dir_from_path(path):
if path.isdir():
@@ -1321,26 +1390,23 @@ def get_dirs_from_args(args):
if not is_option(arg)
)

return [
get_dir_from_path(path)
for path in possible_paths
if path.exists()
]
return [get_dir_from_path(path) for path in possible_paths if path.exists()]


def determine_setup(inifile, args, warnfunc=None, rootdir_cmd_arg=None):
dirs = get_dirs_from_args(args)
if inifile:
iniconfig = py.iniconfig.IniConfig(inifile)
is_cfg_file = str(inifile).endswith('.cfg')
is_cfg_file = str(inifile).endswith(".cfg")
# TODO: [pytest] section in *.cfg files is depricated. Need refactoring.
sections = ['tool:pytest', 'pytest'] if is_cfg_file else ['pytest']
sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"]
for section in sections:
try:
inicfg = iniconfig[section]
if is_cfg_file and section == 'pytest' and warnfunc:
if is_cfg_file and section == "pytest" and warnfunc:
from _pytest.deprecated import CFG_PYTEST_SECTION
warnfunc('C1', CFG_PYTEST_SECTION.format(filename=str(inifile)))

warnfunc("C1", CFG_PYTEST_SECTION.format(filename=str(inifile)))
break
except KeyError:
inicfg = None
@@ -1356,19 +1422,24 @@ def determine_setup(inifile, args, warnfunc=None, rootdir_cmd_arg=None):
rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc)
if rootdir is None:
rootdir = get_common_ancestor([py.path.local(), ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == '/'
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir_abs_path = py.path.local(os.path.expandvars(rootdir_cmd_arg))
if not os.path.isdir(str(rootdir_abs_path)):
raise UsageError("Directory '{}' not found. Check your '--rootdir' option.".format(rootdir_abs_path))
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir_abs_path
)
)
rootdir = rootdir_abs_path
return rootdir, inifile, inicfg or {}


def setns(obj, dic):
import pytest

for name, value in dic.items():
if isinstance(value, dict):
mod = getattr(obj, name, None)
@@ -1394,9 +1465,9 @@ def create_terminal_writer(config, *args, **kwargs):
and has access to a config object should use this function.
"""
tw = py.io.TerminalWriter(*args, **kwargs)
if config.option.color == 'yes':
if config.option.color == "yes":
tw.hasmarkup = True
if config.option.color == 'no':
if config.option.color == "no":
tw.hasmarkup = False
return tw
@@ -1411,9 +1482,9 @@ def _strtobool(val):
.. note:: copied from distutils.util
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))

@@ -7,6 +7,7 @@ from doctest import UnexpectedException
try:
from builtins import breakpoint # noqa

SUPPORTS_BREAKPOINT_BUILTIN = True
except ImportError:
SUPPORTS_BREAKPOINT_BUILTIN = False
@@ -15,12 +16,18 @@ except ImportError:
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
'--pdb', dest="usepdb", action="store_true",
help="start the interactive Python debugger on errors or KeyboardInterrupt.")
"--pdb",
dest="usepdb",
action="store_true",
help="start the interactive Python debugger on errors or KeyboardInterrupt.",
)
group._addoption(
'--pdbcls', dest="usepdb_cls", metavar="modulename:classname",
"--pdbcls",
dest="usepdb_cls",
metavar="modulename:classname",
help="start a custom interactive Python debugger on errors. "
"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb")
"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
)


def pytest_configure(config):
@@ -32,12 +39,12 @@ def pytest_configure(config):
pdb_cls = pdb.Pdb

if config.getvalue("usepdb"):
config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
config.pluginmanager.register(PdbInvoke(), "pdbinvoke")

# Use custom Pdb class set_trace instead of default Pdb on breakpoint() call
if SUPPORTS_BREAKPOINT_BUILTIN:
_environ_pythonbreakpoint = os.environ.get('PYTHONBREAKPOINT', '')
if _environ_pythonbreakpoint == '':
_environ_pythonbreakpoint = os.environ.get("PYTHONBREAKPOINT", "")
if _environ_pythonbreakpoint == "":
sys.breakpointhook = pytestPDB.set_trace

old = (pdb.set_trace, pytestPDB._pluginmanager)
@@ -66,6 +73,7 @@ class pytestPDB(object):
def set_trace(cls):
""" invoke PDB set_trace debugging, dropping any IO capturing. """
import _pytest.config

frame = sys._getframe().f_back
if cls._pluginmanager is not None:
capman = cls._pluginmanager.getplugin("capturemanager")
@@ -79,6 +87,7 @@ class pytestPDB(object):


class PdbInvoke(object):

def pytest_exception_interact(self, node, call, report):
capman = node.config.pluginmanager.getplugin("capturemanager")
if capman:
@@ -104,10 +113,10 @@ def _enter_pdb(node, excinfo, rep):
showcapture = node.config.option.showcapture

for sectionname, content in (('stdout', rep.capstdout),
('stderr', rep.capstderr),
('log', rep.caplog)):
if showcapture in (sectionname, 'all') and content:
for sectionname, content in (
("stdout", rep.capstdout), ("stderr", rep.capstderr), ("log", rep.caplog)
):
if showcapture in (sectionname, "all") and content:
tw.sep(">", "captured " + sectionname)
if content[-1:] == "\n":
content = content[:-1]
@@ -139,12 +148,15 @@ def _find_last_non_hidden_frame(stack):
def post_mortem(t):

class Pdb(pytestPDB._pdb_cls):

def get_stack(self, f, t):
stack, i = pdb.Pdb.get_stack(self, f, t)
if f is None:
i = _find_last_non_hidden_frame(stack)
return stack, i

p = Pdb()
p.reset()
p.interaction(None, t)

@@ -12,23 +12,23 @@ class RemovedInPytest4Warning(DeprecationWarning):
"""warning class for features removed in pytest 4.0"""
|
||||
|
||||
|
||||
MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \
|
||||
'pass a list of arguments instead.'
|
||||
MAIN_STR_ARGS = "passing a string to pytest.main() is deprecated, " "pass a list of arguments instead."
|
||||
|
||||
YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0'
|
||||
YIELD_TESTS = "yield tests are deprecated, and scheduled to be removed in pytest 4.0"
|
||||
|
||||
FUNCARG_PREFIX = (
|
||||
'{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated '
|
||||
'and scheduled to be removed in pytest 4.0. '
|
||||
'Please remove the prefix and use the @pytest.fixture decorator instead.')
|
||||
"and scheduled to be removed in pytest 4.0. "
|
||||
"Please remove the prefix and use the @pytest.fixture decorator instead."
|
||||
)
|
||||
|
||||
CFG_PYTEST_SECTION = '[pytest] section in {filename} files is deprecated, use [tool:pytest] instead.'
|
||||
CFG_PYTEST_SECTION = "[pytest] section in {filename} files is deprecated, use [tool:pytest] instead."
|
||||
|
||||
GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue"
|
||||
|
||||
RESULT_LOG = (
|
||||
'--result-log is deprecated and scheduled for removal in pytest 4.0.\n'
|
||||
'See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information.'
|
||||
"--result-log is deprecated and scheduled for removal in pytest 4.0.\n"
|
||||
"See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information."
|
||||
)
|
||||
|
||||
MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning(
|
||||
|
@ -45,13 +45,12 @@ MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning(
|
|||
|
||||
RECORD_XML_PROPERTY = (
'Fixture renamed from "record_xml_property" to "record_property" as user '
'properties are now available to all reporters.\n'
"properties are now available to all reporters.\n"
'"record_xml_property" is now deprecated.'
)

COLLECTOR_MAKEITEM = RemovedInPytest4Warning(
"pycollector makeitem was removed "
"as it is an accidentially leaked internal api"
"pycollector makeitem was removed " "as it is an accidentially leaked internal api"
)

METAFUNC_ADD_CALL = (
@ -10,11 +10,11 @@ from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr
|
|||
from _pytest.fixtures import FixtureRequest
|
||||
|
||||
|
||||
DOCTEST_REPORT_CHOICE_NONE = 'none'
|
||||
DOCTEST_REPORT_CHOICE_CDIFF = 'cdiff'
|
||||
DOCTEST_REPORT_CHOICE_NDIFF = 'ndiff'
|
||||
DOCTEST_REPORT_CHOICE_UDIFF = 'udiff'
|
||||
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = 'only_first_failure'
|
||||
DOCTEST_REPORT_CHOICE_NONE = "none"
|
||||
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
|
||||
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
|
||||
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
|
||||
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
|
||||
|
||||
DOCTEST_REPORT_CHOICES = (
|
||||
DOCTEST_REPORT_CHOICE_NONE,
|
||||
|
@ -29,31 +29,53 @@ RUNNER_CLASS = None
|
|||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addini('doctest_optionflags', 'option flags for doctests',
|
||||
type="args", default=["ELLIPSIS"])
|
||||
parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8")
|
||||
parser.addini(
|
||||
"doctest_optionflags",
|
||||
"option flags for doctests",
|
||||
type="args",
|
||||
default=["ELLIPSIS"],
|
||||
)
|
||||
parser.addini(
|
||||
"doctest_encoding", "encoding used for doctest files", default="utf-8"
|
||||
)
|
||||
group = parser.getgroup("collect")
|
||||
group.addoption("--doctest-modules",
|
||||
action="store_true", default=False,
|
||||
help="run doctests in all .py modules",
|
||||
dest="doctestmodules")
|
||||
group.addoption("--doctest-report",
|
||||
type=str.lower, default="udiff",
|
||||
help="choose another output format for diffs on doctest failure",
|
||||
choices=DOCTEST_REPORT_CHOICES,
|
||||
dest="doctestreport")
|
||||
group.addoption("--doctest-glob",
|
||||
action="append", default=[], metavar="pat",
|
||||
help="doctests file matching pattern, default: test*.txt",
|
||||
dest="doctestglob")
|
||||
group.addoption("--doctest-ignore-import-errors",
|
||||
action="store_true", default=False,
|
||||
help="ignore doctest ImportErrors",
|
||||
dest="doctest_ignore_import_errors")
|
||||
group.addoption("--doctest-continue-on-failure",
|
||||
action="store_true", default=False,
|
||||
help="for a given doctest, continue to run after the first failure",
|
||||
dest="doctest_continue_on_failure")
|
||||
group.addoption(
|
||||
"--doctest-modules",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="run doctests in all .py modules",
|
||||
dest="doctestmodules",
|
||||
)
|
||||
group.addoption(
|
||||
"--doctest-report",
|
||||
type=str.lower,
|
||||
default="udiff",
|
||||
help="choose another output format for diffs on doctest failure",
|
||||
choices=DOCTEST_REPORT_CHOICES,
|
||||
dest="doctestreport",
|
||||
)
|
||||
group.addoption(
|
||||
"--doctest-glob",
|
||||
action="append",
|
||||
default=[],
|
||||
metavar="pat",
|
||||
help="doctests file matching pattern, default: test*.txt",
|
||||
dest="doctestglob",
|
||||
)
|
||||
group.addoption(
|
||||
"--doctest-ignore-import-errors",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="ignore doctest ImportErrors",
|
||||
dest="doctest_ignore_import_errors",
|
||||
)
|
||||
group.addoption(
|
||||
"--doctest-continue-on-failure",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="for a given doctest, continue to run after the first failure",
|
||||
dest="doctest_continue_on_failure",
|
||||
)
|
||||
|
||||
|
||||
def pytest_collect_file(path, parent):
|
||||
|
@ -69,13 +91,13 @@ def _is_setup_py(config, path, parent):
|
|||
if path.basename != "setup.py":
|
||||
return False
|
||||
contents = path.read()
|
||||
return 'setuptools' in contents or 'distutils' in contents
|
||||
return "setuptools" in contents or "distutils" in contents
|
||||
|
||||
|
||||
def _is_doctest(config, path, parent):
|
||||
if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
|
||||
if path.ext in (".txt", ".rst") and parent.session.isinitpath(path):
|
||||
return True
|
||||
globs = config.getoption("doctestglob") or ['test*.txt']
|
||||
globs = config.getoption("doctestglob") or ["test*.txt"]
|
||||
for glob in globs:
|
||||
if path.check(fnmatch=glob):
|
||||
return True
|
||||
|
@ -96,6 +118,7 @@ class ReprFailDoctest(TerminalRepr):
|
|||
|
||||
|
||||
class MultipleDoctestFailures(Exception):
|
||||
|
||||
def __init__(self, failures):
|
||||
super(MultipleDoctestFailures, self).__init__()
|
||||
self.failures = failures
|
||||
|
@ -109,10 +132,13 @@ def _init_runner_class():
|
|||
Runner to collect failures. Note that the out variable in this case is
|
||||
a list instead of a stdout-like object
|
||||
"""
|
||||
def __init__(self, checker=None, verbose=None, optionflags=0,
|
||||
continue_on_failure=True):
|
||||
|
||||
def __init__(
|
||||
self, checker=None, verbose=None, optionflags=0, continue_on_failure=True
|
||||
):
|
||||
doctest.DebugRunner.__init__(
|
||||
self, checker=checker, verbose=verbose, optionflags=optionflags)
|
||||
self, checker=checker, verbose=verbose, optionflags=optionflags
|
||||
)
|
||||
self.continue_on_failure = continue_on_failure
|
||||
|
||||
def report_failure(self, out, test, example, got):
|
||||
|
@ -132,18 +158,21 @@ def _init_runner_class():
|
|||
return PytestDoctestRunner
|
||||
|
||||
|
||||
def _get_runner(checker=None, verbose=None, optionflags=0,
|
||||
continue_on_failure=True):
|
||||
def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True):
|
||||
# We need this in order to do a lazy import on doctest
|
||||
global RUNNER_CLASS
|
||||
if RUNNER_CLASS is None:
|
||||
RUNNER_CLASS = _init_runner_class()
|
||||
return RUNNER_CLASS(
|
||||
checker=checker, verbose=verbose, optionflags=optionflags,
|
||||
continue_on_failure=continue_on_failure)
|
||||
checker=checker,
|
||||
verbose=verbose,
|
||||
optionflags=optionflags,
|
||||
continue_on_failure=continue_on_failure,
|
||||
)
|
||||
|
||||
|
||||
class DoctestItem(pytest.Item):
|
||||
|
||||
def __init__(self, name, parent, runner=None, dtest=None):
|
||||
super(DoctestItem, self).__init__(name, parent)
|
||||
self.runner = runner
|
||||
|
@ -155,7 +184,9 @@ class DoctestItem(pytest.Item):
|
|||
if self.dtest is not None:
|
||||
self.fixture_request = _setup_fixtures(self)
|
||||
globs = dict(getfixture=self.fixture_request.getfixturevalue)
|
||||
for name, value in self.fixture_request.getfixturevalue('doctest_namespace').items():
|
||||
for name, value in self.fixture_request.getfixturevalue(
|
||||
"doctest_namespace"
|
||||
).items():
|
||||
globs[name] = value
|
||||
self.dtest.globs.update(globs)
|
||||
|
||||
|
@ -171,7 +202,7 @@ class DoctestItem(pytest.Item):
|
|||
"""
|
||||
Disable output capturing. Otherwise, stdout is lost to doctest (#985)
|
||||
"""
|
||||
if platform.system() != 'Darwin':
|
||||
if platform.system() != "Darwin":
|
||||
return
|
||||
capman = self.config.pluginmanager.getplugin("capturemanager")
|
||||
if capman:
|
||||
|
@ -181,9 +212,9 @@ class DoctestItem(pytest.Item):
|
|||
|
||||
def repr_failure(self, excinfo):
|
||||
import doctest
|
||||
|
||||
failures = None
|
||||
if excinfo.errisinstance((doctest.DocTestFailure,
|
||||
doctest.UnexpectedException)):
|
||||
if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)):
|
||||
failures = [excinfo.value]
|
||||
elif excinfo.errisinstance(MultipleDoctestFailures):
|
||||
failures = excinfo.value.failures
|
||||
|
@ -201,28 +232,35 @@ class DoctestItem(pytest.Item):
|
|||
message = type(failure).__name__
|
||||
reprlocation = ReprFileLocation(filename, lineno, message)
|
||||
checker = _get_checker()
|
||||
report_choice = _get_report_choice(self.config.getoption("doctestreport"))
|
||||
report_choice = _get_report_choice(
|
||||
self.config.getoption("doctestreport")
|
||||
)
|
||||
if lineno is not None:
|
||||
lines = failure.test.docstring.splitlines(False)
|
||||
# add line numbers to the left of the error message
|
||||
lines = ["%03d %s" % (i + test.lineno + 1, x)
|
||||
for (i, x) in enumerate(lines)]
|
||||
lines = [
|
||||
"%03d %s" % (i + test.lineno + 1, x)
|
||||
for (i, x) in enumerate(lines)
|
||||
]
|
||||
# trim docstring error lines to 10
|
||||
lines = lines[max(example.lineno - 9, 0):example.lineno + 1]
|
||||
else:
|
||||
lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
|
||||
indent = '>>>'
|
||||
lines = [
|
||||
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
|
||||
]
|
||||
indent = ">>>"
|
||||
for line in example.source.splitlines():
|
||||
lines.append('??? %s %s' % (indent, line))
|
||||
indent = '...'
|
||||
lines.append("??? %s %s" % (indent, line))
|
||||
indent = "..."
|
||||
if isinstance(failure, doctest.DocTestFailure):
|
||||
lines += checker.output_difference(example,
|
||||
failure.got,
|
||||
report_choice).split("\n")
|
||||
lines += checker.output_difference(
|
||||
example, failure.got, report_choice
|
||||
).split(
|
||||
"\n"
|
||||
)
|
||||
else:
|
||||
inner_excinfo = ExceptionInfo(failure.exc_info)
|
||||
lines += ["UNEXPECTED EXCEPTION: %s" %
|
||||
repr(inner_excinfo.value)]
|
||||
lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
|
||||
lines += traceback.format_exception(*failure.exc_info)
|
||||
reprlocation_lines.append((reprlocation, lines))
|
||||
return ReprFailDoctest(reprlocation_lines)
|
||||
|
@ -235,15 +273,17 @@ class DoctestItem(pytest.Item):
|
|||
|
||||
def _get_flag_lookup():
|
||||
import doctest
|
||||
return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
|
||||
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
|
||||
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
|
||||
ELLIPSIS=doctest.ELLIPSIS,
|
||||
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
|
||||
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
|
||||
ALLOW_UNICODE=_get_allow_unicode_flag(),
|
||||
ALLOW_BYTES=_get_allow_bytes_flag(),
|
||||
)
|
||||
|
||||
return dict(
|
||||
DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
|
||||
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
|
||||
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
|
||||
ELLIPSIS=doctest.ELLIPSIS,
|
||||
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
|
||||
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
|
||||
ALLOW_UNICODE=_get_allow_unicode_flag(),
|
||||
ALLOW_BYTES=_get_allow_bytes_flag(),
|
||||
)
|
||||
|
||||
|
||||
def get_optionflags(parent):
|
||||
|
@ -256,7 +296,7 @@ def get_optionflags(parent):
|
|||
|
||||
|
||||
def _get_continue_on_failure(config):
|
||||
continue_on_failure = config.getvalue('doctest_continue_on_failure')
|
||||
continue_on_failure = config.getvalue("doctest_continue_on_failure")
|
||||
if continue_on_failure:
|
||||
# We need to turn off this if we use pdb since we should stop at
|
||||
# the first failure
|
||||
|
@ -277,14 +317,16 @@ class DoctestTextfile(pytest.Module):
|
|||
text = self.fspath.read_text(encoding)
|
||||
filename = str(self.fspath)
|
||||
name = self.fspath.basename
|
||||
globs = {'__name__': '__main__'}
|
||||
globs = {"__name__": "__main__"}
|
||||
|
||||
optionflags = get_optionflags(self)
|
||||
|
||||
runner = _get_runner(
|
||||
verbose=0, optionflags=optionflags,
|
||||
verbose=0,
|
||||
optionflags=optionflags,
|
||||
checker=_get_checker(),
|
||||
continue_on_failure=_get_continue_on_failure(self.config))
|
||||
continue_on_failure=_get_continue_on_failure(self.config),
|
||||
)
|
||||
_fix_spoof_python2(runner, encoding)
|
||||
|
||||
parser = doctest.DocTestParser()
|
||||
|
@ -298,31 +340,36 @@ def _check_all_skipped(test):
|
|||
option set.
|
||||
"""
|
||||
import doctest
|
||||
|
||||
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
|
||||
if all_skipped:
|
||||
pytest.skip('all tests skipped by +SKIP option')
|
||||
pytest.skip("all tests skipped by +SKIP option")
|
||||
|
||||
|
||||
class DoctestModule(pytest.Module):
|
||||
|
||||
def collect(self):
|
||||
import doctest
|
||||
|
||||
if self.fspath.basename == "conftest.py":
|
||||
module = self.config.pluginmanager._importconftest(self.fspath)
|
||||
else:
|
||||
try:
|
||||
module = self.fspath.pyimport()
|
||||
except ImportError:
|
||||
if self.config.getvalue('doctest_ignore_import_errors'):
|
||||
pytest.skip('unable to import module %r' % self.fspath)
|
||||
if self.config.getvalue("doctest_ignore_import_errors"):
|
||||
pytest.skip("unable to import module %r" % self.fspath)
|
||||
else:
|
||||
raise
|
||||
# uses internal doctest module parsing mechanism
|
||||
finder = doctest.DocTestFinder()
|
||||
optionflags = get_optionflags(self)
|
||||
runner = _get_runner(
|
||||
verbose=0, optionflags=optionflags,
|
||||
verbose=0,
|
||||
optionflags=optionflags,
|
||||
checker=_get_checker(),
|
||||
continue_on_failure=_get_continue_on_failure(self.config))
|
||||
continue_on_failure=_get_continue_on_failure(self.config),
|
||||
)
|
||||
|
||||
for test in finder.find(module, module.__name__):
|
||||
if test.examples: # skip empty doctests
|
||||
|
@ -333,13 +380,15 @@ def _setup_fixtures(doctest_item):
|
|||
"""
|
||||
Used by DoctestTextfile and DoctestItem to setup fixture information.
|
||||
"""
|
||||
|
||||
def func():
|
||||
pass
|
||||
|
||||
doctest_item.funcargs = {}
|
||||
fm = doctest_item.session._fixturemanager
|
||||
doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
|
||||
cls=None, funcargs=False)
|
||||
doctest_item._fixtureinfo = fm.getfixtureinfo(
|
||||
node=doctest_item, func=func, cls=None, funcargs=False
|
||||
)
|
||||
fixture_request = FixtureRequest(doctest_item)
|
||||
fixture_request._fillfixtures()
|
||||
return fixture_request
|
||||
|
@ -355,7 +404,7 @@ def _get_checker():
|
|||
An inner class is used to avoid importing "doctest" at the module
|
||||
level.
|
||||
"""
|
||||
if hasattr(_get_checker, 'LiteralsOutputChecker'):
|
||||
if hasattr(_get_checker, "LiteralsOutputChecker"):
|
||||
return _get_checker.LiteralsOutputChecker()
|
||||
|
||||
import doctest
|
||||
|
@ -373,8 +422,7 @@ def _get_checker():
|
|||
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
|
||||
|
||||
def check_output(self, want, got, optionflags):
|
||||
res = doctest.OutputChecker.check_output(self, want, got,
|
||||
optionflags)
|
||||
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
|
||||
if res:
|
||||
return True
|
||||
|
||||
|
@ -384,8 +432,9 @@ def _get_checker():
|
|||
return False
|
||||
|
||||
else: # pragma: no cover
|
||||
|
||||
def remove_prefixes(regex, txt):
|
||||
return re.sub(regex, r'\1\2', txt)
|
||||
return re.sub(regex, r"\1\2", txt)
|
||||
|
||||
if allow_unicode:
|
||||
want = remove_prefixes(self._unicode_literal_re, want)
|
||||
|
@ -393,8 +442,7 @@ def _get_checker():
|
|||
if allow_bytes:
|
||||
want = remove_prefixes(self._bytes_literal_re, want)
|
||||
got = remove_prefixes(self._bytes_literal_re, got)
|
||||
res = doctest.OutputChecker.check_output(self, want, got,
|
||||
optionflags)
|
||||
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
|
||||
return res
|
||||
|
||||
_get_checker.LiteralsOutputChecker = LiteralsOutputChecker
|
||||
|
@ -406,7 +454,8 @@ def _get_allow_unicode_flag():
|
|||
Registers and returns the ALLOW_UNICODE flag.
|
||||
"""
|
||||
import doctest
|
||||
return doctest.register_optionflag('ALLOW_UNICODE')
|
||||
|
||||
return doctest.register_optionflag("ALLOW_UNICODE")
|
||||
|
||||
|
||||
def _get_allow_bytes_flag():
|
||||
|
@ -414,7 +463,8 @@ def _get_allow_bytes_flag():
|
|||
Registers and returns the ALLOW_BYTES flag.
|
||||
"""
|
||||
import doctest
|
||||
return doctest.register_optionflag('ALLOW_BYTES')
|
||||
|
||||
return doctest.register_optionflag("ALLOW_BYTES")
|
||||
|
||||
|
||||
def _get_report_choice(key):
|
||||
|
@ -430,7 +480,9 @@ def _get_report_choice(key):
|
|||
DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
|
||||
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
|
||||
DOCTEST_REPORT_CHOICE_NONE: 0,
|
||||
}[key]
|
||||
}[
|
||||
key
|
||||
]
|
||||
|
||||
|
||||
def _fix_spoof_python2(runner, encoding):
|
||||
|
@ -443,6 +495,7 @@ def _fix_spoof_python2(runner, encoding):
|
|||
This fixes the problem related in issue #2434.
|
||||
"""
|
||||
from _pytest.compat import _PY2
|
||||
|
||||
if not _PY2:
|
||||
return
|
||||
|
||||
|
@ -459,7 +512,7 @@ def _fix_spoof_python2(runner, encoding):
|
|||
runner._fakeout = UnicodeSpoof()
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@pytest.fixture(scope="session")
|
||||
def doctest_namespace():
|
||||
"""
|
||||
Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
|
||||
|
|
|
@ -15,10 +15,16 @@ import _pytest
|
|||
from _pytest import nodes
|
||||
from _pytest._code.code import TerminalRepr
|
||||
from _pytest.compat import (
|
||||
NOTSET, exc_clear, _format_args,
|
||||
getfslineno, get_real_func,
|
||||
is_generator, isclass, getimfunc,
|
||||
getlocation, getfuncargnames,
|
||||
NOTSET,
|
||||
exc_clear,
|
||||
_format_args,
|
||||
getfslineno,
|
||||
get_real_func,
|
||||
is_generator,
|
||||
isclass,
|
||||
getimfunc,
|
||||
getlocation,
|
||||
getfuncargnames,
|
||||
safe_getattr,
|
||||
FuncargnamesCompatAttr,
|
||||
)
|
||||
|
@ -35,12 +41,14 @@ def pytest_sessionstart(session):
|
|||
import _pytest.python
|
||||
import _pytest.nodes
|
||||
|
||||
scopename2class.update({
|
||||
'class': _pytest.python.Class,
|
||||
'module': _pytest.python.Module,
|
||||
'function': _pytest.nodes.Item,
|
||||
'session': _pytest.main.Session,
|
||||
})
|
||||
scopename2class.update(
|
||||
{
|
||||
"class": _pytest.python.Class,
|
||||
"module": _pytest.python.Module,
|
||||
"function": _pytest.nodes.Item,
|
||||
"session": _pytest.main.Session,
|
||||
}
|
||||
)
|
||||
session._fixturemanager = FixtureManager(session)
|
||||
|
||||
|
||||
|
@ -50,21 +58,24 @@ scopename2class = {}
|
|||
scope2props = dict(session=())
|
||||
scope2props["module"] = ("fspath", "module")
|
||||
scope2props["class"] = scope2props["module"] + ("cls",)
|
||||
scope2props["instance"] = scope2props["class"] + ("instance", )
|
||||
scope2props["instance"] = scope2props["class"] + ("instance",)
|
||||
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
|
||||
|
||||
|
||||
def scopeproperty(name=None, doc=None):
|
||||
|
||||
def decoratescope(func):
|
||||
scopename = name or func.__name__
|
||||
|
||||
def provide(self):
|
||||
if func.__name__ in scope2props[self.scope]:
|
||||
return func(self)
|
||||
raise AttributeError("%s not available in %s-scoped context" % (
|
||||
scopename, self.scope))
|
||||
raise AttributeError(
|
||||
"%s not available in %s-scoped context" % (scopename, self.scope)
|
||||
)
|
||||
|
||||
return property(provide, None, None, func.__doc__)
|
||||
|
||||
return decoratescope
|
||||
|
||||
|
||||
|
@ -95,8 +106,7 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
|
|||
callspec.indices[argname] = len(arg2params_list)
|
||||
arg2params_list.append(argvalue)
|
||||
if argname not in arg2scope:
|
||||
scopenum = callspec._arg2scopenum.get(argname,
|
||||
scopenum_function)
|
||||
scopenum = callspec._arg2scopenum.get(argname, scopenum_function)
|
||||
arg2scope[argname] = scopes[scopenum]
|
||||
callspec.funcargs.clear()
|
||||
|
||||
|
@ -119,10 +129,16 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
|
|||
if node and argname in node._name2pseudofixturedef:
|
||||
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
|
||||
else:
|
||||
fixturedef = FixtureDef(fixturemanager, '', argname,
|
||||
get_direct_param_fixture_func,
|
||||
arg2scope[argname],
|
||||
valuelist, False, False)
|
||||
fixturedef = FixtureDef(
|
||||
fixturemanager,
|
||||
"",
|
||||
argname,
|
||||
get_direct_param_fixture_func,
|
||||
arg2scope[argname],
|
||||
valuelist,
|
||||
False,
|
||||
False,
|
||||
)
|
||||
arg2fixturedefs[argname] = [fixturedef]
|
||||
if node is not None:
|
||||
node._name2pseudofixturedef[argname] = fixturedef
|
||||
|
@ -154,7 +170,7 @@ def get_parametrized_fixture_keys(item, scopenum):
|
|||
for argname, param_index in sorted(cs.indices.items()):
|
||||
if cs._arg2scopenum[argname] != scopenum:
|
||||
continue
|
||||
if scopenum == 0: # session
|
||||
if scopenum == 0: # session
|
||||
key = (argname, param_index)
|
||||
elif scopenum == 1: # module
|
||||
key = (argname, param_index, item.fspath)
|
||||
|
@ -168,6 +184,7 @@ def get_parametrized_fixture_keys(item, scopenum):
|
|||
# down to the lower scopes such as to minimize number of "high scope"
|
||||
# setups and teardowns
|
||||
|
||||
|
||||
def reorder_items(items):
|
||||
argkeys_cache = {}
|
||||
items_by_argkey = {}
|
||||
|
@ -205,20 +222,25 @@ def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum):
|
|||
item = items_deque.popleft()
|
||||
if item in items_done or item in no_argkey_group:
|
||||
continue
|
||||
argkeys = OrderedDict.fromkeys(k for k in scoped_argkeys_cache.get(item, []) if k not in ignore)
|
||||
argkeys = OrderedDict.fromkeys(
|
||||
k for k in scoped_argkeys_cache.get(item, []) if k not in ignore
|
||||
)
|
||||
if not argkeys:
|
||||
no_argkey_group[item] = None
|
||||
else:
|
||||
slicing_argkey, _ = argkeys.popitem()
|
||||
# we don't have to remove relevant items from later in the deque because they'll just be ignored
|
||||
matching_items = [i for i in scoped_items_by_argkey[slicing_argkey] if i in items]
|
||||
matching_items = [
|
||||
i for i in scoped_items_by_argkey[slicing_argkey] if i in items
|
||||
]
|
||||
for i in reversed(matching_items):
|
||||
fix_cache_order(i, argkeys_cache, items_by_argkey)
|
||||
items_deque.appendleft(i)
|
||||
break
|
||||
if no_argkey_group:
|
||||
no_argkey_group = reorder_items_atscope(
|
||||
no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1)
|
||||
no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1
|
||||
)
|
||||
for item in no_argkey_group:
|
||||
items_done[item] = None
|
||||
ignore.add(slicing_argkey)
|
||||
|
@ -252,6 +274,7 @@ def get_direct_param_fixture_func(request):
|
|||
|
||||
|
||||
class FuncFixtureInfo(object):
|
||||
|
||||
def __init__(self, argnames, names_closure, name2fixturedefs):
|
||||
self.argnames = argnames
|
||||
self.names_closure = names_closure
|
||||
|
@ -362,7 +385,8 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
def _addfinalizer(self, finalizer, scope):
|
||||
colitem = self._getscopeitem(scope)
|
||||
self._pyfuncitem.session._setupstate.addfinalizer(
|
||||
finalizer=finalizer, colitem=colitem)
|
||||
finalizer=finalizer, colitem=colitem
|
||||
)
|
||||
|
||||
def applymarker(self, marker):
|
||||
""" Apply a marker to a single test function invocation.
|
||||
|
@ -400,7 +424,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
or ``session`` indicating the caching lifecycle of the resource.
|
||||
:arg extrakey: added to internal caching key of (funcargname, scope).
|
||||
"""
|
||||
if not hasattr(self.config, '_setupcache'):
|
||||
if not hasattr(self.config, "_setupcache"):
|
||||
self.config._setupcache = {} # XXX weakref?
|
||||
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
|
||||
cache = self.config._setupcache
|
||||
|
@ -411,9 +435,11 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
val = setup()
|
||||
cache[cachekey] = val
|
||||
if teardown is not None:
|
||||
|
||||
def finalizer():
|
||||
del cache[cachekey]
|
||||
teardown(val)
|
||||
|
||||
self._addfinalizer(finalizer, scope=scope)
|
||||
return val
|
||||
|
||||
|
@ -430,10 +456,8 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
def getfuncargvalue(self, argname):
|
||||
""" Deprecated, use getfixturevalue. """
|
||||
from _pytest import deprecated
|
||||
warnings.warn(
|
||||
deprecated.GETFUNCARGVALUE,
|
||||
DeprecationWarning,
|
||||
stacklevel=2)
|
||||
|
||||
warnings.warn(deprecated.GETFUNCARGVALUE, DeprecationWarning, stacklevel=2)
|
||||
return self.getfixturevalue(argname)
|
||||
|
||||
def _get_active_fixturedef(self, argname):
|
||||
|
@ -524,8 +548,10 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
fixturedef.execute(request=subrequest)
|
||||
finally:
|
||||
# if fixture function failed it might have registered finalizers
|
||||
self.session._setupstate.addfinalizer(functools.partial(fixturedef.finish, request=subrequest),
|
||||
subrequest.node)
|
||||
self.session._setupstate.addfinalizer(
|
||||
functools.partial(fixturedef.finish, request=subrequest),
|
||||
subrequest.node,
|
||||
)
|
||||
|
||||
def _check_scope(self, argname, invoking_scope, requested_scope):
|
||||
if argname == "request":
|
||||
|
@ -533,11 +559,13 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
if scopemismatch(invoking_scope, requested_scope):
|
||||
# try to report something helpful
|
||||
lines = self._factorytraceback()
|
||||
fail("ScopeMismatch: You tried to access the %r scoped "
|
||||
"fixture %r with a %r scoped request object, "
|
||||
"involved factories\n%s" % (
|
||||
(requested_scope, argname, invoking_scope, "\n".join(lines))),
|
||||
pytrace=False)
|
||||
fail(
|
||||
"ScopeMismatch: You tried to access the %r scoped "
|
||||
"fixture %r with a %r scoped request object, "
|
||||
"involved factories\n%s"
|
||||
% ((requested_scope, argname, invoking_scope, "\n".join(lines))),
|
||||
pytrace=False,
|
||||
)
|
||||
|
||||
def _factorytraceback(self):
|
||||
lines = []
|
||||
|
@ -546,8 +574,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
fs, lineno = getfslineno(factory)
|
||||
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
|
||||
args = _format_args(factory)
|
||||
lines.append("%s:%d: def %s%s" % (
|
||||
p, lineno, factory.__name__, args))
|
||||
lines.append("%s:%d: def %s%s" % (p, lineno, factory.__name__, args))
|
||||
return lines
|
||||
|
||||
def _getscopeitem(self, scope):
|
||||
|
@ -558,7 +585,9 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
if node is None and scope == "class":
|
||||
# fallback to function item itself
|
||||
node = self._pyfuncitem
|
||||
assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(scope, self._pyfuncitem)
|
||||
assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
|
||||
scope, self._pyfuncitem
|
||||
)
|
||||
return node
|
||||
|
||||
def __repr__(self):
|
||||
|
@ -613,8 +642,8 @@ def scope2index(scope, descr, where=None):
|
|||
except ValueError:
|
||||
raise ValueError(
|
||||
"{} {}has an unsupported scope value '{}'".format(
|
||||
descr, 'from {} '.format(where) if where else '',
|
||||
scope)
|
||||
descr, "from {} ".format(where) if where else "", scope
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
|
@ -649,7 +678,7 @@ class FixtureLookupError(LookupError):
|
|||
for i, line in enumerate(lines):
|
||||
line = line.rstrip()
|
||||
addline(" " + line)
|
||||
if line.lstrip().startswith('def'):
|
||||
if line.lstrip().startswith("def"):
|
||||
break
|
||||
|
||||
if msg is None:
|
||||
|
@ -668,6 +697,7 @@ class FixtureLookupError(LookupError):
|
|||
|
||||
|
||||
class FixtureLookupErrorRepr(TerminalRepr):
|
||||
|
||||
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
|
||||
self.tblines = tblines
|
||||
self.errorstring = errorstring
|
||||
|
@ -681,11 +711,15 @@ class FixtureLookupErrorRepr(TerminalRepr):
|
|||
tw.line(tbline.rstrip())
|
||||
lines = self.errorstring.split("\n")
|
||||
if lines:
|
||||
tw.line('{} {}'.format(FormattedExcinfo.fail_marker,
|
||||
lines[0].strip()), red=True)
|
||||
tw.line(
|
||||
"{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()),
|
||||
red=True,
|
||||
)
|
||||
for line in lines[1:]:
|
||||
tw.line('{} {}'.format(FormattedExcinfo.flow_marker,
|
||||
line.strip()), red=True)
|
||||
tw.line(
|
||||
"{} {}".format(FormattedExcinfo.flow_marker, line.strip()),
|
||||
red=True,
|
||||
)
|
||||
tw.line()
|
||||
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
|
||||
|
||||
|
@ -694,8 +728,7 @@ def fail_fixturefunc(fixturefunc, msg):
|
|||
fs, lineno = getfslineno(fixturefunc)
|
||||
location = "%s:%s" % (fs, lineno + 1)
|
||||
source = _pytest._code.Source(fixturefunc)
|
||||
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
|
||||
pytrace=False)
|
||||
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False)
|
||||
|
||||
|
||||
def call_fixture_func(fixturefunc, request, kwargs):
|
||||
|
@ -710,8 +743,9 @@ def call_fixture_func(fixturefunc, request, kwargs):
|
|||
except StopIteration:
|
||||
pass
|
||||
else:
|
||||
fail_fixturefunc(fixturefunc,
|
||||
"yield_fixture function has more than one 'yield'")
|
||||
fail_fixturefunc(
|
||||
fixturefunc, "yield_fixture function has more than one 'yield'"
|
||||
)
|
||||
|
||||
request.addfinalizer(teardown)
|
||||
else:
|
||||
|
@ -722,18 +756,25 @@ def call_fixture_func(fixturefunc, request, kwargs):
|
|||
class FixtureDef(object):
|
||||
""" A container for a factory definition. """
|
||||
|
||||
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
|
||||
unittest=False, ids=None):
|
||||
def __init__(
|
||||
self,
|
||||
fixturemanager,
|
||||
baseid,
|
||||
argname,
|
||||
func,
|
||||
scope,
|
||||
params,
|
||||
unittest=False,
|
||||
ids=None,
|
||||
):
|
||||
self._fixturemanager = fixturemanager
|
||||
self.baseid = baseid or ''
|
||||
self.baseid = baseid or ""
|
||||
self.has_location = baseid is not None
|
||||
self.func = func
|
||||
self.argname = argname
|
||||
self.scope = scope
|
||||
self.scopenum = scope2index(
|
||||
scope or "function",
|
||||
descr='fixture {}'.format(func.__name__),
|
||||
where=baseid
|
||||
scope or "function", descr="fixture {}".format(func.__name__), where=baseid
|
||||
)
|
||||
self.params = params
|
||||
self.argnames = getfuncargnames(func, is_method=unittest)
|
||||
|
@ -795,8 +836,10 @@ class FixtureDef(object):
|
|||
return hook.pytest_fixture_setup(fixturedef=self, request=request)
|
||||
|
||||
def __repr__(self):
|
||||
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
|
||||
(self.argname, self.scope, self.baseid))
|
||||
return (
|
||||
"<FixtureDef name=%r scope=%r baseid=%r >"
|
||||
% (self.argname, self.scope, self.baseid)
|
||||
)
|
||||
|
||||
|
||||
def pytest_fixture_setup(fixturedef, request):
|
||||
|
@ -849,12 +892,12 @@ class FixtureFunctionMarker(object):
|
|||
|
||||
def __call__(self, function):
|
||||
if isclass(function):
|
||||
raise ValueError(
|
||||
"class fixtures not supported (may be in the future)")
|
||||
raise ValueError("class fixtures not supported (may be in the future)")
|
||||
|
||||
if getattr(function, "_pytestfixturefunction", False):
|
||||
raise ValueError(
|
||||
"fixture is being applied more than once to the same function")
|
||||
"fixture is being applied more than once to the same function"
|
||||
)
|
||||
|
||||
function._pytestfixturefunction = self
|
||||
return function
|
||||
|
@ -900,8 +943,7 @@ def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
|
|||
"""
|
||||
if callable(scope) and params is None and autouse is False:
|
||||
# direct decoration
|
||||
return FixtureFunctionMarker(
|
||||
"function", params, autouse, name=name)(scope)
|
||||
return FixtureFunctionMarker("function", params, autouse, name=name)(scope)
|
||||
if params is not None and not isinstance(params, (list, tuple)):
|
||||
params = list(params)
|
||||
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
|
||||
|
@ -915,8 +957,9 @@ def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=N
|
|||
"""
|
||||
if callable(scope) and params is None and not autouse:
|
||||
# direct decoration
|
||||
return FixtureFunctionMarker(
|
||||
"function", params, autouse, ids=ids, name=name)(scope)
|
||||
return FixtureFunctionMarker("function", params, autouse, ids=ids, name=name)(
|
||||
scope
|
||||
)
|
||||
else:
|
||||
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
|
||||
|
||||
|
@ -988,12 +1031,13 @@ class FixtureManager(object):
|
|||
argnames = getfuncargnames(func, cls=cls)
|
||||
else:
|
||||
argnames = ()
|
||||
usefixtures = flatten(mark.args for mark in node.iter_markers(name="usefixtures"))
|
||||
usefixtures = flatten(
|
||||
mark.args for mark in node.iter_markers(name="usefixtures")
|
||||
)
|
||||
initialnames = argnames
|
||||
initialnames = tuple(usefixtures) + initialnames
|
||||
fm = node.session._fixturemanager
|
||||
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
|
||||
node)
|
||||
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, node)
|
||||
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
|
||||
|
||||
def pytest_plugin_registered(self, plugin):
|
||||
|
@ -1058,7 +1102,7 @@ class FixtureManager(object):
|
|||
try:
|
||||
fixturedefs = arg2fixturedefs[arg_name]
|
||||
except KeyError:
|
||||
return scopes.index('function')
|
||||
return scopes.index("function")
|
||||
else:
|
||||
return fixturedefs[-1].scopenum
|
||||
|
||||
|
@ -1071,11 +1115,11 @@ class FixtureManager(object):
|
|||
if faclist:
|
||||
fixturedef = faclist[-1]
|
||||
if fixturedef.params is not None:
|
||||
parametrize_func = getattr(metafunc.function, 'parametrize', None)
|
||||
parametrize_func = getattr(metafunc.function, "parametrize", None)
|
||||
if parametrize_func is not None:
|
||||
parametrize_func = parametrize_func.combined
|
||||
func_params = getattr(parametrize_func, 'args', [[None]])
|
||||
func_kwargs = getattr(parametrize_func, 'kwargs', {})
|
||||
func_params = getattr(parametrize_func, "args", [[None]])
|
||||
func_kwargs = getattr(parametrize_func, "kwargs", {})
|
||||
# skip directly parametrized arguments
|
||||
if "argnames" in func_kwargs:
|
||||
argnames = parametrize_func.kwargs["argnames"]
|
||||
|
@ -1084,9 +1128,13 @@ class FixtureManager(object):
|
|||
if not isinstance(argnames, (tuple, list)):
|
||||
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
|
||||
if argname not in func_params and argname not in argnames:
|
||||
metafunc.parametrize(argname, fixturedef.params,
|
||||
indirect=True, scope=fixturedef.scope,
|
||||
ids=fixturedef.ids)
|
||||
metafunc.parametrize(
|
||||
argname,
|
||||
fixturedef.params,
|
||||
indirect=True,
|
||||
scope=fixturedef.scope,
|
||||
ids=fixturedef.ids,
|
||||
)
|
||||
else:
|
||||
continue # will raise FixtureLookupError at setup time
|
||||
|
||||
|
@ -1118,7 +1166,10 @@ class FixtureManager(object):
|
|||
continue
|
||||
marker = defaultfuncargprefixmarker
|
||||
from _pytest import deprecated
|
||||
self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
|
||||
|
||||
self.config.warn(
|
||||
"C1", deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid
|
||||
)
|
||||
name = name[len(self._argprefix):]
|
||||
elif not isinstance(marker, FixtureFunctionMarker):
|
||||
# magic globals with __getattr__ might have got us a wrong
|
||||
|
@ -1127,13 +1178,19 @@ class FixtureManager(object):
|
|||
else:
|
||||
if marker.name:
|
||||
name = marker.name
|
||||
msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
|
||||
'and be decorated with @pytest.fixture:\n%s' % name
|
||||
msg = 'fixtures cannot have "pytest_funcarg__" prefix ' "and be decorated with @pytest.fixture:\n%s" % name
|
||||
assert not name.startswith(self._argprefix), msg
|
||||
|
||||
fixture_def = FixtureDef(self, nodeid, name, obj,
|
||||
marker.scope, marker.params,
|
||||
unittest=unittest, ids=marker.ids)
|
||||
fixture_def = FixtureDef(
|
||||
self,
|
||||
nodeid,
|
||||
name,
|
||||
obj,
|
||||
marker.scope,
|
||||
marker.params,
|
||||
unittest=unittest,
|
||||
ids=marker.ids,
|
||||
)
|
||||
|
||||
faclist = self._arg2fixturedefs.setdefault(name, [])
|
||||
if fixture_def.has_location:
|
||||
|
@ -1149,7 +1206,7 @@ class FixtureManager(object):
|
|||
autousenames.append(name)
|
||||
|
||||
if autousenames:
|
||||
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
|
||||
self._nodeid_and_autousenames.append((nodeid or "", autousenames))
|
||||
|
||||
def getfixturedefs(self, argname, nodeid):
|
||||
"""
|
||||
|
|
|
@@ -12,12 +12,13 @@ def freeze_includes():
"""
import py
import _pytest

result = list(_iter_all_modules(py))
result += list(_iter_all_modules(_pytest))
return result


def _iter_all_modules(package, prefix=''):
def _iter_all_modules(package, prefix=""):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
@@ -31,13 +32,14 @@ def _iter_all_modules(package, prefix=''):
"""
import os
import pkgutil

if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + '.'
path, prefix = package.__path__[0], package.__name__ + "."
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."):
yield prefix + m
else:
yield prefix + name
|
|
|
@ -18,48 +18,69 @@ class HelpAction(Action):
|
|||
implemented by raising SystemExit.
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
option_strings,
|
||||
dest=None,
|
||||
default=False,
|
||||
help=None):
|
||||
def __init__(self, option_strings, dest=None, default=False, help=None):
|
||||
super(HelpAction, self).__init__(
|
||||
option_strings=option_strings,
|
||||
dest=dest,
|
||||
const=True,
|
||||
default=default,
|
||||
nargs=0,
|
||||
help=help)
|
||||
help=help,
|
||||
)
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
setattr(namespace, self.dest, self.const)
|
||||
|
||||
# We should only skip the rest of the parsing after preparse is done
|
||||
if getattr(parser._parser, 'after_preparse', False):
|
||||
if getattr(parser._parser, "after_preparse", False):
|
||||
raise PrintHelp
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup('debugconfig')
|
||||
group.addoption('--version', action="store_true",
|
||||
help="display pytest lib version and import information.")
|
||||
group._addoption("-h", "--help", action=HelpAction, dest="help",
|
||||
help="show help message and configuration info")
|
||||
group._addoption('-p', action="append", dest="plugins", default=[],
|
||||
metavar="name",
|
||||
help="early-load given plugin (multi-allowed). "
|
||||
"To avoid loading of plugins, use the `no:` prefix, e.g. "
|
||||
"`no:doctest`.")
|
||||
group.addoption('--traceconfig', '--trace-config',
|
||||
action="store_true", default=False,
|
||||
help="trace considerations of conftest.py files."),
|
||||
group.addoption('--debug',
|
||||
action="store_true", dest="debug", default=False,
|
||||
help="store internal tracing debug information in 'pytestdebug.log'.")
|
||||
group = parser.getgroup("debugconfig")
|
||||
group.addoption(
|
||||
"--version",
|
||||
action="store_true",
|
||||
help="display pytest lib version and import information.",
|
||||
)
|
||||
group._addoption(
|
||||
'-o', '--override-ini', dest="override_ini",
|
||||
"-h",
|
||||
"--help",
|
||||
action=HelpAction,
|
||||
dest="help",
|
||||
help="show help message and configuration info",
|
||||
)
|
||||
group._addoption(
|
||||
"-p",
|
||||
action="append",
|
||||
help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.')
|
||||
dest="plugins",
|
||||
default=[],
|
||||
metavar="name",
|
||||
help="early-load given plugin (multi-allowed). "
|
||||
"To avoid loading of plugins, use the `no:` prefix, e.g. "
|
||||
"`no:doctest`.",
|
||||
)
|
||||
group.addoption(
|
||||
"--traceconfig",
|
||||
"--trace-config",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="trace considerations of conftest.py files.",
|
||||
),
|
||||
group.addoption(
|
||||
"--debug",
|
||||
action="store_true",
|
||||
dest="debug",
|
||||
default=False,
|
||||
help="store internal tracing debug information in 'pytestdebug.log'.",
|
||||
)
|
||||
group._addoption(
|
||||
"-o",
|
||||
"--override-ini",
|
||||
dest="override_ini",
|
||||
action="append",
|
||||
help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.',
|
||||
)
|
||||
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
|
@ -68,20 +89,25 @@ def pytest_cmdline_parse():
|
|||
config = outcome.get_result()
|
||||
if config.option.debug:
|
||||
path = os.path.abspath("pytestdebug.log")
|
||||
debugfile = open(path, 'w')
|
||||
debugfile.write("versions pytest-%s, py-%s, "
|
||||
"python-%s\ncwd=%s\nargs=%s\n\n" % (
|
||||
pytest.__version__, py.__version__,
|
||||
".".join(map(str, sys.version_info)),
|
||||
os.getcwd(), config._origargs))
|
||||
debugfile = open(path, "w")
|
||||
debugfile.write(
|
||||
"versions pytest-%s, py-%s, "
|
||||
"python-%s\ncwd=%s\nargs=%s\n\n"
|
||||
% (
|
||||
pytest.__version__,
|
||||
py.__version__,
|
||||
".".join(map(str, sys.version_info)),
|
||||
os.getcwd(),
|
||||
config._origargs,
|
||||
)
|
||||
)
|
||||
config.trace.root.setwriter(debugfile.write)
|
||||
undo_tracing = config.pluginmanager.enable_tracing()
|
||||
sys.stderr.write("writing pytestdebug information to %s\n" % path)
|
||||
|
||||
def unset_tracing():
|
||||
debugfile.close()
|
||||
sys.stderr.write("wrote pytestdebug information to %s\n" %
|
||||
debugfile.name)
|
||||
sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name)
|
||||
config.trace.root.setwriter(None)
|
||||
undo_tracing()
|
||||
|
||||
|
@ -91,8 +117,9 @@ def pytest_cmdline_parse():
|
|||
def pytest_cmdline_main(config):
|
||||
if config.option.version:
|
||||
p = py.path.local(pytest.__file__)
|
||||
sys.stderr.write("This is pytest version %s, imported from %s\n" %
|
||||
(pytest.__version__, p))
|
||||
sys.stderr.write(
|
||||
"This is pytest version %s, imported from %s\n" % (pytest.__version__, p)
|
||||
)
|
||||
plugininfo = getpluginversioninfo(config)
|
||||
if plugininfo:
|
||||
for line in plugininfo:
|
||||
|
@ -106,13 +133,14 @@ def pytest_cmdline_main(config):
|
|||
|
||||
|
||||
def showhelp(config):
|
||||
reporter = config.pluginmanager.get_plugin('terminalreporter')
|
||||
reporter = config.pluginmanager.get_plugin("terminalreporter")
|
||||
tw = reporter._tw
|
||||
tw.write(config._parser.optparser.format_help())
|
||||
tw.line()
|
||||
tw.line()
|
||||
tw.line("[pytest] ini-options in the first "
|
||||
"pytest.ini|tox.ini|setup.cfg file found:")
|
||||
tw.line(
|
||||
"[pytest] ini-options in the first " "pytest.ini|tox.ini|setup.cfg file found:"
|
||||
)
|
||||
tw.line()
|
||||
|
||||
for name in config._parser._ininames:
|
||||
|
@ -128,7 +156,7 @@ def showhelp(config):
|
|||
vars = [
|
||||
("PYTEST_ADDOPTS", "extra command line options"),
|
||||
("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
|
||||
("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
|
||||
("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"),
|
||||
]
|
||||
for name, help in vars:
|
||||
tw.line(" %-24s %s" % (name, help))
|
||||
|
@ -137,18 +165,18 @@ def showhelp(config):
|
|||
|
||||
tw.line("to see available markers type: pytest --markers")
|
||||
tw.line("to see available fixtures type: pytest --fixtures")
|
||||
tw.line("(shown according to specified file_or_dir or current dir "
|
||||
"if not specified; fixtures with leading '_' are only shown "
|
||||
"with the '-v' option")
|
||||
tw.line(
|
||||
"(shown according to specified file_or_dir or current dir "
|
||||
"if not specified; fixtures with leading '_' are only shown "
|
||||
"with the '-v' option"
|
||||
)
|
||||
|
||||
for warningreport in reporter.stats.get('warnings', []):
|
||||
for warningreport in reporter.stats.get("warnings", []):
|
||||
tw.line("warning : " + warningreport.message, red=True)
|
||||
return
|
||||
|
||||
|
||||
conftest_options = [
|
||||
('pytest_plugins', 'list of plugin names to load'),
|
||||
]
|
||||
conftest_options = [("pytest_plugins", "list of plugin names to load")]
|
||||
|
||||
|
||||
def getpluginversioninfo(config):
|
||||
|
@@ -157,7 +185,7 @@ def getpluginversioninfo(config):
|
|||
if plugininfo:
|
||||
lines.append("setuptools registered plugins:")
|
||||
for plugin, dist in plugininfo:
|
||||
loc = getattr(plugin, '__file__', repr(plugin))
|
||||
loc = getattr(plugin, "__file__", repr(plugin))
|
||||
content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
|
||||
lines.append(" " + content)
|
||||
return lines
|
||||
|
@@ -166,8 +194,7 @@ def getpluginversioninfo(config):
|
|||
def pytest_report_header(config):
|
||||
lines = []
|
||||
if config.option.debug or config.option.traceconfig:
|
||||
lines.append("using: pytest-%s pylib-%s" %
|
||||
(pytest.__version__, py.__version__))
|
||||
lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__))
|
||||
|
||||
verinfo = getpluginversioninfo(config)
|
||||
if verinfo:
|
||||
|
@@ -177,7 +204,7 @@ def pytest_report_header(config):
|
|||
lines.append("active plugins:")
|
||||
items = config.pluginmanager.list_name_plugin()
|
||||
for name, plugin in items:
|
||||
if hasattr(plugin, '__file__'):
|
||||
if hasattr(plugin, "__file__"):
|
||||
r = plugin.__file__
|
||||
else:
|
||||
r = repr(plugin)
|
||||
@@ -98,6 +98,7 @@ def pytest_configure(config):
|
|||
:arg _pytest.config.Config config: pytest config object
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Bootstrapping hooks called for plugins registered early enough:
|
||||
# internal and 3rd party plugins.
|
||||
|
@@ -163,6 +164,7 @@ def pytest_load_initial_conftests(early_config, parser, args):
|
|||
# collection hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_collection(session):
|
||||
"""Perform the collection protocol for the given session.
|
||||
|
@@ -220,6 +222,7 @@ def pytest_collect_file(path, parent):
|
|||
:param str path: the path to collect
|
||||
"""
|
||||
|
||||
|
||||
# logging hooks for collection
|
||||
|
||||
|
||||
|
@@ -245,6 +248,7 @@ def pytest_make_collect_report(collector):
|
|||
|
||||
Stops at first non-None result, see :ref:`firstresult` """
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Python test function related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
@@ -291,6 +295,7 @@ def pytest_make_parametrize_id(config, val, argname):
|
|||
:param str argname: the automatic parameter name produced by pytest
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# generic runtest related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
@@ -382,6 +387,7 @@ def pytest_runtest_logreport(report):
|
|||
""" process a test setup/call/teardown report relating to
|
||||
the respective phase of executing a test. """
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Fixture related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
@@ -407,6 +413,7 @@ def pytest_fixture_post_finalizer(fixturedef, request):
|
|||
the fixture result cache ``fixturedef.cached_result`` can
|
||||
still be accessed."""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# test session related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
@@ -439,6 +446,7 @@ def pytest_unconfigure(config):
|
|||
# hooks for customizing the assert methods
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
def pytest_assertrepr_compare(config, op, left, right):
|
||||
"""return explanation for comparisons in failing assert expressions.
|
||||
|
||||
|
@@ -450,6 +458,7 @@ def pytest_assertrepr_compare(config, op, left, right):
|
|||
:param _pytest.config.Config config: pytest config object
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# hooks for influencing reporting (invoked from _pytest_terminal)
|
||||
# -------------------------------------------------------------------------
|
||||
|
@@ -511,6 +520,7 @@ def pytest_logwarning(message, code, nodeid, fslocation):
|
|||
This hook is incompatible with ``hookwrapper=True``.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# doctest hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
@@ -522,6 +532,7 @@ def pytest_doctest_prepare_content(content):
|
|||
|
||||
Stops at first non-None result, see :ref:`firstresult` """
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# error handling and internal debugging hooks
|
||||
# -------------------------------------------------------------------------
|
||||
@@ -39,15 +39,14 @@ class Junit(py.xml.Namespace):
|
|||
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
|
||||
# | [#x10000-#x10FFFF]
|
||||
_legal_chars = (0x09, 0x0A, 0x0d)
|
||||
_legal_ranges = (
|
||||
(0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
|
||||
)
|
||||
_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF))
|
||||
_legal_xml_re = [
|
||||
unicode("%s-%s") % (unichr(low), unichr(high))
|
||||
for (low, high) in _legal_ranges if low < sys.maxunicode
|
||||
for (low, high) in _legal_ranges
|
||||
if low < sys.maxunicode
|
||||
]
|
||||
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
|
||||
illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
|
||||
illegal_xml_re = re.compile(unicode("[^%s]") % unicode("").join(_legal_xml_re))
|
||||
del _legal_chars
|
||||
del _legal_ranges
|
||||
del _legal_xml_re
|
||||
|
@@ -56,17 +55,19 @@ _py_ext_re = re.compile(r"\.py$")
|
|||
|
||||
|
||||
def bin_xml_escape(arg):
|
||||
|
||||
def repl(matchobj):
|
||||
i = ord(matchobj.group())
|
||||
if i <= 0xFF:
|
||||
return unicode('#x%02X') % i
|
||||
return unicode("#x%02X") % i
|
||||
else:
|
||||
return unicode('#x%04X') % i
|
||||
return unicode("#x%04X") % i
|
||||
|
||||
return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
|
||||
|
||||
|
||||
class _NodeReporter(object):
|
||||
|
||||
def __init__(self, nodeid, xml):
|
||||
|
||||
self.id = nodeid
|
||||
|
@@ -92,11 +93,13 @@ class _NodeReporter(object):
|
|||
"""Return a Junit node containing custom properties, if any.
|
||||
"""
|
||||
if self.properties:
|
||||
return Junit.properties([
|
||||
Junit.property(name=name, value=value)
|
||||
for name, value in self.properties
|
||||
])
|
||||
return ''
|
||||
return Junit.properties(
|
||||
[
|
||||
Junit.property(name=name, value=value)
|
||||
for name, value in self.properties
|
||||
]
|
||||
)
|
||||
return ""
|
||||
|
||||
def record_testreport(self, testreport):
|
||||
assert not self.testcase
|
||||
|
@@ -135,53 +138,57 @@ class _NodeReporter(object):
|
|||
content_err = report.capstderr
|
||||
|
||||
if content_log or content_out:
|
||||
if content_log and self.xml.logging == 'system-out':
|
||||
if content_log and self.xml.logging == "system-out":
|
||||
if content_out:
|
||||
# syncing stdout and the log-output is not done yet. It's
|
||||
# probably not worth the effort. Therefore, first the captured
|
||||
# stdout is shown and then the captured logs.
|
||||
content = '\n'.join([
|
||||
' Captured Stdout '.center(80, '-'),
|
||||
content_out,
|
||||
'',
|
||||
' Captured Log '.center(80, '-'),
|
||||
content_log])
|
||||
content = "\n".join(
|
||||
[
|
||||
" Captured Stdout ".center(80, "-"),
|
||||
content_out,
|
||||
"",
|
||||
" Captured Log ".center(80, "-"),
|
||||
content_log,
|
||||
]
|
||||
)
|
||||
else:
|
||||
content = content_log
|
||||
else:
|
||||
content = content_out
|
||||
|
||||
if content:
|
||||
tag = getattr(Junit, 'system-out')
|
||||
tag = getattr(Junit, "system-out")
|
||||
self.append(tag(bin_xml_escape(content)))
|
||||
|
||||
if content_log or content_err:
|
||||
if content_log and self.xml.logging == 'system-err':
|
||||
if content_log and self.xml.logging == "system-err":
|
||||
if content_err:
|
||||
content = '\n'.join([
|
||||
' Captured Stderr '.center(80, '-'),
|
||||
content_err,
|
||||
'',
|
||||
' Captured Log '.center(80, '-'),
|
||||
content_log])
|
||||
content = "\n".join(
|
||||
[
|
||||
" Captured Stderr ".center(80, "-"),
|
||||
content_err,
|
||||
"",
|
||||
" Captured Log ".center(80, "-"),
|
||||
content_log,
|
||||
]
|
||||
)
|
||||
else:
|
||||
content = content_log
|
||||
else:
|
||||
content = content_err
|
||||
|
||||
if content:
|
||||
tag = getattr(Junit, 'system-err')
|
||||
tag = getattr(Junit, "system-err")
|
||||
self.append(tag(bin_xml_escape(content)))
|
||||
|
||||
def append_pass(self, report):
|
||||
self.add_stats('passed')
|
||||
self.add_stats("passed")
|
||||
|
||||
def append_failure(self, report):
|
||||
# msg = str(report.longrepr.reprtraceback.extraline)
|
||||
if hasattr(report, "wasxfail"):
|
||||
self._add_simple(
|
||||
Junit.skipped,
|
||||
"xfail-marked test passes unexpectedly")
|
||||
self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly")
|
||||
else:
|
||||
if hasattr(report.longrepr, "reprcrash"):
|
||||
message = report.longrepr.reprcrash.message
|
||||
|
@@ -196,34 +203,34 @@ class _NodeReporter(object):
|
|||
|
||||
def append_collect_error(self, report):
|
||||
# msg = str(report.longrepr.reprtraceback.extraline)
|
||||
self.append(Junit.error(bin_xml_escape(report.longrepr),
|
||||
message="collection failure"))
|
||||
self.append(
|
||||
Junit.error(bin_xml_escape(report.longrepr), message="collection failure")
|
||||
)
|
||||
|
||||
def append_collect_skipped(self, report):
|
||||
self._add_simple(
|
||||
Junit.skipped, "collection skipped", report.longrepr)
|
||||
self._add_simple(Junit.skipped, "collection skipped", report.longrepr)
|
||||
|
||||
def append_error(self, report):
|
||||
if getattr(report, 'when', None) == 'teardown':
|
||||
if getattr(report, "when", None) == "teardown":
|
||||
msg = "test teardown failure"
|
||||
else:
|
||||
msg = "test setup failure"
|
||||
self._add_simple(
|
||||
Junit.error, msg, report.longrepr)
|
||||
self._add_simple(Junit.error, msg, report.longrepr)
|
||||
|
||||
def append_skipped(self, report):
|
||||
if hasattr(report, "wasxfail"):
|
||||
self._add_simple(
|
||||
Junit.skipped, "expected test failure", report.wasxfail
|
||||
)
|
||||
self._add_simple(Junit.skipped, "expected test failure", report.wasxfail)
|
||||
else:
|
||||
filename, lineno, skipreason = report.longrepr
|
||||
if skipreason.startswith("Skipped: "):
|
||||
skipreason = bin_xml_escape(skipreason[9:])
|
||||
self.append(
|
||||
Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
|
||||
type="pytest.skip",
|
||||
message=skipreason))
|
||||
Junit.skipped(
|
||||
"%s:%s: %s" % (filename, lineno, skipreason),
|
||||
type="pytest.skip",
|
||||
message=skipreason,
|
||||
)
|
||||
)
|
||||
self.write_captured_output(report)
|
||||
|
||||
def finalize(self):
|
||||
|
@@ -245,8 +252,10 @@ def record_property(request):
|
|||
def test_function(record_property):
|
||||
record_property("example_key", 1)
|
||||
"""
|
||||
|
||||
def append_property(name, value):
|
||||
request.node.user_properties.append((name, value))
|
||||
|
||||
return append_property
|
||||
|
||||
|
||||
|
@@ -255,11 +264,8 @@ def record_xml_property(record_property):
|
|||
"""(Deprecated) use record_property."""
|
||||
import warnings
|
||||
from _pytest import deprecated
|
||||
warnings.warn(
|
||||
deprecated.RECORD_XML_PROPERTY,
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
|
||||
warnings.warn(deprecated.RECORD_XML_PROPERTY, DeprecationWarning, stacklevel=2)
|
||||
|
||||
return record_property
|
||||
|
||||
|
@@ -271,14 +277,14 @@ def record_xml_attribute(request):
|
|||
automatically xml-encoded
|
||||
"""
|
||||
request.node.warn(
|
||||
code='C3',
|
||||
message='record_xml_attribute is an experimental feature',
|
||||
code="C3", message="record_xml_attribute is an experimental feature"
|
||||
)
|
||||
xml = getattr(request.config, "_xml", None)
|
||||
if xml is not None:
|
||||
node_reporter = xml.node_reporter(request.node.nodeid)
|
||||
return node_reporter.add_attribute
|
||||
else:
|
||||
|
||||
def add_attr_noop(name, value):
|
||||
pass
|
||||
|
||||
|
@@ -288,51 +294,63 @@ def record_xml_attribute(request):
|
|||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting")
|
||||
group.addoption(
|
||||
'--junitxml', '--junit-xml',
|
||||
"--junitxml",
|
||||
"--junit-xml",
|
||||
action="store",
|
||||
dest="xmlpath",
|
||||
metavar="path",
|
||||
type=functools.partial(filename_arg, optname="--junitxml"),
|
||||
default=None,
|
||||
help="create junit-xml style report file at given path.")
|
||||
help="create junit-xml style report file at given path.",
|
||||
)
|
||||
group.addoption(
|
||||
'--junitprefix', '--junit-prefix',
|
||||
"--junitprefix",
|
||||
"--junit-prefix",
|
||||
action="store",
|
||||
metavar="str",
|
||||
default=None,
|
||||
help="prepend prefix to classnames in junit-xml output")
|
||||
parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest")
|
||||
parser.addini("junit_logging", "Write captured log messages to JUnit report: "
|
||||
"one of no|system-out|system-err",
|
||||
default="no") # choices=['no', 'stdout', 'stderr'])
|
||||
help="prepend prefix to classnames in junit-xml output",
|
||||
)
|
||||
parser.addini(
|
||||
"junit_suite_name", "Test suite name for JUnit report", default="pytest"
|
||||
)
|
||||
parser.addini(
|
||||
"junit_logging",
|
||||
"Write captured log messages to JUnit report: "
|
||||
"one of no|system-out|system-err",
|
||||
default="no",
|
||||
) # choices=['no', 'stdout', 'stderr'])
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
xmlpath = config.option.xmlpath
|
||||
# prevent opening xmllog on slave nodes (xdist)
|
||||
if xmlpath and not hasattr(config, 'slaveinput'):
|
||||
config._xml = LogXML(xmlpath, config.option.junitprefix,
|
||||
config.getini("junit_suite_name"),
|
||||
config.getini("junit_logging"))
|
||||
if xmlpath and not hasattr(config, "slaveinput"):
|
||||
config._xml = LogXML(
|
||||
xmlpath,
|
||||
config.option.junitprefix,
|
||||
config.getini("junit_suite_name"),
|
||||
config.getini("junit_logging"),
|
||||
)
|
||||
config.pluginmanager.register(config._xml)
|
||||
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
xml = getattr(config, '_xml', None)
|
||||
xml = getattr(config, "_xml", None)
|
||||
if xml:
|
||||
del config._xml
|
||||
config.pluginmanager.unregister(xml)
|
||||
|
||||
|
||||
def mangle_test_address(address):
|
||||
path, possible_open_bracket, params = address.partition('[')
|
||||
path, possible_open_bracket, params = address.partition("[")
|
||||
names = path.split("::")
|
||||
try:
|
||||
names.remove('()')
|
||||
names.remove("()")
|
||||
except ValueError:
|
||||
pass
|
||||
# convert file path to dotted path
|
||||
names[0] = names[0].replace(nodes.SEP, '.')
|
||||
names[0] = names[0].replace(nodes.SEP, ".")
|
||||
names[0] = _py_ext_re.sub("", names[0])
|
||||
# put any params back
|
||||
names[-1] += possible_open_bracket + params
|
||||
|
@@ -340,18 +358,14 @@ def mangle_test_address(address):
|
|||
|
||||
|
||||
class LogXML(object):
|
||||
|
||||
def __init__(self, logfile, prefix, suite_name="pytest", logging="no"):
|
||||
logfile = os.path.expanduser(os.path.expandvars(logfile))
|
||||
self.logfile = os.path.normpath(os.path.abspath(logfile))
|
||||
self.prefix = prefix
|
||||
self.suite_name = suite_name
|
||||
self.logging = logging
|
||||
self.stats = dict.fromkeys([
|
||||
'error',
|
||||
'passed',
|
||||
'failure',
|
||||
'skipped',
|
||||
], 0)
|
||||
self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0)
|
||||
self.node_reporters = {} # nodeid -> _NodeReporter
|
||||
self.node_reporters_ordered = []
|
||||
self.global_properties = []
|
||||
|
@@ -360,17 +374,17 @@ class LogXML(object):
|
|||
self.cnt_double_fail_tests = 0
|
||||
|
||||
def finalize(self, report):
|
||||
nodeid = getattr(report, 'nodeid', report)
|
||||
nodeid = getattr(report, "nodeid", report)
|
||||
# local hack to handle xdist report order
|
||||
slavenode = getattr(report, 'node', None)
|
||||
slavenode = getattr(report, "node", None)
|
||||
reporter = self.node_reporters.pop((nodeid, slavenode))
|
||||
if reporter is not None:
|
||||
reporter.finalize()
|
||||
|
||||
def node_reporter(self, report):
|
||||
nodeid = getattr(report, 'nodeid', report)
|
||||
nodeid = getattr(report, "nodeid", report)
|
||||
# local hack to handle xdist report order
|
||||
slavenode = getattr(report, 'node', None)
|
||||
slavenode = getattr(report, "node", None)
|
||||
|
||||
key = nodeid, slavenode
|
||||
|
||||
|
@@ -428,12 +442,17 @@ class LogXML(object):
|
|||
report_wid = getattr(report, "worker_id", None)
|
||||
report_ii = getattr(report, "item_index", None)
|
||||
close_report = next(
|
||||
(rep for rep in self.open_reports
|
||||
if (rep.nodeid == report.nodeid and
|
||||
getattr(rep, "item_index", None) == report_ii and
|
||||
getattr(rep, "worker_id", None) == report_wid
|
||||
)
|
||||
), None)
|
||||
(
|
||||
rep
|
||||
for rep in self.open_reports
|
||||
if (
|
||||
rep.nodeid == report.nodeid
|
||||
and getattr(rep, "item_index", None) == report_ii
|
||||
and getattr(rep, "worker_id", None) == report_wid
|
||||
)
|
||||
),
|
||||
None,
|
||||
)
|
||||
if close_report:
|
||||
# We need to open new testcase in case we have failure in
|
||||
# call and error in teardown in order to follow junit
|
||||
|
@@ -461,12 +480,17 @@ class LogXML(object):
|
|||
report_wid = getattr(report, "worker_id", None)
|
||||
report_ii = getattr(report, "item_index", None)
|
||||
close_report = next(
|
||||
(rep for rep in self.open_reports
|
||||
if (rep.nodeid == report.nodeid and
|
||||
getattr(rep, "item_index", None) == report_ii and
|
||||
getattr(rep, "worker_id", None) == report_wid
|
||||
)
|
||||
), None)
|
||||
(
|
||||
rep
|
||||
for rep in self.open_reports
|
||||
if (
|
||||
rep.nodeid == report.nodeid
|
||||
and getattr(rep, "item_index", None) == report_ii
|
||||
and getattr(rep, "worker_id", None) == report_wid
|
||||
)
|
||||
),
|
||||
None,
|
||||
)
|
||||
if close_report:
|
||||
self.open_reports.remove(close_report)
|
||||
|
||||
|
@@ -475,7 +499,7 @@ class LogXML(object):
|
|||
the Junit.testcase with the new total if already created.
|
||||
"""
|
||||
reporter = self.node_reporter(report)
|
||||
reporter.duration += getattr(report, 'duration', 0.0)
|
||||
reporter.duration += getattr(report, "duration", 0.0)
|
||||
|
||||
def pytest_collectreport(self, report):
|
||||
if not report.passed:
|
||||
|
@@ -486,9 +510,9 @@ class LogXML(object):
|
|||
reporter.append_collect_skipped(report)
|
||||
|
||||
def pytest_internalerror(self, excrepr):
|
||||
reporter = self.node_reporter('internal')
|
||||
reporter.attrs.update(classname="pytest", name='internal')
|
||||
reporter._add_simple(Junit.error, 'internal error', excrepr)
|
||||
reporter = self.node_reporter("internal")
|
||||
reporter.attrs.update(classname="pytest", name="internal")
|
||||
reporter._add_simple(Junit.error, "internal error", excrepr)
|
||||
|
||||
def pytest_sessionstart(self):
|
||||
self.suite_start_time = time.time()
|
||||
|
@@ -497,29 +521,37 @@ class LogXML(object):
|
|||
dirname = os.path.dirname(os.path.abspath(self.logfile))
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname)
|
||||
logfile = open(self.logfile, 'w', encoding='utf-8')
|
||||
logfile = open(self.logfile, "w", encoding="utf-8")
|
||||
suite_stop_time = time.time()
|
||||
suite_time_delta = suite_stop_time - self.suite_start_time
|
||||
|
||||
numtests = (self.stats['passed'] + self.stats['failure'] +
|
||||
self.stats['skipped'] + self.stats['error'] -
|
||||
self.cnt_double_fail_tests)
|
||||
numtests = (
|
||||
self.stats["passed"]
|
||||
+ self.stats["failure"]
|
||||
+ self.stats["skipped"]
|
||||
+ self.stats["error"]
|
||||
- self.cnt_double_fail_tests
|
||||
)
|
||||
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
|
||||
|
||||
logfile.write(Junit.testsuite(
|
||||
self._get_global_properties_node(),
|
||||
[x.to_xml() for x in self.node_reporters_ordered],
|
||||
name=self.suite_name,
|
||||
errors=self.stats['error'],
|
||||
failures=self.stats['failure'],
|
||||
skips=self.stats['skipped'],
|
||||
tests=numtests,
|
||||
time="%.3f" % suite_time_delta, ).unicode(indent=0))
|
||||
logfile.write(
|
||||
Junit.testsuite(
|
||||
self._get_global_properties_node(),
|
||||
[x.to_xml() for x in self.node_reporters_ordered],
|
||||
name=self.suite_name,
|
||||
errors=self.stats["error"],
|
||||
failures=self.stats["failure"],
|
||||
skips=self.stats["skipped"],
|
||||
tests=numtests,
|
||||
time="%.3f" % suite_time_delta,
|
||||
).unicode(
|
||||
indent=0
|
||||
)
|
||||
)
|
||||
logfile.close()
|
||||
|
||||
def pytest_terminal_summary(self, terminalreporter):
|
||||
terminalreporter.write_sep("-",
|
||||
"generated xml file: %s" % (self.logfile))
|
||||
terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
|
||||
|
||||
def add_global_property(self, name, value):
|
||||
self.global_properties.append((str(name), bin_xml_escape(value)))
|
||||
|
@@ -534,4 +566,4 @@ class LogXML(object):
|
|||
for name, value in self.global_properties
|
||||
]
|
||||
)
|
||||
return ''
|
||||
return ""
|
||||
@@ -11,8 +11,8 @@ import pytest
|
|||
import py
|
||||
|
||||
|
||||
DEFAULT_LOG_FORMAT = '%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s'
|
||||
DEFAULT_LOG_DATE_FORMAT = '%H:%M:%S'
|
||||
DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
|
||||
DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
|
||||
|
||||
|
||||
class ColoredLevelFormatter(logging.Formatter):
|
||||
|
@@ -21,19 +21,18 @@ class ColoredLevelFormatter(logging.Formatter):
|
|||
"""
|
||||
|
||||
LOGLEVEL_COLOROPTS = {
|
||||
logging.CRITICAL: {'red'},
|
||||
logging.ERROR: {'red', 'bold'},
|
||||
logging.WARNING: {'yellow'},
|
||||
logging.WARN: {'yellow'},
|
||||
logging.INFO: {'green'},
|
||||
logging.DEBUG: {'purple'},
|
||||
logging.CRITICAL: {"red"},
|
||||
logging.ERROR: {"red", "bold"},
|
||||
logging.WARNING: {"yellow"},
|
||||
logging.WARN: {"yellow"},
|
||||
logging.INFO: {"green"},
|
||||
logging.DEBUG: {"purple"},
|
||||
logging.NOTSET: set(),
|
||||
}
|
||||
LEVELNAME_FMT_REGEX = re.compile(r'%\(levelname\)([+-]?\d*s)')
|
||||
LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)")
|
||||
|
||||
def __init__(self, terminalwriter, *args, **kwargs):
|
||||
super(ColoredLevelFormatter, self).__init__(
|
||||
*args, **kwargs)
|
||||
super(ColoredLevelFormatter, self).__init__(*args, **kwargs)
|
||||
if six.PY2:
|
||||
self._original_fmt = self._fmt
|
||||
else:
|
||||
|
@@ -47,19 +46,20 @@ class ColoredLevelFormatter(logging.Formatter):
|
|||
|
||||
for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
|
||||
formatted_levelname = levelname_fmt % {
|
||||
'levelname': logging.getLevelName(level)}
|
||||
"levelname": logging.getLevelName(level)
|
||||
}
|
||||
|
||||
# add ANSI escape sequences around the formatted levelname
|
||||
color_kwargs = {name: True for name in color_opts}
|
||||
colorized_formatted_levelname = terminalwriter.markup(
|
||||
formatted_levelname, **color_kwargs)
|
||||
formatted_levelname, **color_kwargs
|
||||
)
|
||||
self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
|
||||
colorized_formatted_levelname,
|
||||
self._fmt)
|
||||
colorized_formatted_levelname, self._fmt
|
||||
)
|
||||
|
||||
def format(self, record):
|
||||
fmt = self._level_to_fmt_mapping.get(
|
||||
record.levelno, self._original_fmt)
|
||||
fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)
|
||||
if six.PY2:
|
||||
self._fmt = fmt
|
||||
else:
|
||||
|
@@ -78,61 +78,86 @@ def get_option_ini(config, *names):
|
|||
|
||||
def pytest_addoption(parser):
|
||||
"""Add options to control log capturing."""
|
||||
group = parser.getgroup('logging')
|
||||
group = parser.getgroup("logging")
|
||||
|
||||
def add_option_ini(option, dest, default=None, type=None, **kwargs):
|
||||
parser.addini(dest, default=default, type=type,
|
||||
help='default value for ' + option)
|
||||
parser.addini(
|
||||
dest, default=default, type=type, help="default value for " + option
|
||||
)
|
||||
group.addoption(option, dest=dest, **kwargs)
|
||||
|
||||
add_option_ini(
|
||||
'--no-print-logs',
|
||||
dest='log_print', action='store_const', const=False, default=True,
|
||||
type='bool',
|
||||
help='disable printing caught logs on failed tests.')
|
||||
"--no-print-logs",
|
||||
dest="log_print",
|
||||
action="store_const",
|
||||
const=False,
|
||||
default=True,
|
||||
type="bool",
|
||||
help="disable printing caught logs on failed tests.",
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-level',
|
||||
dest='log_level', default=None,
|
||||
help='logging level used by the logging module')
|
||||
"--log-level",
|
||||
dest="log_level",
|
||||
default=None,
|
||||
help="logging level used by the logging module",
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-format',
|
||||
dest='log_format', default=DEFAULT_LOG_FORMAT,
|
||||
help='log format as used by the logging module.')
|
||||
"--log-format",
|
||||
dest="log_format",
|
||||
default=DEFAULT_LOG_FORMAT,
|
||||
help="log format as used by the logging module.",
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-date-format',
|
||||
dest='log_date_format', default=DEFAULT_LOG_DATE_FORMAT,
|
||||
help='log date format as used by the logging module.')
|
||||
"--log-date-format",
|
||||
dest="log_date_format",
|
||||
default=DEFAULT_LOG_DATE_FORMAT,
|
||||
help="log date format as used by the logging module.",
|
||||
)
|
||||
parser.addini(
|
||||
'log_cli', default=False, type='bool',
|
||||
help='enable log display during test run (also known as "live logging").')
|
||||
"log_cli",
|
||||
default=False,
|
||||
type="bool",
|
||||
help='enable log display during test run (also known as "live logging").',
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-cli-level',
|
||||
dest='log_cli_level', default=None,
|
||||
help='cli logging level.')
|
||||
"--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-cli-format',
|
||||
dest='log_cli_format', default=None,
|
||||
help='log format as used by the logging module.')
|
||||
"--log-cli-format",
|
||||
dest="log_cli_format",
|
||||
default=None,
|
||||
help="log format as used by the logging module.",
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-cli-date-format',
|
||||
dest='log_cli_date_format', default=None,
|
||||
help='log date format as used by the logging module.')
|
||||
"--log-cli-date-format",
|
||||
dest="log_cli_date_format",
|
||||
default=None,
|
||||
help="log date format as used by the logging module.",
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-file',
|
||||
dest='log_file', default=None,
|
||||
help='path to a file when logging will be written to.')
|
||||
"--log-file",
|
||||
dest="log_file",
|
||||
default=None,
|
||||
help="path to a file when logging will be written to.",
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-file-level',
|
||||
dest='log_file_level', default=None,
|
||||
help='log file logging level.')
|
||||
"--log-file-level",
|
||||
dest="log_file_level",
|
||||
default=None,
|
||||
help="log file logging level.",
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-file-format',
|
||||
dest='log_file_format', default=DEFAULT_LOG_FORMAT,
|
||||
help='log format as used by the logging module.')
|
||||
"--log-file-format",
|
||||
dest="log_file_format",
|
||||
default=DEFAULT_LOG_FORMAT,
|
||||
help="log format as used by the logging module.",
|
||||
)
|
||||
add_option_ini(
|
||||
'--log-file-date-format',
|
||||
dest='log_file_date_format', default=DEFAULT_LOG_DATE_FORMAT,
|
||||
help='log date format as used by the logging module.')
|
||||
"--log-file-date-format",
|
||||
dest="log_file_date_format",
|
||||
default=DEFAULT_LOG_DATE_FORMAT,
|
||||
help="log date format as used by the logging module.",
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
|
@@ -320,13 +345,12 @@ def get_actual_log_level(config, *setting_names):
|
|||
raise pytest.UsageError(
|
||||
"'{}' is not recognized as a logging level name for "
|
||||
"'{}'. Please consider passing the "
|
||||
"logging level num instead.".format(
|
||||
log_level,
|
||||
setting_name))
|
||||
"logging level num instead.".format(log_level, setting_name)
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
config.pluginmanager.register(LoggingPlugin(config), 'logging-plugin')
|
||||
config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
|
||||
|
||||
|
||||
@contextmanager
|
||||
|
@@ -347,25 +371,31 @@ class LoggingPlugin(object):
|
|||
self._config = config
|
||||
|
||||
# enable verbose output automatically if live logging is enabled
|
||||
if self._log_cli_enabled() and not config.getoption('verbose'):
|
||||
if self._log_cli_enabled() and not config.getoption("verbose"):
|
||||
# sanity check: terminal reporter should not have been loaded at this point
|
||||
assert self._config.pluginmanager.get_plugin('terminalreporter') is None
|
||||
assert self._config.pluginmanager.get_plugin("terminalreporter") is None
|
||||
config.option.verbose = 1
|
||||
|
||||
self.print_logs = get_option_ini(config, 'log_print')
|
||||
self.formatter = logging.Formatter(get_option_ini(config, 'log_format'),
|
||||
get_option_ini(config, 'log_date_format'))
|
||||
self.log_level = get_actual_log_level(config, 'log_level')
|
||||
self.print_logs = get_option_ini(config, "log_print")
|
||||
self.formatter = logging.Formatter(
|
||||
get_option_ini(config, "log_format"),
|
||||
get_option_ini(config, "log_date_format"),
|
||||
)
|
||||
self.log_level = get_actual_log_level(config, "log_level")
|
||||
|
||||
log_file = get_option_ini(config, 'log_file')
|
||||
log_file = get_option_ini(config, "log_file")
|
||||
if log_file:
|
||||
self.log_file_level = get_actual_log_level(config, 'log_file_level')
|
||||
self.log_file_level = get_actual_log_level(config, "log_file_level")
|
||||
|
||||
log_file_format = get_option_ini(config, 'log_file_format', 'log_format')
|
||||
log_file_date_format = get_option_ini(config, 'log_file_date_format', 'log_date_format')
|
||||
log_file_format = get_option_ini(config, "log_file_format", "log_format")
|
||||
log_file_date_format = get_option_ini(
|
||||
config, "log_file_date_format", "log_date_format"
|
||||
)
|
||||
# Each pytest runtests session will write to a clean logfile
|
||||
self.log_file_handler = logging.FileHandler(log_file, mode='w')
|
||||
log_file_formatter = logging.Formatter(log_file_format, datefmt=log_file_date_format)
|
||||
self.log_file_handler = logging.FileHandler(log_file, mode="w")
|
||||
log_file_formatter = logging.Formatter(
|
||||
log_file_format, datefmt=log_file_date_format
|
||||
)
|
||||
self.log_file_handler.setFormatter(log_file_formatter)
|
||||
else:
|
||||
self.log_file_handler = None
|
||||
|
@@ -377,14 +407,18 @@ class LoggingPlugin(object):
|
|||
"""Return True if log_cli should be considered enabled, either explicitly
|
||||
or because --log-cli-level was given in the command-line.
|
||||
"""
|
||||
return self._config.getoption('--log-cli-level') is not None or \
|
||||
self._config.getini('log_cli')
|
||||
return self._config.getoption(
|
||||
"--log-cli-level"
|
||||
) is not None or self._config.getini(
|
||||
"log_cli"
|
||||
)
|
||||
|
||||
@contextmanager
|
||||
def _runtest_for(self, item, when):
|
||||
"""Implements the internals of pytest_runtest_xxx() hook."""
|
||||
with catching_logs(LogCaptureHandler(),
|
||||
formatter=self.formatter, level=self.log_level) as log_handler:
|
||||
with catching_logs(
|
||||
LogCaptureHandler(), formatter=self.formatter, level=self.log_level
|
||||
) as log_handler:
|
||||
if self.log_cli_handler:
|
||||
self.log_cli_handler.set_when(when)
|
||||
|
||||
|
@@ -392,7 +426,7 @@ class LoggingPlugin(object):
|
|||
yield # run the test
|
||||
return
|
||||
|
||||
if not hasattr(item, 'catch_log_handlers'):
|
||||
if not hasattr(item, "catch_log_handlers"):
|
||||
item.catch_log_handlers = {}
|
||||
item.catch_log_handlers[when] = log_handler
|
||||
item.catch_log_handler = log_handler
|
||||
|
@@ -400,39 +434,39 @@ class LoggingPlugin(object):
|
|||
yield # run test
|
||||
finally:
|
||||
del item.catch_log_handler
|
||||
if when == 'teardown':
|
||||
if when == "teardown":
|
||||
del item.catch_log_handlers
|
||||
|
||||
if self.print_logs:
|
||||
# Add a captured log section to the report.
|
||||
log = log_handler.stream.getvalue().strip()
|
||||
item.add_report_section(when, 'log', log)
|
||||
item.add_report_section(when, "log", log)
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_setup(self, item):
|
||||
with self._runtest_for(item, 'setup'):
|
||||
with self._runtest_for(item, "setup"):
|
||||
yield
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_call(self, item):
|
||||
with self._runtest_for(item, 'call'):
|
||||
with self._runtest_for(item, "call"):
|
||||
yield
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_teardown(self, item):
|
||||
with self._runtest_for(item, 'teardown'):
|
||||
with self._runtest_for(item, "teardown"):
|
||||
yield
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_logstart(self):
|
||||
if self.log_cli_handler:
|
||||
self.log_cli_handler.reset()
|
||||
with self._runtest_for(None, 'start'):
|
||||
with self._runtest_for(None, "start"):
|
||||
yield
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_logfinish(self):
|
||||
with self._runtest_for(None, 'finish'):
|
||||
with self._runtest_for(None, "finish"):
|
||||
yield
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
|
@@ -442,8 +476,9 @@ class LoggingPlugin(object):
|
|||
with self.live_logs_context:
|
||||
if self.log_file_handler is not None:
|
||||
with closing(self.log_file_handler):
|
||||
with catching_logs(self.log_file_handler,
|
||||
level=self.log_file_level):
|
||||
with catching_logs(
|
||||
self.log_file_handler, level=self.log_file_level
|
||||
):
|
||||
yield # run all the tests
|
||||
else:
|
||||
yield # run all the tests
|
||||
|
@@ -453,20 +488,38 @@ class LoggingPlugin(object):
|
|||
|
||||
This must be done right before starting the loop so we can access the terminal reporter plugin.
|
||||
"""
|
||||
terminal_reporter = self._config.pluginmanager.get_plugin('terminalreporter')
|
||||
terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter")
|
||||
if self._log_cli_enabled() and terminal_reporter is not None:
|
||||
capture_manager = self._config.pluginmanager.get_plugin('capturemanager')
|
||||
log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
|
||||
log_cli_format = get_option_ini(self._config, 'log_cli_format', 'log_format')
|
||||
log_cli_date_format = get_option_ini(self._config, 'log_cli_date_format', 'log_date_format')
|
||||
if self._config.option.color != 'no' and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format):
|
||||
log_cli_formatter = ColoredLevelFormatter(create_terminal_writer(self._config),
|
||||
log_cli_format, datefmt=log_cli_date_format)
|
||||
capture_manager = self._config.pluginmanager.get_plugin("capturemanager")
|
||||
log_cli_handler = _LiveLoggingStreamHandler(
|
||||
terminal_reporter, capture_manager
|
||||
)
|
||||
log_cli_format = get_option_ini(
|
||||
self._config, "log_cli_format", "log_format"
|
||||
)
|
||||
log_cli_date_format = get_option_ini(
|
||||
self._config, "log_cli_date_format", "log_date_format"
|
||||
)
|
||||
if (
|
||||
self._config.option.color != "no"
|
||||
and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format)
|
||||
):
|
||||
log_cli_formatter = ColoredLevelFormatter(
|
||||
create_terminal_writer(self._config),
|
||||
log_cli_format,
|
||||
datefmt=log_cli_date_format,
|
||||
)
|
||||
else:
|
||||
log_cli_formatter = logging.Formatter(log_cli_format, datefmt=log_cli_date_format)
|
||||
log_cli_level = get_actual_log_level(self._config, 'log_cli_level', 'log_level')
|
||||
log_cli_formatter = logging.Formatter(
|
||||
log_cli_format, datefmt=log_cli_date_format
|
||||
)
|
||||
log_cli_level = get_actual_log_level(
|
||||
self._config, "log_cli_level", "log_level"
|
||||
)
|
||||
self.log_cli_handler = log_cli_handler
|
||||
self.live_logs_context = catching_logs(log_cli_handler, formatter=log_cli_formatter, level=log_cli_level)
|
||||
self.live_logs_context = catching_logs(
|
||||
log_cli_handler, formatter=log_cli_formatter, level=log_cli_level
|
||||
)
|
||||
else:
|
||||
self.live_logs_context = _dummy_context_manager()
|
||||
|
||||
|
@@ -499,7 +552,7 @@ class _LiveLoggingStreamHandler(logging.StreamHandler):
|
|||
"""Prepares for the given test phase (setup/call/teardown)"""
|
||||
self._when = when
|
||||
self._section_name_shown = False
|
||||
if when == 'start':
|
||||
if when == "start":
|
||||
self._test_outcome_written = False
|
||||
|
||||
def emit(self, record):
|
||||
|
@@ -507,14 +560,14 @@ class _LiveLoggingStreamHandler(logging.StreamHandler):
|
|||
self.capture_manager.suspend_global_capture()
|
||||
try:
|
||||
if not self._first_record_emitted:
|
||||
self.stream.write('\n')
|
||||
self.stream.write("\n")
|
||||
self._first_record_emitted = True
|
||||
elif self._when in ('teardown', 'finish'):
|
||||
elif self._when in ("teardown", "finish"):
|
||||
if not self._test_outcome_written:
|
||||
self._test_outcome_written = True
|
||||
self.stream.write('\n')
|
||||
self.stream.write("\n")
|
||||
if not self._section_name_shown and self._when:
|
||||
self.stream.section('live log ' + self._when, sep='-', bold=True)
|
||||
self.stream.section("live log " + self._when, sep="-", bold=True)
|
||||
self._section_name_shown = True
|
||||
logging.StreamHandler.emit(self, record)
|
||||
finally:
|
||||
225 _pytest/main.py
@@ -28,69 +28,140 @@ EXIT_NOTESTSCOLLECTED = 5
|
|||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
|
||||
type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'])
|
||||
parser.addini("testpaths", "directories to search for tests when no files or directories are given in the "
|
||||
"command line.",
|
||||
type="args", default=[])
|
||||
parser.addini(
|
||||
"norecursedirs",
|
||||
"directory patterns to avoid for recursion",
|
||||
type="args",
|
||||
default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"],
|
||||
)
|
||||
parser.addini(
|
||||
"testpaths",
|
||||
"directories to search for tests when no files or directories are given in the "
|
||||
"command line.",
|
||||
type="args",
|
||||
default=[],
|
||||
)
|
||||
# parser.addini("dirpatterns",
|
||||
# "patterns specifying possible locations of test files",
|
||||
# type="linelist", default=["**/test_*.txt",
|
||||
# "**/test_*.py", "**/*_test.py"]
|
||||
# )
|
||||
group = parser.getgroup("general", "running and selection options")
|
||||
group._addoption('-x', '--exitfirst', action="store_const",
|
||||
dest="maxfail", const=1,
|
||||
help="exit instantly on first error or failed test."),
|
||||
group._addoption('--maxfail', metavar="num",
|
||||
action="store", type=int, dest="maxfail", default=0,
|
||||
help="exit after first num failures or errors.")
|
||||
group._addoption('--strict', action="store_true",
|
||||
help="marks not registered in configuration file raise errors.")
|
||||
group._addoption("-c", metavar="file", type=str, dest="inifilename",
|
||||
help="load configuration from `file` instead of trying to locate one of the implicit "
|
||||
"configuration files.")
|
||||
group._addoption("--continue-on-collection-errors", action="store_true",
|
||||
default=False, dest="continue_on_collection_errors",
|
||||
help="Force test execution even if collection errors occur.")
|
||||
group._addoption("--rootdir", action="store",
|
||||
dest="rootdir",
|
||||
help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
|
||||
"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
|
||||
"'$HOME/root_dir'.")
|
||||
group._addoption(
|
||||
"-x",
|
||||
"--exitfirst",
|
||||
action="store_const",
|
||||
dest="maxfail",
|
||||
const=1,
|
||||
help="exit instantly on first error or failed test.",
|
||||
),
|
||||
group._addoption(
|
||||
"--maxfail",
|
||||
metavar="num",
|
||||
action="store",
|
||||
type=int,
|
||||
dest="maxfail",
|
||||
default=0,
|
||||
help="exit after first num failures or errors.",
|
||||
)
|
||||
group._addoption(
|
||||
"--strict",
|
||||
action="store_true",
|
||||
help="marks not registered in configuration file raise errors.",
|
||||
)
|
||||
group._addoption(
|
||||
"-c",
|
||||
metavar="file",
|
||||
type=str,
|
||||
dest="inifilename",
|
||||
help="load configuration from `file` instead of trying to locate one of the implicit "
|
||||
"configuration files.",
|
||||
)
|
||||
group._addoption(
|
||||
"--continue-on-collection-errors",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="continue_on_collection_errors",
|
||||
help="Force test execution even if collection errors occur.",
|
||||
)
|
||||
group._addoption(
|
||||
"--rootdir",
|
||||
action="store",
|
||||
dest="rootdir",
|
||||
help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
|
||||
"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
|
||||
"'$HOME/root_dir'.",
|
||||
)
|
||||
|
||||
group = parser.getgroup("collect", "collection")
|
||||
group.addoption('--collectonly', '--collect-only', action="store_true",
|
||||
help="only collect tests, don't execute them."),
|
||||
group.addoption('--pyargs', action="store_true",
|
||||
help="try to interpret all arguments as python packages.")
|
||||
group.addoption("--ignore", action="append", metavar="path",
|
||||
help="ignore path during collection (multi-allowed).")
|
||||
group.addoption("--deselect", action="append", metavar="nodeid_prefix",
|
||||
help="deselect item during collection (multi-allowed).")
|
||||
group.addoption(
|
||||
"--collectonly",
|
||||
"--collect-only",
|
||||
action="store_true",
|
||||
help="only collect tests, don't execute them.",
|
||||
),
|
||||
group.addoption(
|
||||
"--pyargs",
|
||||
action="store_true",
|
||||
help="try to interpret all arguments as python packages.",
|
||||
)
|
||||
group.addoption(
|
||||
"--ignore",
|
||||
action="append",
|
||||
metavar="path",
|
||||
help="ignore path during collection (multi-allowed).",
|
||||
)
|
||||
group.addoption(
|
||||
"--deselect",
|
||||
action="append",
|
||||
metavar="nodeid_prefix",
|
||||
help="deselect item during collection (multi-allowed).",
|
||||
)
|
||||
# when changing this to --conf-cut-dir, config.py Conftest.setinitial
|
||||
# needs upgrading as well
|
||||
group.addoption('--confcutdir', dest="confcutdir", default=None,
|
||||
metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"),
|
||||
help="only load conftest.py's relative to specified dir.")
|
||||
group.addoption('--noconftest', action="store_true",
|
||||
dest="noconftest", default=False,
|
||||
help="Don't load any conftest.py files.")
|
||||
group.addoption('--keepduplicates', '--keep-duplicates', action="store_true",
|
||||
dest="keepduplicates", default=False,
|
||||
help="Keep duplicate tests.")
|
||||
group.addoption('--collect-in-virtualenv', action='store_true',
|
||||
dest='collect_in_virtualenv', default=False,
|
||||
help="Don't ignore tests in a local virtualenv directory")
|
||||
group.addoption(
|
||||
"--confcutdir",
|
||||
dest="confcutdir",
|
||||
default=None,
|
||||
metavar="dir",
|
||||
type=functools.partial(directory_arg, optname="--confcutdir"),
|
||||
help="only load conftest.py's relative to specified dir.",
|
||||
)
|
||||
group.addoption(
|
||||
"--noconftest",
|
||||
action="store_true",
|
||||
dest="noconftest",
|
||||
default=False,
|
||||
help="Don't load any conftest.py files.",
|
||||
)
|
||||
group.addoption(
|
||||
"--keepduplicates",
|
||||
"--keep-duplicates",
|
||||
action="store_true",
|
||||
dest="keepduplicates",
|
||||
default=False,
|
||||
help="Keep duplicate tests.",
|
||||
)
|
||||
group.addoption(
|
||||
"--collect-in-virtualenv",
|
||||
action="store_true",
|
||||
dest="collect_in_virtualenv",
|
||||
default=False,
|
||||
help="Don't ignore tests in a local virtualenv directory",
|
||||
)
|
||||
|
||||
group = parser.getgroup("debugconfig",
|
||||
"test session debugging and configuration")
|
||||
group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
|
||||
help="base temporary directory for this test run.")
|
||||
group = parser.getgroup("debugconfig", "test session debugging and configuration")
|
||||
group.addoption(
|
||||
"--basetemp",
|
||||
dest="basetemp",
|
||||
default=None,
|
||||
metavar="dir",
|
||||
help="base temporary directory for this test run.",
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
__import__('pytest').config = config # compatibility
|
||||
__import__("pytest").config = config # compatibility
|
||||
|
||||
|
||||
def wrap_session(config, doit):
|
||||
|
@@ -112,8 +183,7 @@ def wrap_session(config, doit):
|
|||
except KeyboardInterrupt:
|
||||
excinfo = _pytest._code.ExceptionInfo()
|
||||
if initstate < 2 and isinstance(excinfo.value, exit.Exception):
|
||||
sys.stderr.write('{}: {}\n'.format(
|
||||
excinfo.typename, excinfo.value.msg))
|
||||
sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg))
|
||||
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
|
||||
session.exitstatus = EXIT_INTERRUPTED
|
||||
except: # noqa
|
||||
|
@@ -128,8 +198,8 @@ def wrap_session(config, doit):
|
|||
session.startdir.chdir()
|
||||
if initstate >= 2:
|
||||
config.hook.pytest_sessionfinish(
|
||||
session=session,
|
||||
exitstatus=session.exitstatus)
|
||||
session=session, exitstatus=session.exitstatus
|
||||
)
|
||||
config._ensure_unconfigure()
|
||||
return session.exitstatus
|
||||
|
||||
|
@@ -155,10 +225,8 @@ def pytest_collection(session):
|
|||
|
||||
|
||||
def pytest_runtestloop(session):
|
||||
if (session.testsfailed and
|
||||
not session.config.option.continue_on_collection_errors):
|
||||
raise session.Interrupted(
|
||||
"%d errors during collection" % session.testsfailed)
|
||||
if session.testsfailed and not session.config.option.continue_on_collection_errors:
|
||||
raise session.Interrupted("%d errors during collection" % session.testsfailed)
|
||||
|
||||
if session.config.option.collectonly:
|
||||
return True
|
||||
|
@@ -176,11 +244,17 @@ def pytest_runtestloop(session):
|
|||
def _in_venv(path):
|
||||
"""Attempts to detect if ``path`` is the root of a Virtual Environment by
|
||||
checking for the existence of the appropriate activate script"""
|
||||
bindir = path.join('Scripts' if sys.platform.startswith('win') else 'bin')
|
||||
bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin")
|
||||
if not bindir.isdir():
|
||||
return False
|
||||
activates = ('activate', 'activate.csh', 'activate.fish',
|
||||
'Activate', 'Activate.bat', 'Activate.ps1')
|
||||
activates = (
|
||||
"activate",
|
||||
"activate.csh",
|
||||
"activate.fish",
|
||||
"Activate",
|
||||
"Activate.bat",
|
||||
"Activate.ps1",
|
||||
)
|
||||
return any([fname.basename in activates for fname in bindir.listdir()])
|
||||
|
||||
|
||||
|
@@ -241,6 +315,7 @@ def _patched_find_module():
|
|||
The only supported python<3.4 by pytest is python 2.7.
|
||||
"""
|
||||
if six.PY2: # python 3.4+ uses importlib instead
|
||||
|
||||
def find_module_patched(self, fullname, path=None):
|
||||
# Note: we ignore 'path' argument since it is only used via meta_path
|
||||
subname = fullname.split(".")[-1]
|
||||
|
@@ -252,8 +327,7 @@ def _patched_find_module():
|
|||
# original: path = [os.path.realpath(self.path)]
|
||||
path = [self.path]
|
||||
try:
|
||||
file, filename, etc = pkgutil.imp.find_module(subname,
|
||||
path)
|
||||
file, filename, etc = pkgutil.imp.find_module(subname, path)
|
||||
except ImportError:
|
||||
return None
|
||||
return pkgutil.ImpLoader(fullname, file, filename, etc)
|
||||
|
@@ -269,6 +343,7 @@ def _patched_find_module():
|
|||
|
||||
|
||||
class FSHookProxy(object):
|
||||
|
||||
def __init__(self, fspath, pm, remove_mods):
|
||||
self.fspath = fspath
|
||||
self.pm = pm
|
||||
|
@@ -286,7 +361,7 @@ class NoMatch(Exception):
|
|||
|
||||
class Interrupted(KeyboardInterrupt):
|
||||
""" signals an interrupted test run. """
|
||||
__module__ = 'builtins' # for py3
|
||||
__module__ = "builtins" # for py3
|
||||
|
||||
|
||||
class Failed(Exception):
|
||||
|
@@ -299,8 +374,8 @@ class Session(nodes.FSCollector):
|
|||
|
||||
def __init__(self, config):
|
||||
nodes.FSCollector.__init__(
|
||||
self, config.rootdir, parent=None,
|
||||
config=config, session=self, nodeid="")
|
||||
self, config.rootdir, parent=None, config=config, session=self, nodeid=""
|
||||
)
|
||||
self.testsfailed = 0
|
||||
self.testscollected = 0
|
||||
self.shouldstop = False
|
||||
|
@@ -320,12 +395,12 @@ class Session(nodes.FSCollector):
|
|||
|
||||
@hookimpl(tryfirst=True)
|
||||
def pytest_runtest_logreport(self, report):
|
||||
if report.failed and not hasattr(report, 'wasxfail'):
|
||||
if report.failed and not hasattr(report, "wasxfail"):
|
||||
self.testsfailed += 1
|
||||
maxfail = self.config.getvalue("maxfail")
|
||||
if maxfail and self.testsfailed >= maxfail:
|
||||
self.shouldfail = "stopping after %d failures" % (
|
||||
self.testsfailed)
|
||||
self.shouldfail = "stopping after %d failures" % (self.testsfailed)
|
||||
|
||||
pytest_collectreport = pytest_runtest_logreport
|
||||
|
||||
def isinitpath(self, path):
|
||||
|
@@ -350,8 +425,9 @@ class Session(nodes.FSCollector):
|
|||
try:
|
||||
items = self._perform_collect(args, genitems)
|
||||
self.config.pluginmanager.check_pending()
|
||||
hook.pytest_collection_modifyitems(session=self,
|
||||
config=self.config, items=items)
|
||||
hook.pytest_collection_modifyitems(
|
||||
session=self, config=self.config, items=items
|
||||
)
|
||||
finally:
|
||||
hook.pytest_collection_finish(session=self)
|
||||
self.testscollected = len(items)
|
||||
|
@@ -408,8 +484,9 @@ class Session(nodes.FSCollector):
|
|||
path = names.pop(0)
|
||||
if path.check(dir=1):
|
||||
assert not names, "invalid arg %r" % (arg,)
|
||||
for path in path.visit(fil=lambda x: x.check(file=1),
|
||||
rec=self._recurse, bf=True, sort=True):
|
||||
for path in path.visit(
|
||||
fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True
|
||||
):
|
||||
for x in self._collectfile(path):
|
||||
yield x
|
||||
else:
|
||||
|
@@ -469,8 +546,8 @@ class Session(nodes.FSCollector):
|
|||
if not path.check():
|
||||
if self.config.option.pyargs:
|
||||
raise UsageError(
|
||||
"file or package not found: " + arg +
|
||||
" (missing __init__.py?)")
|
||||
"file or package not found: " + arg + " (missing __init__.py?)"
|
||||
)
|
||||
else:
|
||||
raise UsageError("file not found: " + arg)
|
||||
parts[0] = path
|
||||
@@ -2,15 +2,25 @@
|
|||
from __future__ import absolute_import, division, print_function
|
||||
from _pytest.config import UsageError
|
||||
from .structures import (
|
||||
ParameterSet, EMPTY_PARAMETERSET_OPTION, MARK_GEN,
|
||||
Mark, MarkInfo, MarkDecorator, MarkGenerator,
|
||||
transfer_markers, get_empty_parameterset_mark
|
||||
ParameterSet,
|
||||
EMPTY_PARAMETERSET_OPTION,
|
||||
MARK_GEN,
|
||||
Mark,
|
||||
MarkInfo,
|
||||
MarkDecorator,
|
||||
MarkGenerator,
|
||||
transfer_markers,
|
||||
get_empty_parameterset_mark,
|
||||
)
|
||||
from .legacy import matchkeyword, matchmark
|
||||
|
||||
__all__ = [
|
||||
'Mark', 'MarkInfo', 'MarkDecorator', 'MarkGenerator',
|
||||
'transfer_markers', 'get_empty_parameterset_mark'
|
||||
"Mark",
|
||||
"MarkInfo",
|
||||
"MarkDecorator",
|
||||
"MarkGenerator",
|
||||
"transfer_markers",
|
||||
"get_empty_parameterset_mark",
|
||||
]
|
||||
|
||||
|
||||
|
@@ -42,47 +52,53 @@ def param(*values, **kw):
|
|||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("general")
|
||||
group._addoption(
|
||||
'-k',
|
||||
action="store", dest="keyword", default='', metavar="EXPRESSION",
|
||||
"-k",
|
||||
action="store",
|
||||
dest="keyword",
|
||||
default="",
|
||||
metavar="EXPRESSION",
|
||||
help="only run tests which match the given substring expression. "
|
||||
"An expression is a python evaluatable expression "
|
||||
"where all names are substring-matched against test names "
|
||||
"and their parent classes. Example: -k 'test_method or test_"
|
||||
"other' matches all test functions and classes whose name "
|
||||
"contains 'test_method' or 'test_other', while -k 'not test_method' "
|
||||
"matches those that don't contain 'test_method' in their names. "
|
||||
"Additionally keywords are matched to classes and functions "
|
||||
"containing extra names in their 'extra_keyword_matches' set, "
|
||||
"as well as functions which have names assigned directly to them."
|
||||
"An expression is a python evaluatable expression "
|
||||
"where all names are substring-matched against test names "
|
||||
"and their parent classes. Example: -k 'test_method or test_"
|
||||
"other' matches all test functions and classes whose name "
|
||||
"contains 'test_method' or 'test_other', while -k 'not test_method' "
|
||||
"matches those that don't contain 'test_method' in their names. "
|
||||
"Additionally keywords are matched to classes and functions "
|
||||
"containing extra names in their 'extra_keyword_matches' set, "
|
||||
"as well as functions which have names assigned directly to them.",
|
||||
)
|
||||
|
||||
group._addoption(
|
||||
"-m",
|
||||
action="store", dest="markexpr", default="", metavar="MARKEXPR",
|
||||
action="store",
|
||||
dest="markexpr",
|
||||
default="",
|
||||
metavar="MARKEXPR",
|
||||
help="only run tests matching given mark expression. "
|
||||
"example: -m 'mark1 and not mark2'."
|
||||
"example: -m 'mark1 and not mark2'.",
|
||||
)
|
||||
|
||||
group.addoption(
|
||||
"--markers", action="store_true",
|
||||
help="show markers (builtin, plugin and per-project ones)."
|
||||
"--markers",
|
||||
action="store_true",
|
||||
help="show markers (builtin, plugin and per-project ones).",
|
||||
)
|
||||
|
||||
parser.addini("markers", "markers for test functions", 'linelist')
|
||||
parser.addini(
|
||||
EMPTY_PARAMETERSET_OPTION,
|
||||
"default marker for empty parametersets")
|
||||
parser.addini("markers", "markers for test functions", "linelist")
|
||||
parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets")
|
||||
|
||||
|
||||
def pytest_cmdline_main(config):
|
||||
import _pytest.config
|
||||
|
||||
if config.option.markers:
|
||||
config._do_configure()
|
||||
tw = _pytest.config.create_terminal_writer(config)
|
||||
for line in config.getini("markers"):
|
||||
parts = line.split(":", 1)
|
||||
name = parts[0]
|
||||
rest = parts[1] if len(parts) == 2 else ''
|
||||
rest = parts[1] if len(parts) == 2 else ""
|
||||
tw.write("@pytest.mark.%s:" % name, bold=True)
|
||||
tw.line(rest)
|
||||
tw.line()
|
||||
|
@@ -147,11 +163,12 @@ def pytest_configure(config):
|
|||
|
||||
empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
|
||||
|
||||
if empty_parameterset not in ('skip', 'xfail', None, ''):
|
||||
if empty_parameterset not in ("skip", "xfail", None, ""):
|
||||
raise UsageError(
|
||||
"{!s} must be one of skip and xfail,"
|
||||
" but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset))
|
||||
" but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)
|
||||
)
|
||||
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
MARK_GEN._config = getattr(config, '_old_mark_config', None)
|
||||
MARK_GEN._config = getattr(config, "_old_mark_config", None)
|
||||
@@ -8,18 +8,20 @@ from ..outcomes import fail, TEST_OUTCOME
|
|||
|
||||
|
||||
def cached_eval(config, expr, d):
|
||||
if not hasattr(config, '_evalcache'):
|
||||
if not hasattr(config, "_evalcache"):
|
||||
config._evalcache = {}
|
||||
try:
|
||||
return config._evalcache[expr]
|
||||
except KeyError:
|
||||
import _pytest._code
|
||||
|
||||
exprcode = _pytest._code.compile(expr, mode="eval")
|
||||
config._evalcache[expr] = x = eval(exprcode, d)
|
||||
return x
|
||||
|
||||
|
||||
class MarkEvaluator(object):
|
||||
|
||||
def __init__(self, item, name):
|
||||
self.item = item
|
||||
self._marks = None
|
||||
|
@ -29,16 +31,17 @@ class MarkEvaluator(object):
def __bool__(self):
# dont cache here to prevent staleness
return bool(self._get_marks())

__nonzero__ = __bool__

def wasvalid(self):
return not hasattr(self, 'exc')
return not hasattr(self, "exc")

def _get_marks(self):
return list(self.item.iter_markers(name=self._mark_name))

def invalidraise(self, exc):
raises = self.get('raises')
raises = self.get("raises")
if not raises:
return
return not isinstance(exc, raises)
@ -49,24 +52,25 @@ class MarkEvaluator(object):
except TEST_OUTCOME:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
msg = [" " * (self.exc[1].offset + 4) + "^", ]
msg = [" " * (self.exc[1].offset + 4) + "^"]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
fail("Error evaluating %r expression\n"
" %s\n"
"%s"
% (self._mark_name, self.expr, "\n".join(msg)),
pytrace=False)
fail(
"Error evaluating %r expression\n"
" %s\n"
"%s" % (self._mark_name, self.expr, "\n".join(msg)),
pytrace=False,
)

def _getglobals(self):
d = {'os': os, 'sys': sys, 'platform': platform, 'config': self.item.config}
if hasattr(self.item, 'obj'):
d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config}
if hasattr(self.item, "obj"):
d.update(self.item.obj.__globals__)
return d

def _istrue(self):
if hasattr(self, 'result'):
if hasattr(self, "result"):
return self.result
self._marks = self._get_marks()
@ -74,8 +78,8 @@ class MarkEvaluator(object):
self.result = False
for mark in self._marks:
self._mark = mark
if 'condition' in mark.kwargs:
args = (mark.kwargs['condition'],)
if "condition" in mark.kwargs:
args = (mark.kwargs["condition"],)
else:
args = mark.args
@ -87,19 +91,18 @@ class MarkEvaluator(object):
else:
if "reason" not in mark.kwargs:
# XXX better be checked at collection time
msg = "you need to specify reason=STRING " \
"when using booleans as conditions."
msg = "you need to specify reason=STRING " "when using booleans as conditions."
fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = mark.kwargs.get('reason', None)
self.reason = mark.kwargs.get("reason", None)
self.expr = expr
return self.result

if not args:
self.result = True
self.reason = mark.kwargs.get('reason', None)
self.reason = mark.kwargs.get("reason", None)
return self.result
return False
@ -109,9 +112,9 @@ class MarkEvaluator(object):
return self._mark.kwargs.get(attr, default)

def getexplanation(self):
expl = getattr(self, 'reason', None) or self.get('reason', None)
expl = getattr(self, "reason", None) or self.get("reason", None)
if not expl:
if not hasattr(self, 'expr'):
if not hasattr(self, "expr"):
return ""
else:
return "condition: " + str(self.expr)
@ -38,6 +38,7 @@ class KeywordMapping(object):
# Add the names of the current item and any parent items
import pytest

for item in item.listchain():
if not isinstance(item, pytest.Instance):
mapped_names.add(item.name)
@ -47,7 +48,7 @@ class KeywordMapping(object):
mapped_names.add(name)

# Add the names attached to the current function through direct assignment
if hasattr(item, 'function'):
if hasattr(item, "function"):
for name in item.function.__dict__:
mapped_names.add(name)
@ -85,7 +86,11 @@ def matchkeyword(colitem, keywordexpr):
return not mapping[keywordexpr[4:]]
for kwd in keywordexpr.split():
if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list:
raise UsageError("Python keyword '{}' not accepted in expressions passed to '-k'".format(kwd))
raise UsageError(
"Python keyword '{}' not accepted in expressions passed to '-k'".format(
kwd
)
)
try:
return eval(keywordexpr, {}, mapping)
except SyntaxError:
@ -20,32 +20,35 @@ def alias(name, warning=None):
warnings.warn(warning, stacklevel=2)
return getter(self)

return property(getter if warning is None else warned, doc='alias for ' + name)
return property(getter if warning is None else warned, doc="alias for " + name)


def istestfunc(func):
return hasattr(func, "__call__") and \
getattr(func, "__name__", "<lambda>") != "<lambda>"
return hasattr(func, "__call__") and getattr(
func, "__name__", "<lambda>"
) != "<lambda>"


def get_empty_parameterset_mark(config, argnames, func):
requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
if requested_mark in ('', None, 'skip'):
if requested_mark in ("", None, "skip"):
mark = MARK_GEN.skip
elif requested_mark == 'xfail':
elif requested_mark == "xfail":
mark = MARK_GEN.xfail(run=False)
else:
raise LookupError(requested_mark)
fs, lineno = getfslineno(func)
reason = "got empty parameter set %r, function %s at %s:%d" % (
argnames, func.__name__, fs, lineno)
argnames, func.__name__, fs, lineno
)
return mark(reason=reason)


class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')):
class ParameterSet(namedtuple("ParameterSet", "values, marks, id")):

@classmethod
def param(cls, *values, **kw):
marks = kw.pop('marks', ())
marks = kw.pop("marks", ())
if isinstance(marks, MarkDecorator):
marks = marks,
else:
@ -78,8 +81,9 @@ class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')):
newmarks = []
argval = parameterset
while isinstance(argval, MarkDecorator):
newmarks.append(MarkDecorator(Mark(
argval.markname, argval.args[:-1], argval.kwargs)))
newmarks.append(
MarkDecorator(Mark(argval.markname, argval.args[:-1], argval.kwargs))
)
argval = argval.args[-1]
assert not isinstance(argval, ParameterSet)
if legacy_force_tuple:
@ -99,16 +103,15 @@ class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')):
force_tuple = False
parameters = [
ParameterSet.extract_from(x, legacy_force_tuple=force_tuple)
for x in argvalues]
for x in argvalues
]
del argvalues

if not parameters:
mark = get_empty_parameterset_mark(config, argnames, func)
parameters.append(ParameterSet(
values=(NOTSET,) * len(argnames),
marks=[mark],
id=None,
))
parameters.append(
ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None)
)
return argnames, parameters
@ -131,8 +134,8 @@ class Mark(object):
"""
assert self.name == other.name
return Mark(
self.name, self.args + other.args,
dict(self.kwargs, **other.kwargs))
self.name, self.args + other.args, dict(self.kwargs, **other.kwargs)
)


@attr.s
@ -172,9 +175,9 @@ class MarkDecorator(object):
mark = attr.ib(validator=attr.validators.instance_of(Mark))

name = alias('mark.name')
args = alias('mark.args')
kwargs = alias('mark.kwargs')
name = alias("mark.name")
args = alias("mark.args")
kwargs = alias("mark.kwargs")

@property
def markname(self):
@ -217,14 +220,11 @@ def get_unpacked_marks(obj):
"""
obtain the unpacked marks that are stored on an object
"""
mark_list = getattr(obj, 'pytestmark', [])
mark_list = getattr(obj, "pytestmark", [])

if not isinstance(mark_list, list):
mark_list = [mark_list]
return [
getattr(mark, 'mark', mark)  # unpack MarkDecorator
for mark in mark_list
]
return [getattr(mark, "mark", mark) for mark in mark_list]  # unpack MarkDecorator


def store_mark(obj, mark):
@ -271,7 +271,7 @@ def _marked(func, mark):
invoked more than once.
"""
try:
func_mark = getattr(func, getattr(mark, 'combined', mark).name)
func_mark = getattr(func, getattr(mark, "combined", mark).name)
except AttributeError:
return False
return any(mark == info.combined for info in func_mark)
@ -284,12 +284,14 @@ class MarkInfo(object):
_marks = attr.ib()
combined = attr.ib(
repr=False,
default=attr.Factory(lambda self: reduce(Mark.combined_with, self._marks),
takes_self=True))
default=attr.Factory(
lambda self: reduce(Mark.combined_with, self._marks), takes_self=True
),
)

name = alias('combined.name', warning=MARK_INFO_ATTRIBUTE)
args = alias('combined.args', warning=MARK_INFO_ATTRIBUTE)
kwargs = alias('combined.kwargs', warning=MARK_INFO_ATTRIBUTE)
name = alias("combined.name", warning=MARK_INFO_ATTRIBUTE)
args = alias("combined.args", warning=MARK_INFO_ATTRIBUTE)
kwargs = alias("combined.kwargs", warning=MARK_INFO_ATTRIBUTE)

@classmethod
def for_mark(cls, mark):
@ -348,6 +350,7 @@ MARK_GEN = MarkGenerator()
class NodeKeywords(MappingMixin):

def __init__(self, node):
self.node = node
self.parent = node.parent
@ -381,7 +384,7 @@ class NodeKeywords(MappingMixin):
return len(self._seen())

def __repr__(self):
return "<NodeKeywords for node %s>" % (self.node, )
return "<NodeKeywords for node %s>" % (self.node,)

@attr.s(cmp=False, hash=False)
@ -38,12 +38,12 @@ def monkeypatch():
def resolve(name):
# simplified from zope.dottedname
parts = name.split('.')
parts = name.split(".")

used = parts.pop(0)
found = __import__(used)
for part in parts:
used += '.' + part
used += "." + part
try:
found = getattr(found, part)
except AttributeError:
@ -60,9 +60,7 @@ def resolve(name):
if expected == used:
raise
else:
raise ImportError(
'import error in %s: %s' % (used, ex)
)
raise ImportError("import error in %s: %s" % (used, ex))
found = annotated_getattr(found, part, used)
return found
@ -72,18 +70,15 @@ def annotated_getattr(obj, name, ann):
obj = getattr(obj, name)
except AttributeError:
raise AttributeError(
'%r object at %s has no attribute %r' % (
type(obj).__name__, ann, name
)
"%r object at %s has no attribute %r" % (type(obj).__name__, ann, name)
)
return obj


def derive_importpath(import_path, raising):
if not isinstance(import_path, six.string_types) or "." not in import_path:
raise TypeError("must be absolute import path string, not %r" %
(import_path,))
module, attr = import_path.rsplit('.', 1)
raise TypeError("must be absolute import path string, not %r" % (import_path,))
module, attr = import_path.rsplit(".", 1)
target = resolve(module)
if raising:
annotated_getattr(target, attr, ann=module)
@ -91,6 +86,7 @@ def derive_importpath(import_path, raising):
class Notset(object):

def __repr__(self):
return "<notset>"
@ -150,9 +146,11 @@ class MonkeyPatch(object):
if value is notset:
if not isinstance(target, six.string_types):
raise TypeError("use setattr(target, name, value) or "
"setattr(target, value) with target being a dotted "
"import string")
raise TypeError(
"use setattr(target, name, value) or "
"setattr(target, value) with target being a dotted "
"import string"
)
value = name
name, target = derive_importpath(target, raising)
@ -180,9 +178,11 @@ class MonkeyPatch(object):
__tracebackhide__ = True
if name is notset:
if not isinstance(target, six.string_types):
raise TypeError("use delattr(target, name) or "
"delattr(target) with target being a dotted "
"import string")
raise TypeError(
"use delattr(target, name) or "
"delattr(target) with target being a dotted "
"import string"
)
name, target = derive_importpath(target, raising)

if not hasattr(target, name):
@ -30,7 +30,7 @@ def _splitnode(nodeid):
|
|||
['testing', 'code', 'test_excinfo.py']
|
||||
['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()']
|
||||
"""
|
||||
if nodeid == '':
|
||||
if nodeid == "":
|
||||
# If there is no root node at all, return an empty list so the caller's logic can remain sane
|
||||
return []
|
||||
parts = nodeid.split(SEP)
|
||||
|
@ -64,14 +64,16 @@ class _CompatProperty(object):
|
|||
# "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format(
|
||||
# name=self.name, owner=type(owner).__name__),
|
||||
# PendingDeprecationWarning, stacklevel=2)
|
||||
return getattr(__import__('pytest'), self.name)
|
||||
return getattr(__import__("pytest"), self.name)
|
||||
|
||||
|
||||
class Node(object):
|
||||
""" base class for Collector and Item the test collection tree.
|
||||
Collector subclasses have children, Items are terminal nodes."""
|
||||
|
||||
def __init__(self, name, parent=None, config=None, session=None, fspath=None, nodeid=None):
|
||||
def __init__(
|
||||
self, name, parent=None, config=None, session=None, fspath=None, nodeid=None
|
||||
):
|
||||
#: a unique name within the scope of the parent node
|
||||
self.name = name
|
||||
|
||||
|
@ -85,7 +87,7 @@ class Node(object):
|
|||
self.session = session or parent.session
|
||||
|
||||
#: filesystem path where this node was collected from (can be None)
|
||||
self.fspath = fspath or getattr(parent, 'fspath', None)
|
||||
self.fspath = fspath or getattr(parent, "fspath", None)
|
||||
|
||||
#: keywords/markers collected from all scopes
|
||||
self.keywords = NodeKeywords(self)
|
||||
|
@ -120,7 +122,7 @@ class Node(object):
|
|||
def _getcustomclass(self, name):
|
||||
maybe_compatprop = getattr(type(self), name)
|
||||
if isinstance(maybe_compatprop, _CompatProperty):
|
||||
return getattr(__import__('pytest'), name)
|
||||
return getattr(__import__("pytest"), name)
|
||||
else:
|
||||
cls = getattr(self, name)
|
||||
# TODO: reenable in the features branch
|
||||
|
@ -130,8 +132,7 @@ class Node(object):
|
|||
return cls
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s %r>" % (self.__class__.__name__,
|
||||
getattr(self, 'name', None))
|
||||
return "<%s %r>" % (self.__class__.__name__, getattr(self, "name", None))
|
||||
|
||||
def warn(self, code, message):
|
||||
""" generate a warning with the given code and message for this
|
||||
|
@ -140,9 +141,11 @@ class Node(object):
|
|||
fslocation = getattr(self, "location", None)
|
||||
if fslocation is None:
|
||||
fslocation = getattr(self, "fspath", None)
|
||||
self.ihook.pytest_logwarning.call_historic(kwargs=dict(
|
||||
code=code, message=message,
|
||||
nodeid=self.nodeid, fslocation=fslocation))
|
||||
self.ihook.pytest_logwarning.call_historic(
|
||||
kwargs=dict(
|
||||
code=code, message=message, nodeid=self.nodeid, fslocation=fslocation
|
||||
)
|
||||
)
|
||||
|
||||
# methods for ordering nodes
|
||||
@property
|
||||
|
@ -176,6 +179,7 @@ class Node(object):
|
|||
``marker`` can be a string or pytest.mark.* instance.
|
||||
"""
|
||||
from _pytest.mark import MarkDecorator, MARK_GEN
|
||||
|
||||
if isinstance(marker, six.string_types):
|
||||
marker = getattr(MARK_GEN, marker)
|
||||
elif not isinstance(marker, MarkDecorator):
|
||||
|
@ -200,7 +204,7 @@ class Node(object):
|
|||
"""
|
||||
for node in reversed(self.listchain()):
|
||||
for mark in node.own_markers:
|
||||
if name is None or getattr(mark, 'name', None) == name:
|
||||
if name is None or getattr(mark, "name", None) == name:
|
||||
yield node, mark
|
||||
|
||||
def get_closest_marker(self, name, default=None):
|
||||
|
@ -283,9 +287,13 @@ class Node(object):
|
|||
except OSError:
|
||||
abspath = True
|
||||
|
||||
return excinfo.getrepr(funcargs=True, abspath=abspath,
|
||||
showlocals=self.config.option.showlocals,
|
||||
style=style, tbfilter=tbfilter)
|
||||
return excinfo.getrepr(
|
||||
funcargs=True,
|
||||
abspath=abspath,
|
||||
showlocals=self.config.option.showlocals,
|
||||
style=style,
|
||||
tbfilter=tbfilter,
|
||||
)
|
||||
|
||||
repr_failure = _repr_failure_py
|
||||
|
||||
|
@ -312,7 +320,7 @@ class Collector(Node):
|
|||
return self._repr_failure_py(excinfo, style="short")
|
||||
|
||||
def _prunetraceback(self, excinfo):
|
||||
if hasattr(self, 'fspath'):
|
||||
if hasattr(self, "fspath"):
|
||||
traceback = excinfo.traceback
|
||||
ntraceback = traceback.cut(path=self.fspath)
|
||||
if ntraceback == traceback:
|
||||
|
@ -327,6 +335,7 @@ def _check_initialpaths_for_relpath(session, fspath):
|
|||
|
||||
|
||||
class FSCollector(Collector):
|
||||
|
||||
def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
|
||||
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
|
||||
name = fspath.basename
|
||||
|
@ -347,7 +356,9 @@ class FSCollector(Collector):
|
|||
if os.sep != SEP:
|
||||
nodeid = nodeid.replace(os.sep, SEP)
|
||||
|
||||
super(FSCollector, self).__init__(name, parent, config, session, nodeid=nodeid, fspath=fspath)
|
||||
super(FSCollector, self).__init__(
|
||||
name, parent, config, session, nodeid=nodeid, fspath=fspath
|
||||
)
|
||||
|
||||
|
||||
class File(FSCollector):
|
||||
|
|
|
@ -9,9 +9,9 @@ from _pytest.config import hookimpl
|
|||
|
||||
def get_skip_exceptions():
|
||||
skip_classes = set()
|
||||
for module_name in ('unittest', 'unittest2', 'nose'):
|
||||
for module_name in ("unittest", "unittest2", "nose"):
|
||||
mod = sys.modules.get(module_name)
|
||||
if hasattr(mod, 'SkipTest'):
|
||||
if hasattr(mod, "SkipTest"):
|
||||
skip_classes.add(mod.SkipTest)
|
||||
return tuple(skip_classes)
|
||||
|
||||
|
@ -19,8 +19,7 @@ def get_skip_exceptions():
|
|||
def pytest_runtest_makereport(item, call):
|
||||
if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
|
||||
# let's substitute the excinfo with a pytest.skip one
|
||||
call2 = call.__class__(
|
||||
lambda: runner.skip(str(call.excinfo.value)), call.when)
|
||||
call2 = call.__class__(lambda: runner.skip(str(call.excinfo.value)), call.when)
|
||||
call.excinfo = call2.excinfo
|
||||
|
||||
|
||||
|
@ -29,22 +28,22 @@ def pytest_runtest_setup(item):
|
|||
if is_potential_nosetest(item):
|
||||
if isinstance(item.parent, python.Generator):
|
||||
gen = item.parent
|
||||
if not hasattr(gen, '_nosegensetup'):
|
||||
call_optional(gen.obj, 'setup')
|
||||
if not hasattr(gen, "_nosegensetup"):
|
||||
call_optional(gen.obj, "setup")
|
||||
if isinstance(gen.parent, python.Instance):
|
||||
call_optional(gen.parent.obj, 'setup')
|
||||
call_optional(gen.parent.obj, "setup")
|
||||
gen._nosegensetup = True
|
||||
if not call_optional(item.obj, 'setup'):
|
||||
if not call_optional(item.obj, "setup"):
|
||||
# call module level setup if there is no object level one
|
||||
call_optional(item.parent.obj, 'setup')
|
||||
call_optional(item.parent.obj, "setup")
|
||||
# XXX this implies we only call teardown when setup worked
|
||||
item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
|
||||
|
||||
|
||||
def teardown_nose(item):
|
||||
if is_potential_nosetest(item):
|
||||
if not call_optional(item.obj, 'teardown'):
|
||||
call_optional(item.parent.obj, 'teardown')
|
||||
if not call_optional(item.obj, "teardown"):
|
||||
call_optional(item.parent.obj, "teardown")
|
||||
# if hasattr(item.parent, '_nosegensetup'):
|
||||
# #call_optional(item._nosegensetup, 'teardown')
|
||||
# del item.parent._nosegensetup
|
||||
|
@ -52,14 +51,15 @@ def teardown_nose(item):
|
|||
|
||||
def pytest_make_collect_report(collector):
|
||||
if isinstance(collector, python.Generator):
|
||||
call_optional(collector.obj, 'setup')
|
||||
call_optional(collector.obj, "setup")
|
||||
|
||||
|
||||
def is_potential_nosetest(item):
|
||||
# extra check needed since we do not do nose style setup/teardown
|
||||
# on direct unittest style classes
|
||||
return isinstance(item, python.Function) and \
|
||||
not isinstance(item, unittest.TestCaseFunction)
|
||||
return isinstance(item, python.Function) and not isinstance(
|
||||
item, unittest.TestCaseFunction
|
||||
)
|
||||
|
||||
|
||||
def call_optional(obj, name):
|
||||
|
|
|
@ -11,6 +11,7 @@ class OutcomeException(BaseException):
|
|||
""" OutcomeException and its subclass instances indicate and
|
||||
contain info about test and collection outcomes.
|
||||
"""
|
||||
|
||||
def __init__(self, msg=None, pytrace=True):
|
||||
BaseException.__init__(self, msg)
|
||||
self.msg = msg
|
||||
|
@ -20,9 +21,10 @@ class OutcomeException(BaseException):
|
|||
if self.msg:
|
||||
val = self.msg
|
||||
if isinstance(val, bytes):
|
||||
val = py._builtin._totext(val, errors='replace')
|
||||
val = py._builtin._totext(val, errors="replace")
|
||||
return val
|
||||
return "<%s instance>" % (self.__class__.__name__,)
|
||||
|
||||
__str__ = __repr__
|
||||
|
||||
|
||||
|
@ -32,7 +34,7 @@ TEST_OUTCOME = (OutcomeException, Exception)
|
|||
class Skipped(OutcomeException):
|
||||
# XXX hackish: on 3k we fake to live in the builtins
|
||||
# in order to have Skipped exception printing shorter/nicer
|
||||
__module__ = 'builtins'
|
||||
__module__ = "builtins"
|
||||
|
||||
def __init__(self, msg=None, pytrace=True, allow_module_level=False):
|
||||
OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
|
||||
|
@ -41,15 +43,17 @@ class Skipped(OutcomeException):
|
|||
|
||||
class Failed(OutcomeException):
|
||||
""" raised from an explicit call to pytest.fail() """
|
||||
__module__ = 'builtins'
|
||||
__module__ = "builtins"
|
||||
|
||||
|
||||
class Exit(KeyboardInterrupt):
|
||||
""" raised for immediate program exits (no tracebacks/summaries)"""
|
||||
|
||||
def __init__(self, msg="unknown reason"):
|
||||
self.msg = msg
|
||||
KeyboardInterrupt.__init__(self, msg)
|
||||
|
||||
|
||||
# exposed helper methods
|
||||
|
||||
|
||||
|
@ -72,10 +76,10 @@ def skip(msg="", **kwargs):
|
|||
module level, skipping the rest of the module. Default to False.
|
||||
"""
|
||||
__tracebackhide__ = True
|
||||
allow_module_level = kwargs.pop('allow_module_level', False)
|
||||
allow_module_level = kwargs.pop("allow_module_level", False)
|
||||
if kwargs:
|
||||
keys = [k for k in kwargs.keys()]
|
||||
raise TypeError('unexpected keyword arguments: {}'.format(keys))
|
||||
raise TypeError("unexpected keyword arguments: {}".format(keys))
|
||||
raise Skipped(msg=msg, allow_module_level=allow_module_level)
|
||||
|
||||
|
||||
|
@ -114,15 +118,16 @@ def importorskip(modname, minversion=None):
|
|||
is only triggered if the module can not be imported.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
__tracebackhide__ = True
|
||||
compile(modname, '', 'eval') # to catch syntaxerrors
|
||||
compile(modname, "", "eval") # to catch syntaxerrors
|
||||
should_skip = False
|
||||
|
||||
with warnings.catch_warnings():
|
||||
# make sure to ignore ImportWarnings that might happen because
|
||||
# of existing directories with the same name we're trying to
|
||||
# import but without a __init__.py file
|
||||
warnings.simplefilter('ignore')
|
||||
warnings.simplefilter("ignore")
|
||||
try:
|
||||
__import__(modname)
|
||||
except ImportError:
|
||||
|
@ -133,15 +138,20 @@ def importorskip(modname, minversion=None):
|
|||
mod = sys.modules[modname]
|
||||
if minversion is None:
|
||||
return mod
|
||||
verattr = getattr(mod, '__version__', None)
|
||||
verattr = getattr(mod, "__version__", None)
|
||||
if minversion is not None:
|
||||
try:
|
||||
from pkg_resources import parse_version as pv
|
||||
except ImportError:
|
||||
raise Skipped("we have a required version for %r but can not import "
|
||||
"pkg_resources to parse version strings." % (modname,),
|
||||
allow_module_level=True)
|
||||
raise Skipped(
|
||||
"we have a required version for %r but can not import "
|
||||
"pkg_resources to parse version strings." % (modname,),
|
||||
allow_module_level=True,
|
||||
)
|
||||
if verattr is None or pv(verattr) < pv(minversion):
|
||||
raise Skipped("module %r has __version__ %r, required is: %r" % (
|
||||
modname, verattr, minversion), allow_module_level=True)
|
||||
raise Skipped(
|
||||
"module %r has __version__ %r, required is: %r"
|
||||
% (modname, verattr, minversion),
|
||||
allow_module_level=True,
|
||||
)
|
||||
return mod
|
||||
|
|
|
@ -9,43 +9,48 @@ import tempfile
|
|||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting")
|
||||
group._addoption('--pastebin', metavar="mode",
|
||||
action='store', dest="pastebin", default=None,
|
||||
choices=['failed', 'all'],
|
||||
help="send failed|all info to bpaste.net pastebin service.")
|
||||
group._addoption(
|
||||
"--pastebin",
|
||||
metavar="mode",
|
||||
action="store",
|
||||
dest="pastebin",
|
||||
default=None,
|
||||
choices=["failed", "all"],
|
||||
help="send failed|all info to bpaste.net pastebin service.",
|
||||
)
|
||||
|
||||
|
||||
@pytest.hookimpl(trylast=True)
|
||||
def pytest_configure(config):
|
||||
if config.option.pastebin == "all":
|
||||
tr = config.pluginmanager.getplugin('terminalreporter')
|
||||
tr = config.pluginmanager.getplugin("terminalreporter")
|
||||
# if no terminal reporter plugin is present, nothing we can do here;
|
||||
# this can happen when this function executes in a slave node
|
||||
# when using pytest-xdist, for example
|
||||
if tr is not None:
|
||||
# pastebin file will be utf-8 encoded binary file
|
||||
config._pastebinfile = tempfile.TemporaryFile('w+b')
|
||||
config._pastebinfile = tempfile.TemporaryFile("w+b")
|
||||
oldwrite = tr._tw.write
|
||||
|
||||
def tee_write(s, **kwargs):
|
||||
oldwrite(s, **kwargs)
|
||||
if isinstance(s, six.text_type):
|
||||
s = s.encode('utf-8')
|
||||
s = s.encode("utf-8")
|
||||
config._pastebinfile.write(s)
|
||||
|
||||
tr._tw.write = tee_write
|
||||
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
if hasattr(config, '_pastebinfile'):
|
||||
if hasattr(config, "_pastebinfile"):
|
||||
# get terminal contents and delete file
|
||||
config._pastebinfile.seek(0)
|
||||
sessionlog = config._pastebinfile.read()
|
||||
config._pastebinfile.close()
|
||||
del config._pastebinfile
|
||||
# undo our patching in the terminal reporter
|
||||
tr = config.pluginmanager.getplugin('terminalreporter')
|
||||
del tr._tw.__dict__['write']
|
||||
tr = config.pluginmanager.getplugin("terminalreporter")
|
||||
del tr._tw.__dict__["write"]
|
||||
# write summary
|
||||
tr.write_sep("=", "Sending information to Paste Service")
|
||||
pastebinurl = create_new_paste(sessionlog)
|
||||
|
@ -60,6 +65,7 @@ def create_new_paste(contents):
|
|||
:returns: url to the pasted contents
|
||||
"""
|
||||
import re
|
||||
|
||||
if sys.version_info < (3, 0):
|
||||
from urllib import urlopen, urlencode
|
||||
else:
|
||||
|
@ -67,32 +73,35 @@ def create_new_paste(contents):
|
|||
from urllib.parse import urlencode
|
||||
|
||||
params = {
|
||||
'code': contents,
|
||||
'lexer': 'python3' if sys.version_info[0] == 3 else 'python',
|
||||
'expiry': '1week',
|
||||
"code": contents,
|
||||
"lexer": "python3" if sys.version_info[0] == 3 else "python",
|
||||
"expiry": "1week",
|
||||
}
|
||||
url = 'https://bpaste.net'
|
||||
response = urlopen(url, data=urlencode(params).encode('ascii')).read()
|
||||
m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8'))
|
||||
url = "https://bpaste.net"
|
||||
response = urlopen(url, data=urlencode(params).encode("ascii")).read()
|
||||
m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8"))
|
||||
if m:
|
||||
return '%s/show/%s' % (url, m.group(1))
|
||||
return "%s/show/%s" % (url, m.group(1))
|
||||
else:
|
||||
return 'bad response: ' + response
|
||||
return "bad response: " + response
|
||||
|
||||
|
||||
def pytest_terminal_summary(terminalreporter):
|
||||
import _pytest.config
|
||||
|
||||
if terminalreporter.config.option.pastebin != "failed":
|
||||
return
|
||||
tr = terminalreporter
|
||||
if 'failed' in tr.stats:
|
||||
if "failed" in tr.stats:
|
||||
terminalreporter.write_sep("=", "Sending information to Paste Service")
|
||||
for rep in terminalreporter.stats.get('failed'):
|
||||
for rep in terminalreporter.stats.get("failed"):
|
||||
try:
|
||||
msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
|
||||
except AttributeError:
|
||||
msg = tr._getfailureheadline(rep)
|
||||
tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
|
||||
tw = _pytest.config.create_terminal_writer(
|
||||
terminalreporter.config, stringio=True
|
||||
)
|
||||
rep.toterminal(tw)
|
||||
s = tw.stringio.getvalue()
|
||||
assert len(s)
|
||||
|
|
|
@ -23,23 +23,35 @@ from _pytest.main import Session, EXIT_OK
|
|||
from _pytest.assertion.rewrite import AssertionRewritingHook
|
||||
|
||||
|
||||
PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace("$py.class", ".py")
|
||||
PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace(
|
||||
"$py.class", ".py"
|
||||
)
|
||||
|
||||
|
||||
IGNORE_PAM = [ # filenames added when obtaining details about the current user
|
||||
u'/var/lib/sss/mc/passwd'
|
||||
u"/var/lib/sss/mc/passwd"
|
||||
]
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption('--lsof',
|
||||
action="store_true", dest="lsof", default=False,
|
||||
help=("run FD checks if lsof is available"))
|
||||
parser.addoption(
|
||||
"--lsof",
|
||||
action="store_true",
|
||||
dest="lsof",
|
||||
default=False,
|
||||
help=("run FD checks if lsof is available"),
|
||||
)
|
||||
|
||||
parser.addoption('--runpytest', default="inprocess", dest="runpytest",
|
||||
choices=("inprocess", "subprocess"),
|
||||
help=("run pytest sub runs in tests using an 'inprocess' "
|
||||
"or 'subprocess' (python -m main) method"))
|
||||
parser.addoption(
|
||||
"--runpytest",
|
||||
default="inprocess",
|
||||
dest="runpytest",
|
||||
choices=("inprocess", "subprocess"),
|
||||
help=(
|
||||
"run pytest sub runs in tests using an 'inprocess' "
|
||||
"or 'subprocess' (python -m main) method"
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
|
@ -50,6 +62,7 @@ def pytest_configure(config):
|
|||
|
||||
|
||||
class LsofFdLeakChecker(object):
|
||||
|
||||
def get_open_files(self):
|
||||
out = self._exec_lsof()
|
||||
open_files = self._parse_lsof_output(out)
|
||||
|
@ -60,20 +73,25 @@ class LsofFdLeakChecker(object):
|
|||
return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
|
||||
|
||||
def _parse_lsof_output(self, out):
|
||||
|
||||
def isopen(line):
|
||||
return line.startswith('f') and ("deleted" not in line and
|
||||
'mem' not in line and "txt" not in line and 'cwd' not in line)
|
||||
return line.startswith("f") and (
|
||||
"deleted" not in line
|
||||
and "mem" not in line
|
||||
and "txt" not in line
|
||||
and "cwd" not in line
|
||||
)
|
||||
|
||||
open_files = []
|
||||
|
||||
for line in out.split("\n"):
|
||||
if isopen(line):
|
||||
fields = line.split('\0')
|
||||
fields = line.split("\0")
|
||||
fd = fields[0][1:]
|
||||
filename = fields[1][1:]
|
||||
if filename in IGNORE_PAM:
|
||||
continue
|
||||
if filename.startswith('/'):
|
||||
if filename.startswith("/"):
|
||||
open_files.append((fd, filename))
|
||||
|
||||
return open_files
|
||||
|
@ -110,15 +128,15 @@ class LsofFdLeakChecker(object):
|
|||
error.append(error[0])
|
||||
error.append("*** function %s:%s: %s " % item.location)
|
||||
error.append("See issue #2366")
|
||||
item.warn('', "\n".join(error))
|
||||
item.warn("", "\n".join(error))
|
||||
|
||||
|
||||
# XXX copied from execnet's conftest.py - needs to be merged
|
||||
winpymap = {
|
||||
'python2.7': r'C:\Python27\python.exe',
|
||||
'python3.4': r'C:\Python34\python.exe',
|
||||
'python3.5': r'C:\Python35\python.exe',
|
||||
'python3.6': r'C:\Python36\python.exe',
|
||||
"python2.7": r"C:\Python27\python.exe",
|
||||
"python3.4": r"C:\Python34\python.exe",
|
||||
"python3.5": r"C:\Python35\python.exe",
|
||||
"python3.6": r"C:\Python36\python.exe",
|
||||
}
|
||||
|
||||
|
||||
|
@ -129,8 +147,12 @@ def getexecutable(name, cache={}):
|
|||
executable = py.path.local.sysfind(name)
|
||||
if executable:
|
||||
import subprocess
|
||||
popen = subprocess.Popen([str(executable), "--version"],
|
||||
universal_newlines=True, stderr=subprocess.PIPE)
|
||||
|
||||
popen = subprocess.Popen(
|
||||
[str(executable), "--version"],
|
||||
universal_newlines=True,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
out, err = popen.communicate()
|
||||
if name == "jython":
|
||||
if not err or "2.5" not in err:
|
||||
|
@ -144,7 +166,7 @@ def getexecutable(name, cache={}):
|
|||
return executable
|
||||
|
||||
|
||||
@pytest.fixture(params=['python2.7', 'python3.4', 'pypy', 'pypy3'])
|
||||
@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
|
||||
def anypython(request):
|
||||
name = request.param
|
||||
executable = getexecutable(name)
|
||||
|
@ -158,6 +180,7 @@ def anypython(request):
|
|||
pytest.skip("no suitable %s found" % (name,))
|
||||
return executable
|
||||
|
||||
|
||||
# used at least by pytest-xdist plugin
|
||||
|
||||
|
||||
|
@ -172,6 +195,7 @@ def _pytest(request):
|
|||
|
||||
|
||||
class PytestArg(object):
|
||||
|
||||
def __init__(self, request):
|
||||
self.request = request
|
||||
|
||||
|
@ -187,13 +211,14 @@ def get_public_names(values):
|
|||
|
||||
|
||||
class ParsedCall(object):
|
||||
|
||||
def __init__(self, name, kwargs):
|
||||
self.__dict__.update(kwargs)
|
||||
self._name = name
|
||||
|
||||
def __repr__(self):
|
||||
d = self.__dict__.copy()
|
||||
del d['_name']
|
||||
del d["_name"]
|
||||
return "<ParsedCall %r(**%r)>" % (self._name, d)
|
||||
|
||||
|
||||
|
@ -263,12 +288,15 @@ class HookRecorder(object):
|
|||
|
||||
# functionality for test reports
|
||||
|
||||
def getreports(self,
|
||||
names="pytest_runtest_logreport pytest_collectreport"):
|
||||
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
|
||||
return [x.report for x in self.getcalls(names)]
|
||||
|
||||
def matchreport(self, inamepart="",
|
||||
names="pytest_runtest_logreport pytest_collectreport", when=None):
|
||||
def matchreport(
|
||||
self,
|
||||
inamepart="",
|
||||
names="pytest_runtest_logreport pytest_collectreport",
|
||||
when=None,
|
||||
):
|
||||
"""return a testreport whose dotted import path matches"""
|
||||
values = []
|
||||
for rep in self.getreports(names=names):
|
||||
|
@ -278,31 +306,32 @@ class HookRecorder(object):
|
|||
continue
|
||||
except AttributeError:
|
||||
pass
|
||||
if when and getattr(rep, 'when', None) != when:
|
||||
if when and getattr(rep, "when", None) != when:
|
||||
continue
|
||||
if not inamepart or inamepart in rep.nodeid.split("::"):
|
||||
values.append(rep)
|
||||
if not values:
|
||||
raise ValueError("could not find test report matching %r: "
|
||||
"no test reports at all!" % (inamepart,))
|
||||
raise ValueError(
|
||||
"could not find test report matching %r: "
|
||||
"no test reports at all!" % (inamepart,)
|
||||
)
|
||||
if len(values) > 1:
|
||||
raise ValueError(
|
||||
"found 2 or more testreports matching %r: %s" % (inamepart, values))
|
||||
"found 2 or more testreports matching %r: %s" % (inamepart, values)
|
||||
)
|
||||
return values[0]
|
||||
|
||||
def getfailures(self,
|
||||
names='pytest_runtest_logreport pytest_collectreport'):
|
||||
def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
|
||||
return [rep for rep in self.getreports(names) if rep.failed]
|
||||
|
||||
def getfailedcollections(self):
|
||||
return self.getfailures('pytest_collectreport')
|
||||
return self.getfailures("pytest_collectreport")
|
||||
|
||||
def listoutcomes(self):
|
||||
passed = []
|
||||
skipped = []
|
||||
failed = []
|
||||
for rep in self.getreports(
|
||||
"pytest_collectreport pytest_runtest_logreport"):
|
||||
for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
|
||||
if rep.passed:
|
||||
if getattr(rep, "when", None) == "call":
|
||||
passed.append(rep)
|
||||
|
@ -330,7 +359,7 @@ def linecomp(request):
|
|||
return LineComp()
|
||||
|
||||
|
||||
@pytest.fixture(name='LineMatcher')
|
||||
@pytest.fixture(name="LineMatcher")
|
||||
def LineMatcher_fixture(request):
|
||||
return LineMatcher
|
||||
|
||||
|
@ -373,7 +402,7 @@ class RunResult(object):
|
|||
|
||||
"""
|
||||
for line in reversed(self.outlines):
|
||||
if 'seconds' in line:
|
||||
if "seconds" in line:
|
||||
outcomes = rex_outcome.findall(line)
|
||||
if outcomes:
|
||||
d = {}
|
||||
|
@ -389,15 +418,18 @@ class RunResult(object):
|
|||
"""
|
||||
d = self.parseoutcomes()
|
||||
obtained = {
|
||||
'passed': d.get('passed', 0),
|
||||
'skipped': d.get('skipped', 0),
|
||||
'failed': d.get('failed', 0),
|
||||
'error': d.get('error', 0),
|
||||
"passed": d.get("passed", 0),
|
||||
"skipped": d.get("skipped", 0),
|
||||
"failed": d.get("failed", 0),
|
||||
"error": d.get("error", 0),
|
||||
}
|
||||
assert obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error)
|
||||
assert (
|
||||
obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error)
|
||||
)
|
||||
|
||||
|
||||
class CwdSnapshot(object):
|
||||
|
||||
def __init__(self):
|
||||
self.__saved = os.getcwd()
|
||||
|
||||
|
@ -406,6 +438,7 @@ class CwdSnapshot(object):
|
|||
|
||||
|
||||
class SysModulesSnapshot(object):
|
||||
|
||||
def __init__(self, preserve=None):
|
||||
self.__preserve = preserve
|
||||
self.__saved = dict(sys.modules)
|
||||
|
@ -413,12 +446,14 @@ class SysModulesSnapshot(object):
|
|||
def restore(self):
|
||||
if self.__preserve:
|
||||
self.__saved.update(
|
||||
(k, m) for k, m in sys.modules.items() if self.__preserve(k))
|
||||
(k, m) for k, m in sys.modules.items() if self.__preserve(k)
|
||||
)
|
||||
sys.modules.clear()
|
||||
sys.modules.update(self.__saved)
|
||||
|
||||
|
||||
class SysPathsSnapshot(object):
|
||||
|
||||
def __init__(self):
|
||||
self.__saved = list(sys.path), list(sys.meta_path)
|
||||
|
||||
|
@ -482,6 +517,7 @@ class Testdir(object):
|
|||
# `zope.interface` for example
|
||||
def preserve_module(name):
|
||||
return name.startswith("zope")
|
||||
|
||||
return SysModulesSnapshot(preserve=preserve_module)
|
||||
|
||||
def make_hook_recorder(self, pluginmanager):
|
||||
|
@ -499,7 +535,7 @@ class Testdir(object):
|
|||
"""
|
||||
self.tmpdir.chdir()
|
||||
|
||||
def _makefile(self, ext, args, kwargs, encoding='utf-8'):
|
||||
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
|
||||
items = list(kwargs.items())
|
||||
|
||||
def to_text(s):
|
||||
|
@ -544,20 +580,20 @@ class Testdir(object):
|
|||
|
||||
def makeini(self, source):
|
||||
"""Write a tox.ini file with 'source' as contents."""
|
||||
return self.makefile('.ini', tox=source)
|
||||
return self.makefile(".ini", tox=source)
|
||||
|
||||
def getinicfg(self, source):
|
||||
"""Return the pytest section from the tox.ini config file."""
|
||||
p = self.makeini(source)
|
||||
return py.iniconfig.IniConfig(p)['pytest']
|
||||
return py.iniconfig.IniConfig(p)["pytest"]
|
||||
|
||||
def makepyfile(self, *args, **kwargs):
|
||||
"""Shortcut for .makefile() with a .py extension."""
|
||||
return self._makefile('.py', args, kwargs)
|
||||
return self._makefile(".py", args, kwargs)
|
||||
|
||||
def maketxtfile(self, *args, **kwargs):
|
||||
"""Shortcut for .makefile() with a .txt extension."""
|
||||
return self._makefile('.txt', args, kwargs)
|
||||
return self._makefile(".txt", args, kwargs)
|
||||
|
||||
def syspathinsert(self, path=None):
|
||||
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
|
||||
|
@ -612,7 +648,7 @@ class Testdir(object):
|
|||
|
||||
"""
|
||||
session = Session(config)
|
||||
assert '::' not in str(arg)
|
||||
assert "::" not in str(arg)
|
||||
p = py.path.local(arg)
|
||||
config.hook.pytest_sessionstart(session=session)
|
||||
res = session.perform_collect([str(p)], genitems=False)[0]
|
||||
|
@ -722,6 +758,7 @@ class Testdir(object):
|
|||
|
||||
def revert_warn_already_imported():
|
||||
AssertionRewritingHook._warn_already_imported = orig_warn
|
||||
|
||||
finalizers.append(revert_warn_already_imported)
|
||||
AssertionRewritingHook._warn_already_imported = lambda *a: None
|
||||
|
||||
|
@ -741,6 +778,7 @@ class Testdir(object):
|
|||
rec = []
|
||||
|
||||
class Collect(object):
|
||||
|
||||
def pytest_configure(x, config):
|
||||
rec.append(self.make_hook_recorder(config.pluginmanager))
|
||||
|
||||
|
@ -750,8 +788,10 @@ class Testdir(object):
|
|||
if len(rec) == 1:
|
||||
reprec = rec.pop()
|
||||
else:
|
||||
|
||||
class reprec(object):
|
||||
pass
|
||||
|
||||
reprec.ret = ret
|
||||
|
||||
# typically we reraise keyboard interrupts from the child run
|
||||
|
@ -788,15 +828,14 @@ class Testdir(object):
|
|||
|
||||
class reprec(object):
|
||||
ret = 3
|
||||
|
||||
finally:
|
||||
out, err = capture.readouterr()
|
||||
capture.stop_capturing()
|
||||
sys.stdout.write(out)
|
||||
sys.stderr.write(err)
|
||||
|
||||
res = RunResult(reprec.ret,
|
||||
out.split("\n"), err.split("\n"),
|
||||
time.time() - now)
|
||||
res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now)
|
||||
res.reprec = reprec
|
||||
return res
|
||||
|
||||
|
@ -811,11 +850,11 @@ class Testdir(object):
|
|||
def _ensure_basetemp(self, args):
|
||||
args = [str(x) for x in args]
|
||||
for x in args:
|
||||
if str(x).startswith('--basetemp'):
|
||||
if str(x).startswith("--basetemp"):
|
||||
# print("basedtemp exists: %s" %(args,))
|
||||
break
|
||||
else:
|
||||
args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
|
||||
args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
|
||||
# print("added basetemp: %s" %(args,))
|
||||
return args
|
||||
|
||||
|
@ -834,6 +873,7 @@ class Testdir(object):
|
|||
args = self._ensure_basetemp(args)
|
||||
|
||||
import _pytest.config
|
||||
|
||||
config = _pytest.config._prepareconfig(args, self.plugins)
|
||||
# we don't know what the test will do with this half-setup config
|
||||
# object and thus we make sure it gets unconfigured properly in any
|
||||
|
@ -870,8 +910,9 @@ class Testdir(object):
|
|||
for item in items:
|
||||
if item.name == funcname:
|
||||
return item
|
||||
assert 0, "%r item not found in module:\n%s\nitems: %s" % (
|
||||
funcname, source, items)
|
||||
assert 0, (
|
||||
"%r item not found in module:\n%s\nitems: %s" % (funcname, source, items)
|
||||
)
|
||||
|
||||
def getitems(self, source):
|
||||
"""Return all test items collected from the module.
|
||||
|
@ -935,11 +976,14 @@ class Testdir(object):
|
|||
|
||||
"""
|
||||
env = os.environ.copy()
|
||||
env['PYTHONPATH'] = os.pathsep.join(filter(None, [
|
||||
str(os.getcwd()), env.get('PYTHONPATH', '')]))
|
||||
kw['env'] = env
|
||||
env["PYTHONPATH"] = os.pathsep.join(
|
||||
filter(None, [str(os.getcwd()), env.get("PYTHONPATH", "")])
|
||||
)
|
||||
kw["env"] = env
|
||||
|
||||
popen = subprocess.Popen(cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw)
|
||||
popen = subprocess.Popen(
|
||||
cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw
|
||||
)
|
||||
popen.stdin.close()
|
||||
|
||||
return popen
|
||||
|
@ -958,14 +1002,15 @@ class Testdir(object):
|
|||
cmdargs = [str(x) for x in cmdargs]
|
||||
p1 = self.tmpdir.join("stdout")
|
||||
p2 = self.tmpdir.join("stderr")
|
||||
print("running:", ' '.join(cmdargs))
|
||||
print("running:", " ".join(cmdargs))
|
||||
print(" in:", str(py.path.local()))
|
||||
f1 = codecs.open(str(p1), "w", encoding="utf8")
|
||||
f2 = codecs.open(str(p2), "w", encoding="utf8")
|
||||
try:
|
||||
now = time.time()
|
||||
popen = self.popen(cmdargs, stdout=f1, stderr=f2,
|
||||
close_fds=(sys.platform != "win32"))
|
||||
popen = self.popen(
|
||||
cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32")
|
||||
)
|
||||
ret = popen.wait()
|
||||
finally:
|
||||
f1.close()
|
||||
|
@ -992,7 +1037,7 @@ class Testdir(object):
|
|||
def _getpytestargs(self):
|
||||
# we cannot use `(sys.executable, script)` because on Windows the
|
||||
# script is e.g. `pytest.exe`
|
||||
return (sys.executable, PYTEST_FULLPATH) # noqa
|
||||
return (sys.executable, PYTEST_FULLPATH) # noqa
|
||||
|
||||
def runpython(self, script):
|
||||
"""Run a python script using sys.executable as interpreter.
|
||||
|
@ -1018,12 +1063,13 @@ class Testdir(object):
|
|||
Returns a :py:class:`RunResult`.
|
||||
|
||||
"""
|
||||
p = py.path.local.make_numbered_dir(prefix="runpytest-",
|
||||
keep=None, rootdir=self.tmpdir)
|
||||
args = ('--basetemp=%s' % p,) + args
|
||||
p = py.path.local.make_numbered_dir(
|
||||
prefix="runpytest-", keep=None, rootdir=self.tmpdir
|
||||
)
|
||||
args = ("--basetemp=%s" % p,) + args
|
||||
plugins = [x for x in self.plugins if isinstance(x, str)]
|
||||
if plugins:
|
||||
args = ('-p', plugins[0]) + args
|
||||
args = ("-p", plugins[0]) + args
|
||||
args = self._getpytestargs() + args
|
||||
return self.run(*args)
|
||||
|
||||
|
@ -1048,7 +1094,7 @@ class Testdir(object):
|
|||
|
||||
"""
|
||||
pexpect = pytest.importorskip("pexpect", "3.0")
|
||||
if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
|
||||
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
|
||||
pytest.skip("pypy-64 bit not supported")
|
||||
if sys.platform.startswith("freebsd"):
|
||||
pytest.xfail("pexpect does not work reliably on freebsd")
|
||||
|
@ -1064,10 +1110,12 @@ def getdecoded(out):
|
|||
return out.decode("utf-8")
|
||||
except UnicodeDecodeError:
|
||||
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
|
||||
py.io.saferepr(out),)
|
||||
py.io.saferepr(out),
|
||||
)
|
||||
|
||||
|
||||
class LineComp(object):
|
||||
|
||||
def __init__(self):
|
||||
self.stringio = py.io.TextIO()
|
||||
|
||||
|
@ -1158,11 +1206,11 @@ class LineMatcher(object):
|
|||
raise ValueError("line %r not found in output" % fnline)
|
||||
|
||||
def _log(self, *args):
|
||||
self._log_output.append(' '.join((str(x) for x in args)))
|
||||
self._log_output.append(" ".join((str(x) for x in args)))
|
||||
|
||||
@property
|
||||
def _log_text(self):
|
||||
return '\n'.join(self._log_output)
|
||||
return "\n".join(self._log_output)
|
||||
|
||||
def fnmatch_lines(self, lines2):
|
||||
"""Search captured text for matching lines using ``fnmatch.fnmatch``.
|
||||
|
@ -1172,7 +1220,7 @@ class LineMatcher(object):
|
|||
matches and non-matches are also printed on stdout.
|
||||
|
||||
"""
|
||||
self._match_lines(lines2, fnmatch, 'fnmatch')
|
||||
self._match_lines(lines2, fnmatch, "fnmatch")
|
||||
|
||||
def re_match_lines(self, lines2):
|
||||
"""Search captured text for matching lines using ``re.match``.
|
||||
|
@ -1183,7 +1231,7 @@ class LineMatcher(object):
|
|||
The matches and non-matches are also printed on stdout.
|
||||
|
||||
"""
|
||||
self._match_lines(lines2, lambda name, pat: re.match(pat, name), 're.match')
|
||||
self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match")
|
||||
|
||||
def _match_lines(self, lines2, match_func, match_nickname):
|
||||
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
|
||||
|
|
|
@ -22,10 +22,21 @@ from _pytest import fixtures
|
|||
from _pytest import nodes
|
||||
from _pytest import deprecated
|
||||
from _pytest.compat import (
|
||||
isclass, isfunction, is_generator, ascii_escaped,
|
||||
REGEX_TYPE, STRING_TYPES, NoneType, NOTSET,
|
||||
get_real_func, getfslineno, safe_getattr,
|
||||
safe_str, getlocation, enum, get_default_arg_names
|
||||
isclass,
|
||||
isfunction,
|
||||
is_generator,
|
||||
ascii_escaped,
|
||||
REGEX_TYPE,
|
||||
STRING_TYPES,
|
||||
NoneType,
|
||||
NOTSET,
|
||||
get_real_func,
|
||||
getfslineno,
|
||||
safe_getattr,
|
||||
safe_str,
|
||||
getlocation,
|
||||
enum,
|
||||
get_default_arg_names,
|
||||
)
|
||||
from _pytest.outcomes import fail
|
||||
from _pytest.mark.structures import transfer_markers, get_unpacked_marks
|
||||
|
@ -37,7 +48,7 @@ from _pytest.mark.structures import transfer_markers, get_unpacked_marks
|
|||
# for better maintenance
|
||||
_pluggy_dir = py.path.local(pluggy.__file__.rstrip("oc"))
|
||||
# pluggy is either a package or a single module depending on the version
|
||||
if _pluggy_dir.basename == '__init__.py':
|
||||
if _pluggy_dir.basename == "__init__.py":
|
||||
_pluggy_dir = _pluggy_dir.dirpath()
|
||||
_pytest_dir = py.path.local(_pytest.__file__).dirpath()
|
||||
_py_dir = py.path.local(py.__file__).dirpath()
|
||||
|
@ -52,53 +63,81 @@ def filter_traceback(entry):
|
|||
# points to dynamically generated code
|
||||
# see https://bitbucket.org/pytest-dev/py/issues/71
|
||||
raw_filename = entry.frame.code.raw.co_filename
|
||||
is_generated = '<' in raw_filename and '>' in raw_filename
|
||||
is_generated = "<" in raw_filename and ">" in raw_filename
|
||||
if is_generated:
|
||||
return False
|
||||
# entry.path might point to a non-existing file, in which case it will
|
||||
# also return a str object. see #1133
|
||||
p = py.path.local(entry.path)
|
||||
return not p.relto(_pluggy_dir) and not p.relto(_pytest_dir) and not p.relto(_py_dir)
|
||||
return not p.relto(_pluggy_dir) and not p.relto(_pytest_dir) and not p.relto(
|
||||
_py_dir
|
||||
)
|
||||
|
||||
|
||||
def pyobj_property(name):
|
||||
|
||||
def get(self):
|
||||
node = self.getparent(getattr(__import__('pytest'), name))
|
||||
node = self.getparent(getattr(__import__("pytest"), name))
|
||||
if node is not None:
|
||||
return node.obj
|
||||
|
||||
doc = "python %s object this node was collected from (can be None)." % (
|
||||
name.lower(),)
|
||||
name.lower(),
|
||||
)
|
||||
return property(get, None, None, doc)
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("general")
|
||||
group.addoption('--fixtures', '--funcargs',
|
||||
action="store_true", dest="showfixtures", default=False,
|
||||
help="show available fixtures, sorted by plugin appearance "
|
||||
"(fixtures with leading '_' are only shown with '-v')")
|
||||
group.addoption(
|
||||
'--fixtures-per-test',
|
||||
"--fixtures",
|
||||
"--funcargs",
|
||||
action="store_true",
|
||||
dest="showfixtures",
|
||||
default=False,
|
||||
help="show available fixtures, sorted by plugin appearance "
|
||||
"(fixtures with leading '_' are only shown with '-v')",
|
||||
)
|
||||
group.addoption(
|
||||
"--fixtures-per-test",
|
||||
action="store_true",
|
||||
dest="show_fixtures_per_test",
|
||||
default=False,
|
||||
help="show fixtures per test",
|
||||
)
|
||||
parser.addini("usefixtures", type="args", default=[],
|
||||
help="list of default fixtures to be used with this project")
|
||||
parser.addini("python_files", type="args",
|
||||
default=['test_*.py', '*_test.py'],
|
||||
help="glob-style file patterns for Python test module discovery")
|
||||
parser.addini("python_classes", type="args", default=["Test", ],
|
||||
help="prefixes or glob names for Python test class discovery")
|
||||
parser.addini("python_functions", type="args", default=["test", ],
|
||||
help="prefixes or glob names for Python test function and "
|
||||
"method discovery")
|
||||
parser.addini(
|
||||
"usefixtures",
|
||||
type="args",
|
||||
default=[],
|
||||
help="list of default fixtures to be used with this project",
|
||||
)
|
||||
parser.addini(
|
||||
"python_files",
|
||||
type="args",
|
||||
default=["test_*.py", "*_test.py"],
|
||||
help="glob-style file patterns for Python test module discovery",
|
||||
)
|
||||
parser.addini(
|
||||
"python_classes",
|
||||
type="args",
|
||||
default=["Test"],
|
||||
help="prefixes or glob names for Python test class discovery",
|
||||
)
|
||||
parser.addini(
|
||||
"python_functions",
|
||||
type="args",
|
||||
default=["test"],
|
||||
help="prefixes or glob names for Python test function and " "method discovery",
|
||||
)
|
||||
|
||||
group.addoption("--import-mode", default="prepend",
|
||||
choices=["prepend", "append"], dest="importmode",
|
||||
help="prepend/append to sys.path when importing test modules, "
|
||||
"default is to prepend.")
|
||||
group.addoption(
|
||||
"--import-mode",
|
||||
default="prepend",
|
||||
choices=["prepend", "append"],
|
||||
dest="importmode",
|
||||
help="prepend/append to sys.path when importing test modules, "
|
||||
"default is to prepend.",
|
||||
)
|
||||
|
||||
|
||||
def pytest_cmdline_main(config):
|
||||
|
@ -113,30 +152,32 @@ def pytest_cmdline_main(config):
|
|||
def pytest_generate_tests(metafunc):
|
||||
# those alternative spellings are common - raise a specific error to alert
|
||||
# the user
|
||||
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
|
||||
alt_spellings = ["parameterize", "parametrise", "parameterise"]
|
||||
for attr in alt_spellings:
|
||||
if hasattr(metafunc.function, attr):
|
||||
msg = "{0} has '{1}', spelling should be 'parametrize'"
|
||||
raise MarkerError(msg.format(metafunc.function.__name__, attr))
|
||||
for marker in metafunc.definition.iter_markers(name='parametrize'):
|
||||
for marker in metafunc.definition.iter_markers(name="parametrize"):
|
||||
metafunc.parametrize(*marker.args, **marker.kwargs)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line("markers",
|
||||
"parametrize(argnames, argvalues): call a test function multiple "
|
||||
"times passing in different arguments in turn. argvalues generally "
|
||||
"needs to be a list of values if argnames specifies only one name "
|
||||
"or a list of tuples of values if argnames specifies multiple names. "
|
||||
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
|
||||
"decorated test function, one with arg1=1 and another with arg1=2."
|
||||
"see http://pytest.org/latest/parametrize.html for more info and "
|
||||
"examples."
|
||||
)
|
||||
config.addinivalue_line("markers",
|
||||
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
|
||||
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"parametrize(argnames, argvalues): call a test function multiple "
|
||||
"times passing in different arguments in turn. argvalues generally "
|
||||
"needs to be a list of values if argnames specifies only one name "
|
||||
"or a list of tuples of values if argnames specifies multiple names. "
|
||||
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
|
||||
"decorated test function, one with arg1=1 and another with arg1=2."
|
||||
"see http://pytest.org/latest/parametrize.html for more info and "
|
||||
"examples.",
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
|
||||
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures ",
|
||||
)
|
||||
|
||||
|
||||
@hookimpl(trylast=True)
|
||||
|
@ -157,7 +198,7 @@ def pytest_collect_file(path, parent):
|
|||
ext = path.ext
|
||||
if ext == ".py":
|
||||
if not parent.session.isinitpath(path):
|
||||
for pat in parent.config.getini('python_files'):
|
||||
for pat in parent.config.getini("python_files"):
|
||||
if path.fnmatch(pat):
|
||||
break
|
||||
else:
|
||||
|
@ -188,8 +229,10 @@ def pytest_pycollect_makeitem(collector, name, obj):
|
|||
# or a functools.wrapped.
|
||||
# We mustn't if it's been wrapped with mock.patch (python 2 only)
|
||||
if not (isfunction(obj) or isfunction(get_real_func(obj))):
|
||||
collector.warn(code="C2", message="cannot collect %r because it is not a function."
|
||||
% name, )
|
||||
collector.warn(
|
||||
code="C2",
|
||||
message="cannot collect %r because it is not a function." % name,
|
||||
)
|
||||
elif getattr(obj, "__test__", True):
|
||||
if is_generator(obj):
|
||||
res = Generator(name, parent=collector)
|
||||
|
@ -215,8 +258,9 @@ class PyobjMixin(PyobjContext):
|
|||
super(PyobjMixin, self).__init__(*k, **kw)
|
||||
|
||||
def obj():
|
||||
|
||||
def fget(self):
|
||||
obj = getattr(self, '_obj', None)
|
||||
obj = getattr(self, "_obj", None)
|
||||
if obj is None:
|
||||
self._obj = obj = self._getobj()
|
||||
# XXX evil hack
|
||||
|
@ -261,7 +305,7 @@ class PyobjMixin(PyobjContext):
|
|||
def reportinfo(self):
|
||||
# XXX caching?
|
||||
obj = self.obj
|
||||
compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
|
||||
compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None)
|
||||
if isinstance(compat_co_firstlineno, int):
|
||||
# nose compatibility
|
||||
fspath = sys.modules[obj.__module__].__file__
|
||||
|
@ -278,7 +322,7 @@ class PyobjMixin(PyobjContext):
|
|||
class PyCollector(PyobjMixin, nodes.Collector):
|
||||
|
||||
def funcnamefilter(self, name):
|
||||
return self._matches_prefix_or_glob_option('python_functions', name)
|
||||
return self._matches_prefix_or_glob_option("python_functions", name)
|
||||
|
||||
def isnosetest(self, obj):
|
||||
""" Look for the __test__ attribute, which is applied by the
|
||||
|
@ -287,25 +331,24 @@ class PyCollector(PyobjMixin, nodes.Collector):
|
|||
# We explicitly check for "is True" here to not mistakenly treat
|
||||
# classes with a custom __getattr__ returning something truthy (like a
|
||||
# function) as test classes.
|
||||
return safe_getattr(obj, '__test__', False) is True
|
||||
return safe_getattr(obj, "__test__", False) is True
|
||||
|
||||
def classnamefilter(self, name):
|
||||
return self._matches_prefix_or_glob_option('python_classes', name)
|
||||
return self._matches_prefix_or_glob_option("python_classes", name)
|
||||
|
||||
def istestfunction(self, obj, name):
|
||||
if self.funcnamefilter(name) or self.isnosetest(obj):
|
||||
if isinstance(obj, staticmethod):
|
||||
# static methods need to be unwrapped
|
||||
obj = safe_getattr(obj, '__func__', False)
|
||||
obj = safe_getattr(obj, "__func__", False)
|
||||
if obj is False:
|
||||
# Python 2.6 wraps in a different way that we won't try to handle
|
||||
msg = "cannot collect static method %r because " \
|
||||
"it is not a function (always the case in Python 2.6)"
|
||||
self.warn(
|
||||
code="C2", message=msg % name)
|
||||
msg = "cannot collect static method %r because " "it is not a function (always the case in Python 2.6)"
|
||||
self.warn(code="C2", message=msg % name)
|
||||
return False
|
||||
return (
|
||||
safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None
|
||||
safe_getattr(obj, "__call__", False)
|
||||
and fixtures.getfixturemarker(obj) is None
|
||||
)
|
||||
else:
|
||||
return False
|
||||
|
@ -324,8 +367,9 @@ class PyCollector(PyobjMixin, nodes.Collector):
|
|||
# check that name looks like a glob-string before calling fnmatch
|
||||
# because this is called for every name in each collected module,
|
||||
# and fnmatch is somewhat expensive to call
|
||||
elif ('*' in option or '?' in option or '[' in option) and \
|
||||
fnmatch.fnmatch(name, option):
|
||||
elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
|
||||
name, option
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
@ -335,7 +379,7 @@ class PyCollector(PyobjMixin, nodes.Collector):
|
|||
|
||||
# NB. we avoid random getattrs and peek in the __dict__ instead
|
||||
# (XXX originally introduced from a PyPy need, still true?)
|
||||
dicts = [getattr(self.obj, '__dict__', {})]
|
||||
dicts = [getattr(self.obj, "__dict__", {})]
|
||||
for basecls in inspect.getmro(self.obj.__class__):
|
||||
dicts.append(basecls.__dict__)
|
||||
seen = {}
|
||||
|
@ -360,8 +404,7 @@ class PyCollector(PyobjMixin, nodes.Collector):
|
|||
|
||||
def _makeitem(self, name, obj):
|
||||
# assert self.ihook.fspath == self.fspath, self
|
||||
return self.ihook.pytest_pycollect_makeitem(
|
||||
collector=self, name=name, obj=obj)
|
||||
return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj)
|
||||
|
||||
def _genfunctions(self, name, funcobj):
|
||||
module = self.getparent(Module).obj
|
||||
|
@ -370,22 +413,21 @@ class PyCollector(PyobjMixin, nodes.Collector):
|
|||
transfer_markers(funcobj, cls, module)
|
||||
fm = self.session._fixturemanager
|
||||
|
||||
definition = FunctionDefinition(
|
||||
name=name,
|
||||
parent=self,
|
||||
callobj=funcobj,
|
||||
)
|
||||
definition = FunctionDefinition(name=name, parent=self, callobj=funcobj)
|
||||
fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls)
|
||||
|
||||
metafunc = Metafunc(definition, fixtureinfo, self.config, cls=cls, module=module)
|
||||
metafunc = Metafunc(
|
||||
definition, fixtureinfo, self.config, cls=cls, module=module
|
||||
)
|
||||
methods = []
|
||||
if hasattr(module, "pytest_generate_tests"):
|
||||
methods.append(module.pytest_generate_tests)
|
||||
if hasattr(cls, "pytest_generate_tests"):
|
||||
methods.append(cls().pytest_generate_tests)
|
||||
if methods:
|
||||
self.ihook.pytest_generate_tests.call_extra(methods,
|
||||
dict(metafunc=metafunc))
|
||||
self.ihook.pytest_generate_tests.call_extra(
|
||||
methods, dict(metafunc=metafunc)
|
||||
)
|
||||
else:
|
||||
self.ihook.pytest_generate_tests(metafunc=metafunc)
|
||||
|
||||
|
@ -398,12 +440,15 @@ class PyCollector(PyobjMixin, nodes.Collector):
|
|||
|
||||
for callspec in metafunc._calls:
|
||||
subname = "%s[%s]" % (name, callspec.id)
|
||||
yield Function(name=subname, parent=self,
|
||||
callspec=callspec, callobj=funcobj,
|
||||
fixtureinfo=fixtureinfo,
|
||||
keywords={callspec.id: True},
|
||||
originalname=name,
|
||||
)
|
||||
yield Function(
|
||||
name=subname,
|
||||
parent=self,
|
||||
callspec=callspec,
|
||||
callobj=funcobj,
|
||||
fixtureinfo=fixtureinfo,
|
||||
keywords={callspec.id: True},
|
||||
originalname=name,
|
||||
)
|
||||
|
||||
|
||||
class Module(nodes.File, PyCollector):
|
||||
|
@ -423,7 +468,8 @@ class Module(nodes.File, PyCollector):
|
|||
mod = self.fspath.pyimport(ensuresyspath=importmode)
|
||||
except SyntaxError:
|
||||
raise self.CollectError(
|
||||
_pytest._code.ExceptionInfo().getrepr(style="short"))
|
||||
_pytest._code.ExceptionInfo().getrepr(style="short")
|
||||
)
|
||||
except self.fspath.ImportMismatchError:
|
||||
e = sys.exc_info()[1]
|
||||
raise self.CollectError(
|
||||
|
@ -433,15 +479,17 @@ class Module(nodes.File, PyCollector):
|
|||
"which is not the same as the test file we want to collect:\n"
|
||||
" %s\n"
|
||||
"HINT: remove __pycache__ / .pyc files and/or use a "
|
||||
"unique basename for your test file modules"
|
||||
% e.args
|
||||
"unique basename for your test file modules" % e.args
|
||||
)
|
||||
except ImportError:
|
||||
from _pytest._code.code import ExceptionInfo
|
||||
|
||||
exc_info = ExceptionInfo()
|
||||
if self.config.getoption('verbose') < 2:
|
||||
if self.config.getoption("verbose") < 2:
|
||||
exc_info.traceback = exc_info.traceback.filter(filter_traceback)
|
||||
exc_repr = exc_info.getrepr(style='short') if exc_info.traceback else exc_info.exconly()
|
||||
exc_repr = exc_info.getrepr(
|
||||
style="short"
|
||||
) if exc_info.traceback else exc_info.exconly()
|
||||
formatted_tb = safe_str(exc_repr)
|
||||
raise self.CollectError(
|
||||
"ImportError while importing test module '{fspath}'.\n"
|
||||
|
@ -468,9 +516,9 @@ class Module(nodes.File, PyCollector):
|
|||
if setup_module is not None:
|
||||
setup_module()
|
||||
|
||||
teardown_module = _get_xunit_setup_teardown(self.obj, 'tearDownModule')
|
||||
teardown_module = _get_xunit_setup_teardown(self.obj, "tearDownModule")
|
||||
if teardown_module is None:
|
||||
teardown_module = _get_xunit_setup_teardown(self.obj, 'teardown_module')
|
||||
teardown_module = _get_xunit_setup_teardown(self.obj, "teardown_module")
|
||||
if teardown_module is not None:
|
||||
self.addfinalizer(teardown_module)
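The xunit-style hooks looked up above (setUpModule/tearDownModule and setup_module/teardown_module) are plain functions defined in the test module itself; for example (standard pytest convention, names as in the hunk above, the resource is invented for illustration):

_resources = []

def setup_module(module):
    # runs once before the first test in this module
    _resources.append("db-connection")

def teardown_module(module):
    # runs once after the last test in this module
    _resources.clear()

def test_uses_resource():
    assert _resources == ["db-connection"]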
|
||||
|
||||
|
@ -512,26 +560,32 @@ class Class(PyCollector):
|
|||
if not safe_getattr(self.obj, "__test__", True):
|
||||
return []
|
||||
if hasinit(self.obj):
|
||||
self.warn("C1", "cannot collect test class %r because it has a "
|
||||
"__init__ constructor" % self.obj.__name__)
|
||||
self.warn(
|
||||
"C1",
|
||||
"cannot collect test class %r because it has a "
|
||||
"__init__ constructor" % self.obj.__name__,
|
||||
)
|
||||
return []
|
||||
elif hasnew(self.obj):
|
||||
self.warn("C1", "cannot collect test class %r because it has a "
|
||||
"__new__ constructor" % self.obj.__name__)
|
||||
self.warn(
|
||||
"C1",
|
||||
"cannot collect test class %r because it has a "
|
||||
"__new__ constructor" % self.obj.__name__,
|
||||
)
|
||||
return []
|
||||
return [self._getcustomclass("Instance")(name="()", parent=self)]
|
||||
|
||||
def setup(self):
|
||||
setup_class = _get_xunit_func(self.obj, 'setup_class')
|
||||
setup_class = _get_xunit_func(self.obj, "setup_class")
|
||||
if setup_class is not None:
|
||||
setup_class = getattr(setup_class, 'im_func', setup_class)
|
||||
setup_class = getattr(setup_class, '__func__', setup_class)
|
||||
setup_class = getattr(setup_class, "im_func", setup_class)
|
||||
setup_class = getattr(setup_class, "__func__", setup_class)
|
||||
setup_class(self.obj)
|
||||
|
||||
fin_class = getattr(self.obj, 'teardown_class', None)
|
||||
fin_class = getattr(self.obj, "teardown_class", None)
|
||||
if fin_class is not None:
|
||||
fin_class = getattr(fin_class, 'im_func', fin_class)
|
||||
fin_class = getattr(fin_class, '__func__', fin_class)
|
||||
fin_class = getattr(fin_class, "im_func", fin_class)
|
||||
fin_class = getattr(fin_class, "__func__", fin_class)
|
||||
self.addfinalizer(lambda: fin_class(self.obj))
|
||||
|
||||
|
||||
|
@ -559,7 +613,7 @@ class FunctionMixin(PyobjMixin):
|
|||
|
||||
def setup(self):
|
||||
""" perform setup for this test function. """
|
||||
if hasattr(self, '_preservedparent'):
|
||||
if hasattr(self, "_preservedparent"):
|
||||
obj = self._preservedparent
|
||||
elif isinstance(self.parent, Instance):
|
||||
obj = self.parent.newinstance()
|
||||
|
@ -567,20 +621,24 @@ class FunctionMixin(PyobjMixin):
|
|||
else:
|
||||
obj = self.parent.obj
|
||||
if inspect.ismethod(self.obj):
|
||||
setup_name = 'setup_method'
|
||||
teardown_name = 'teardown_method'
|
||||
setup_name = "setup_method"
|
||||
teardown_name = "teardown_method"
|
||||
else:
|
||||
setup_name = 'setup_function'
|
||||
teardown_name = 'teardown_function'
|
||||
setup_func_or_method = _get_xunit_setup_teardown(obj, setup_name, param_obj=self.obj)
|
||||
setup_name = "setup_function"
|
||||
teardown_name = "teardown_function"
|
||||
setup_func_or_method = _get_xunit_setup_teardown(
|
||||
obj, setup_name, param_obj=self.obj
|
||||
)
|
||||
if setup_func_or_method is not None:
|
||||
setup_func_or_method()
|
||||
teardown_func_or_method = _get_xunit_setup_teardown(obj, teardown_name, param_obj=self.obj)
|
||||
teardown_func_or_method = _get_xunit_setup_teardown(
|
||||
obj, teardown_name, param_obj=self.obj
|
||||
)
|
||||
if teardown_func_or_method is not None:
|
||||
self.addfinalizer(teardown_func_or_method)
|
||||
|
||||
def _prunetraceback(self, excinfo):
|
||||
if hasattr(self, '_obj') and not self.config.option.fulltrace:
|
||||
if hasattr(self, "_obj") and not self.config.option.fulltrace:
|
||||
code = _pytest._code.Code(get_real_func(self.obj))
|
||||
path, firstlineno = code.path, code.firstlineno
|
||||
traceback = excinfo.traceback
|
||||
|
@ -598,14 +656,13 @@ class FunctionMixin(PyobjMixin):
|
|||
if self.config.option.tbstyle == "auto":
|
||||
if len(excinfo.traceback) > 2:
|
||||
for entry in excinfo.traceback[1:-1]:
|
||||
entry.set_repr_style('short')
|
||||
entry.set_repr_style("short")
|
||||
|
||||
def _repr_failure_py(self, excinfo, style="long"):
|
||||
if excinfo.errisinstance(fail.Exception):
|
||||
if not excinfo.value.pytrace:
|
||||
return py._builtin._totext(excinfo.value)
|
||||
return super(FunctionMixin, self)._repr_failure_py(excinfo,
|
||||
style=style)
|
||||
return super(FunctionMixin, self)._repr_failure_py(excinfo, style=style)
|
||||
|
||||
def repr_failure(self, excinfo, outerr=None):
|
||||
assert outerr is None, "XXX outerr usage is deprecated"
|
||||
|
@ -616,11 +673,13 @@ class FunctionMixin(PyobjMixin):
|
|||
|
||||
|
||||
class Generator(FunctionMixin, PyCollector):
|
||||
|
||||
def collect(self):
|
||||
# test generators are seen as collectors but they also
|
||||
# invoke setup/teardown on popular request
|
||||
# (induced by the common "test_*" naming shared with normal tests)
|
||||
from _pytest import deprecated
|
||||
|
||||
self.session._setupstate.prepare(self)
|
||||
# see FunctionMixin.setup and test_setupstate_is_preserved_134
|
||||
self._preservedparent = self.parent.obj
|
||||
|
@ -629,16 +688,18 @@ class Generator(FunctionMixin, PyCollector):
|
|||
for i, x in enumerate(self.obj()):
|
||||
name, call, args = self.getcallargs(x)
|
||||
if not callable(call):
|
||||
raise TypeError("%r yielded non callable test %r" % (self.obj, call,))
|
||||
raise TypeError("%r yielded non callable test %r" % (self.obj, call))
|
||||
if name is None:
|
||||
name = "[%d]" % i
|
||||
else:
|
||||
name = "['%s']" % name
|
||||
if name in seen:
|
||||
raise ValueError("%r generated tests with non-unique name %r" % (self, name))
|
||||
raise ValueError(
|
||||
"%r generated tests with non-unique name %r" % (self, name)
|
||||
)
|
||||
seen[name] = True
|
||||
values.append(self.Function(name, self, args=args, callobj=call))
|
||||
self.warn('C1', deprecated.YIELD_TESTS)
|
||||
self.warn("C1", deprecated.YIELD_TESTS)
|
||||
return values
|
||||
|
||||
def getcallargs(self, obj):
|
||||
|
@ -655,18 +716,19 @@ class Generator(FunctionMixin, PyCollector):
|
|||
|
||||
|
||||
def hasinit(obj):
|
||||
init = getattr(obj, '__init__', None)
|
||||
init = getattr(obj, "__init__", None)
|
||||
if init:
|
||||
return init != object.__init__
|
||||
|
||||
|
||||
def hasnew(obj):
|
||||
new = getattr(obj, '__new__', None)
|
||||
new = getattr(obj, "__new__", None)
|
||||
if new:
|
||||
return new != object.__new__
|
||||
|
||||
|
||||
class CallSpec2(object):
|
||||
|
||||
def __init__(self, metafunc):
|
||||
self.metafunc = metafunc
|
||||
self.funcargs = {}
|
||||
|
@ -708,8 +770,7 @@ class CallSpec2(object):
|
|||
def id(self):
|
||||
return "-".join(map(str, filter(None, self._idlist)))
|
||||
|
||||
def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum,
|
||||
param_index):
|
||||
def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index):
|
||||
for arg, val in zip(argnames, valset):
|
||||
self._checkargnotcontained(arg)
|
||||
valtype_for_arg = valtypes[arg]
|
||||
|
@ -742,7 +803,10 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
|
|||
|
||||
def __init__(self, definition, fixtureinfo, config, cls=None, module=None):
|
||||
#: access to the :class:`_pytest.config.Config` object for the test session
|
||||
assert isinstance(definition, FunctionDefinition) or type(definition).__name__ == "DefinitionMock"
|
||||
assert (
|
||||
isinstance(definition, FunctionDefinition)
|
||||
or type(definition).__name__ == "DefinitionMock"
|
||||
)
|
||||
self.definition = definition
|
||||
self.config = config
|
||||
|
||||
|
@ -762,8 +826,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
|
|||
self._ids = set()
|
||||
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
|
||||
|
||||
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
|
||||
scope=None):
|
||||
def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None):
|
||||
""" Add new invocations to the underlying test function using the list
|
||||
of argvalues for the given argnames. Parametrization is performed
|
||||
during the collection phase. If you need to setup expensive resources
|
||||
|
@ -806,27 +869,29 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
|
|||
from py.io import saferepr
|
||||
|
||||
argnames, parameters = ParameterSet._for_parametrize(
|
||||
argnames, argvalues, self.function, self.config)
|
||||
argnames, argvalues, self.function, self.config
|
||||
)
|
||||
del argvalues
|
||||
default_arg_names = set(get_default_arg_names(self.function))
|
||||
|
||||
if scope is None:
|
||||
scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
|
||||
|
||||
scopenum = scope2index(scope, descr='call to {}'.format(self.parametrize))
|
||||
scopenum = scope2index(scope, descr="call to {}".format(self.parametrize))
|
||||
valtypes = {}
|
||||
for arg in argnames:
|
||||
if arg not in self.fixturenames:
|
||||
if arg in default_arg_names:
|
||||
raise ValueError("%r already takes an argument %r with a default value" % (self.function, arg))
|
||||
raise ValueError(
|
||||
"%r already takes an argument %r with a default value"
|
||||
% (self.function, arg)
|
||||
)
|
||||
else:
|
||||
if isinstance(indirect, (tuple, list)):
|
||||
name = 'fixture' if arg in indirect else 'argument'
|
||||
name = "fixture" if arg in indirect else "argument"
|
||||
else:
|
||||
name = 'fixture' if indirect else 'argument'
|
||||
raise ValueError(
|
||||
"%r uses no %s %r" % (
|
||||
self.function, name, arg))
|
||||
name = "fixture" if indirect else "argument"
|
||||
raise ValueError("%r uses no %s %r" % (self.function, name, arg))
|
||||
|
||||
if indirect is True:
|
||||
valtypes = dict.fromkeys(argnames, "params")
|
||||
|
@ -836,8 +901,10 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
|
|||
valtypes = dict.fromkeys(argnames, "funcargs")
|
||||
for arg in indirect:
|
||||
if arg not in argnames:
|
||||
raise ValueError("indirect given to %r: fixture %r doesn't exist" % (
|
||||
self.function, arg))
|
||||
raise ValueError(
|
||||
"indirect given to %r: fixture %r doesn't exist"
|
||||
% (self.function, arg)
|
||||
)
|
||||
valtypes[arg] = "params"
|
||||
idfn = None
|
||||
if callable(ids):
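The indirect handling a few lines above marks selected argnames as fixture parameters rather than direct funcargs; the corresponding user-facing pattern looks like this (standard pytest API, values made up for illustration):

import pytest

@pytest.fixture
def number(request):
    # receives each parametrized value via request.param because of indirect=True
    return request.param * 10

@pytest.mark.parametrize("number", [1, 2], indirect=True)
def test_indirect(number):
    assert number in (10, 20)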
|
||||
|
@ -845,12 +912,15 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
|
|||
ids = None
|
||||
if ids:
|
||||
if len(ids) != len(parameters):
|
||||
raise ValueError('%d tests specified with %d ids' % (
|
||||
len(parameters), len(ids)))
|
||||
raise ValueError(
|
||||
"%d tests specified with %d ids" % (len(parameters), len(ids))
|
||||
)
|
||||
for id_value in ids:
|
||||
if id_value is not None and not isinstance(id_value, six.string_types):
|
||||
msg = 'ids must be list of strings, found: %s (type: %s)'
|
||||
raise ValueError(msg % (saferepr(id_value), type(id_value).__name__))
|
||||
msg = "ids must be list of strings, found: %s (type: %s)"
|
||||
raise ValueError(
|
||||
msg % (saferepr(id_value), type(id_value).__name__)
|
||||
)
|
||||
ids = idmaker(argnames, parameters, idfn, ids, self.config)
|
||||
newcalls = []
|
||||
for callspec in self._calls or [CallSpec2(self)]:
|
||||
|
@ -859,11 +929,20 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
|
|||
if len(param.values) != len(argnames):
|
||||
raise ValueError(
|
||||
'In "parametrize" the number of values ({}) must be '
|
||||
'equal to the number of names ({})'.format(
|
||||
param.values, argnames))
|
||||
"equal to the number of names ({})".format(
|
||||
param.values, argnames
|
||||
)
|
||||
)
|
||||
newcallspec = callspec.copy(self)
|
||||
newcallspec.setmulti2(valtypes, argnames, param.values, a_id,
|
||||
param.marks, scopenum, param_index)
|
||||
newcallspec.setmulti2(
|
||||
valtypes,
|
||||
argnames,
|
||||
param.values,
|
||||
a_id,
|
||||
param.marks,
|
||||
scopenum,
|
||||
param_index,
|
||||
)
|
||||
newcalls.append(newcallspec)
|
||||
self._calls = newcalls
|
||||
|
||||
|
@ -888,7 +967,9 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
|
|||
invocation through the ``request.param`` attribute.
|
||||
"""
|
||||
if self.config:
|
||||
self.config.warn('C1', message=deprecated.METAFUNC_ADD_CALL, fslocation=None)
|
||||
self.config.warn(
|
||||
"C1", message=deprecated.METAFUNC_ADD_CALL, fslocation=None
|
||||
)
|
||||
assert funcargs is None or isinstance(funcargs, dict)
|
||||
if funcargs is not None:
|
||||
for name in funcargs:
|
||||
|
@ -921,9 +1002,11 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
|
|||
Related to issue #1832, based on code posted by @Kingdread.
|
||||
"""
|
||||
from _pytest.fixtures import scopes
|
||||
|
||||
indirect_as_list = isinstance(indirect, (list, tuple))
|
||||
all_arguments_are_fixtures = indirect is True or \
|
||||
indirect_as_list and len(indirect) == argnames
|
||||
all_arguments_are_fixtures = indirect is True or indirect_as_list and len(
|
||||
indirect
|
||||
) == argnames
|
||||
if all_arguments_are_fixtures:
|
||||
fixturedefs = arg2fixturedefs or {}
|
||||
used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()]
|
||||
|
@ -933,7 +1016,7 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
|
|||
if scope in used_scopes:
|
||||
return scope
|
||||
|
||||
return 'function'
|
||||
return "function"
|
||||
|
||||
|
||||
def _idval(val, argname, idx, idfn, config=None):
|
||||
|
@ -944,15 +1027,19 @@ def _idval(val, argname, idx, idfn, config=None):
|
|||
except Exception:
|
||||
# See issue https://github.com/pytest-dev/pytest/issues/2169
|
||||
import warnings
|
||||
msg = "Raised while trying to determine id of parameter %s at position %d." % (argname, idx)
|
||||
msg += '\nUpdate your code as this will raise an error in pytest-4.0.'
|
||||
|
||||
msg = "Raised while trying to determine id of parameter %s at position %d." % (
|
||||
argname, idx
|
||||
)
|
||||
msg += "\nUpdate your code as this will raise an error in pytest-4.0."
|
||||
warnings.warn(msg, DeprecationWarning)
|
||||
if s:
|
||||
return ascii_escaped(s)
|
||||
|
||||
if config:
|
||||
hook_id = config.hook.pytest_make_parametrize_id(
|
||||
config=config, val=val, argname=argname)
|
||||
config=config, val=val, argname=argname
|
||||
)
|
||||
if hook_id:
|
||||
return hook_id
|
||||
|
||||
|
@ -964,7 +1051,7 @@ def _idval(val, argname, idx, idfn, config=None):
|
|||
return ascii_escaped(val.pattern)
|
||||
elif enum is not None and isinstance(val, enum.Enum):
|
||||
return str(val)
|
||||
elif (isclass(val) or isfunction(val)) and hasattr(val, '__name__'):
|
||||
elif (isclass(val) or isfunction(val)) and hasattr(val, "__name__"):
|
||||
return val.__name__
|
||||
return str(argname) + str(idx)
|
||||
|
||||
|
@ -973,16 +1060,20 @@ def _idvalset(idx, parameterset, argnames, idfn, ids, config=None):
|
|||
if parameterset.id is not None:
|
||||
return parameterset.id
|
||||
if ids is None or (idx >= len(ids) or ids[idx] is None):
|
||||
this_id = [_idval(val, argname, idx, idfn, config)
|
||||
for val, argname in zip(parameterset.values, argnames)]
|
||||
this_id = [
|
||||
_idval(val, argname, idx, idfn, config)
|
||||
for val, argname in zip(parameterset.values, argnames)
|
||||
]
|
||||
return "-".join(this_id)
|
||||
else:
|
||||
return ascii_escaped(ids[idx])
|
||||
|
||||
|
||||
def idmaker(argnames, parametersets, idfn=None, ids=None, config=None):
|
||||
ids = [_idvalset(valindex, parameterset, argnames, idfn, ids, config)
|
||||
for valindex, parameterset in enumerate(parametersets)]
|
||||
ids = [
|
||||
_idvalset(valindex, parameterset, argnames, idfn, ids, config)
|
||||
for valindex, parameterset in enumerate(parametersets)
|
||||
]
|
||||
if len(set(ids)) != len(ids):
|
||||
# The ids are not unique
|
||||
duplicates = [testid for testid in ids if ids.count(testid) > 1]
|
||||
|
@ -996,11 +1087,13 @@ def idmaker(argnames, parametersets, idfn=None, ids=None, config=None):
|
|||
|
||||
def show_fixtures_per_test(config):
|
||||
from _pytest.main import wrap_session
|
||||
|
||||
return wrap_session(config, _show_fixtures_per_test)
|
||||
|
||||
|
||||
def _show_fixtures_per_test(config, session):
|
||||
import _pytest.config
|
||||
|
||||
session.perform_collect()
|
||||
curdir = py.path.local()
|
||||
tw = _pytest.config.create_terminal_writer(config)
|
||||
|
@ -1024,7 +1117,7 @@ def _show_fixtures_per_test(config, session):
|
|||
if fixture_doc:
|
||||
write_docstring(tw, fixture_doc)
|
||||
else:
|
||||
tw.line(' no docstring available', red=True)
|
||||
tw.line(" no docstring available", red=True)
|
||||
|
||||
def write_item(item):
|
||||
try:
|
||||
|
@ -1036,8 +1129,8 @@ def _show_fixtures_per_test(config, session):
|
|||
# this test item does not use any fixtures
|
||||
return
|
||||
tw.line()
|
||||
tw.sep('-', 'fixtures used by {}'.format(item.name))
|
||||
tw.sep('-', '({})'.format(get_best_relpath(item.function)))
|
||||
tw.sep("-", "fixtures used by {}".format(item.name))
|
||||
tw.sep("-", "({})".format(get_best_relpath(item.function)))
|
||||
# dict key not used in loop but needed for sorting
|
||||
for _, fixturedefs in sorted(info.name2fixturedefs.items()):
|
||||
assert fixturedefs is not None
|
||||
|
@ -1052,11 +1145,13 @@ def _show_fixtures_per_test(config, session):
|
|||
|
||||
def showfixtures(config):
|
||||
from _pytest.main import wrap_session
|
||||
|
||||
return wrap_session(config, _showfixtures_main)
|
||||
|
||||
|
||||
def _showfixtures_main(config, session):
|
||||
import _pytest.config
|
||||
|
||||
session.perform_collect()
|
||||
curdir = py.path.local()
|
||||
tw = _pytest.config.create_terminal_writer(config)
|
||||
|
@ -1076,10 +1171,15 @@ def _showfixtures_main(config, session):
|
|||
if (fixturedef.argname, loc) in seen:
|
||||
continue
|
||||
seen.add((fixturedef.argname, loc))
|
||||
available.append((len(fixturedef.baseid),
|
||||
fixturedef.func.__module__,
|
||||
curdir.bestrelpath(loc),
|
||||
fixturedef.argname, fixturedef))
|
||||
available.append(
|
||||
(
|
||||
len(fixturedef.baseid),
|
||||
fixturedef.func.__module__,
|
||||
curdir.bestrelpath(loc),
|
||||
fixturedef.argname,
|
||||
fixturedef,
|
||||
)
|
||||
)
|
||||
|
||||
available.sort()
|
||||
currentmodule = None
|
||||
|
@ -1092,7 +1192,7 @@ def _showfixtures_main(config, session):
|
|||
if verbose <= 0 and argname[0] == "_":
|
||||
continue
|
||||
if verbose > 0:
|
||||
funcargspec = "%s -- %s" % (argname, bestrel,)
|
||||
funcargspec = "%s -- %s" % (argname, bestrel)
|
||||
else:
|
||||
funcargspec = argname
|
||||
tw.line(funcargspec, green=True)
|
||||
|
@ -1101,8 +1201,7 @@ def _showfixtures_main(config, session):
|
|||
if doc:
|
||||
write_docstring(tw, doc)
|
||||
else:
|
||||
tw.line(" %s: no docstring available" % (loc,),
|
||||
red=True)
|
||||
tw.line(" %s: no docstring available" % (loc,), red=True)
|
||||
|
||||
|
||||
def write_docstring(tw, doc):
|
||||
|
@ -1129,11 +1228,20 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):
|
|||
# disable since functions handle it themselves
|
||||
_ALLOW_MARKERS = False
|
||||
|
||||
def __init__(self, name, parent, args=None, config=None,
|
||||
callspec=None, callobj=NOTSET, keywords=None, session=None,
|
||||
fixtureinfo=None, originalname=None):
|
||||
super(Function, self).__init__(name, parent, config=config,
|
||||
session=session)
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
parent,
|
||||
args=None,
|
||||
config=None,
|
||||
callspec=None,
|
||||
callobj=NOTSET,
|
||||
keywords=None,
|
||||
session=None,
|
||||
fixtureinfo=None,
|
||||
originalname=None,
|
||||
):
|
||||
super(Function, self).__init__(name, parent, config=config, session=session)
|
||||
self._args = args
|
||||
if callobj is not NOTSET:
|
||||
self.obj = callobj
|
||||
|
@ -1155,8 +1263,8 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):
|
|||
|
||||
if fixtureinfo is None:
|
||||
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
|
||||
self.parent, self.obj, self.cls,
|
||||
funcargs=not self._isyieldedfunction())
|
||||
self.parent, self.obj, self.cls, funcargs=not self._isyieldedfunction()
|
||||
)
|
||||
self._fixtureinfo = fixtureinfo
|
||||
self.fixturenames = fixtureinfo.names_closure
|
||||
self._initrequest()
|
||||
|
@ -1170,8 +1278,9 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):
|
|||
def _initrequest(self):
|
||||
self.funcargs = {}
|
||||
if self._isyieldedfunction():
|
||||
assert not hasattr(self, "callspec"), (
|
||||
"yielded functions (deprecated) cannot have funcargs")
|
||||
assert not hasattr(
|
||||
self, "callspec"
|
||||
), "yielded functions (deprecated) cannot have funcargs"
|
||||
else:
|
||||
if hasattr(self, "callspec"):
|
||||
callspec = self.callspec
|
||||
|
@ -1184,7 +1293,7 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):
|
|||
@property
|
||||
def function(self):
|
||||
"underlying python 'function' object"
|
||||
return getattr(self.obj, 'im_func', self.obj)
|
||||
return getattr(self.obj, "im_func", self.obj)
|
||||
|
||||
def _getobj(self):
|
||||
name = self.name
|
||||
|
|
|
@ -20,7 +20,9 @@ def _cmp_raises_type_error(self, other):
|
|||
other operators at all.
|
||||
"""
|
||||
__tracebackhide__ = True
|
||||
raise TypeError('Comparison operators other than == and != not supported by approx objects')
|
||||
raise TypeError(
|
||||
"Comparison operators other than == and != not supported by approx objects"
|
||||
)
|
||||
|
||||
|
||||
# builtin pytest.approx helper
|
||||
|
@ -47,8 +49,8 @@ class ApproxBase(object):
|
|||
|
||||
def __eq__(self, actual):
|
||||
return all(
|
||||
a == self._approx_scalar(x)
|
||||
for a, x in self._yield_comparisons(actual))
|
||||
a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual)
|
||||
)
|
||||
|
||||
__hash__ = None
|
||||
|
||||
|
@ -79,8 +81,9 @@ class ApproxNumpy(ApproxBase):
|
|||
# shape of the array...
|
||||
import numpy as np
|
||||
|
||||
return "approx({!r})".format(list(
|
||||
self._approx_scalar(x) for x in np.asarray(self.expected)))
|
||||
return "approx({!r})".format(
|
||||
list(self._approx_scalar(x) for x in np.asarray(self.expected))
|
||||
)
|
||||
|
||||
if sys.version_info[0] == 2:
|
||||
__cmp__ = _cmp_raises_type_error
|
||||
|
@ -123,9 +126,9 @@ class ApproxMapping(ApproxBase):
|
|||
"""
|
||||
|
||||
def __repr__(self):
|
||||
return "approx({!r})".format({
|
||||
k: self._approx_scalar(v)
|
||||
for k, v in self.expected.items()})
|
||||
return "approx({!r})".format(
|
||||
{k: self._approx_scalar(v) for k, v in self.expected.items()}
|
||||
)
|
||||
|
||||
def __eq__(self, actual):
|
||||
if set(actual.keys()) != set(self.expected.keys()):
|
||||
|
@ -147,8 +150,9 @@ class ApproxSequence(ApproxBase):
|
|||
seq_type = type(self.expected)
|
||||
if seq_type not in (tuple, list, set):
|
||||
seq_type = list
|
||||
return "approx({!r})".format(seq_type(
|
||||
self._approx_scalar(x) for x in self.expected))
|
||||
return "approx({!r})".format(
|
||||
seq_type(self._approx_scalar(x) for x in self.expected)
|
||||
)
|
||||
|
||||
def __eq__(self, actual):
|
||||
if len(actual) != len(self.expected):
|
||||
|
@ -184,14 +188,14 @@ class ApproxScalar(ApproxBase):
|
|||
# If a sensible tolerance can't be calculated, self.tolerance will
|
||||
# raise a ValueError. In this case, display '???'.
|
||||
try:
|
||||
vetted_tolerance = '{:.1e}'.format(self.tolerance)
|
||||
vetted_tolerance = "{:.1e}".format(self.tolerance)
|
||||
except ValueError:
|
||||
vetted_tolerance = '???'
|
||||
vetted_tolerance = "???"
|
||||
|
||||
if sys.version_info[0] == 2:
|
||||
return '{} +- {}'.format(self.expected, vetted_tolerance)
|
||||
return "{} +- {}".format(self.expected, vetted_tolerance)
|
||||
else:
|
||||
return u'{} \u00b1 {}'.format(self.expected, vetted_tolerance)
|
||||
return u"{} \u00b1 {}".format(self.expected, vetted_tolerance)
|
||||
|
||||
def __eq__(self, actual):
|
||||
"""
|
||||
|
@ -232,6 +236,7 @@ class ApproxScalar(ApproxBase):
|
|||
absolute tolerance or a relative tolerance, depending on what the user
|
||||
specified or which would be larger.
|
||||
"""
|
||||
|
||||
def set_default(x, default):
|
||||
return x if x is not None else default
|
||||
|
||||
|
@ -240,7 +245,9 @@ class ApproxScalar(ApproxBase):
|
|||
absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
|
||||
|
||||
if absolute_tolerance < 0:
|
||||
raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance))
|
||||
raise ValueError(
|
||||
"absolute tolerance can't be negative: {}".format(absolute_tolerance)
|
||||
)
|
||||
if math.isnan(absolute_tolerance):
|
||||
raise ValueError("absolute tolerance can't be NaN.")
|
||||
|
||||
|
@ -255,10 +262,16 @@ class ApproxScalar(ApproxBase):
|
|||
# we've made sure the user didn't ask for an absolute tolerance only,
|
||||
# because we don't want to raise errors about the relative tolerance if
|
||||
# we aren't even going to use it.
|
||||
relative_tolerance = set_default(self.rel, self.DEFAULT_RELATIVE_TOLERANCE) * abs(self.expected)
|
||||
relative_tolerance = set_default(
|
||||
self.rel, self.DEFAULT_RELATIVE_TOLERANCE
|
||||
) * abs(
|
||||
self.expected
|
||||
)
|
||||
|
||||
if relative_tolerance < 0:
|
||||
raise ValueError("relative tolerance can't be negative: {}".format(absolute_tolerance))
|
||||
raise ValueError(
|
||||
"relative tolerance can't be negative: {}".format(absolute_tolerance)
|
||||
)
|
||||
if math.isnan(relative_tolerance):
|
||||
raise ValueError("relative tolerance can't be NaN.")
|
||||
|
||||
|
@ -269,8 +282,8 @@ class ApproxScalar(ApproxBase):
|
|||
class ApproxDecimal(ApproxScalar):
from decimal import Decimal

DEFAULT_ABSOLUTE_TOLERANCE = Decimal('1e-12')
DEFAULT_RELATIVE_TOLERANCE = Decimal('1e-6')
DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")


def approx(expected, rel=None, abs=None, nan_ok=False):
|
||||
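For orientation, the approx helper whose signature closes the hunk above is used like this (standard pytest usage; the 1e-12 absolute and 1e-6 relative defaults mirror the Decimal values reformatted just above):

import pytest

def test_approx_basics():
    # the comparison succeeds when the difference is within the larger of
    # the absolute tolerance and rel * abs(expected)
    assert 0.1 + 0.2 == pytest.approx(0.3)
    assert 1.0001 == pytest.approx(1, rel=1e-3)
    assert 1.0001 != pytest.approx(1, rel=1e-6)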
|
@ -466,9 +479,10 @@ def _is_numpy_array(obj):
|
|||
import inspect
|
||||
|
||||
for cls in inspect.getmro(type(obj)):
|
||||
if cls.__module__ == 'numpy':
|
||||
if cls.__module__ == "numpy":
|
||||
try:
|
||||
import numpy as np
|
||||
|
||||
return isinstance(obj, np.ndarray)
|
||||
except ImportError:
|
||||
pass
|
||||
|
@ -478,6 +492,7 @@ def _is_numpy_array(obj):
|
|||
|
||||
# builtin pytest.raises helper
|
||||
|
||||
|
||||
def raises(expected_exception, *args, **kwargs):
|
||||
"""
|
||||
Assert that a code block/function call raises ``expected_exception``
|
||||
|
@ -587,8 +602,10 @@ def raises(expected_exception, *args, **kwargs):
|
|||
__tracebackhide__ = True
|
||||
base_type = (type, text_type, binary_type)
|
||||
for exc in filterfalse(isclass, always_iterable(expected_exception, base_type)):
|
||||
msg = ("exceptions must be old-style classes or"
|
||||
" derived from BaseException, not %s")
|
||||
msg = (
|
||||
"exceptions must be old-style classes or"
|
||||
" derived from BaseException, not %s"
|
||||
)
|
||||
raise TypeError(msg % type(exc))
|
||||
|
||||
message = "DID NOT RAISE {}".format(expected_exception)
|
||||
|
@ -600,8 +617,8 @@ def raises(expected_exception, *args, **kwargs):
|
|||
if "match" in kwargs:
|
||||
match_expr = kwargs.pop("match")
|
||||
if kwargs:
|
||||
msg = 'Unexpected keyword arguments passed to pytest.raises: '
|
||||
msg += ', '.join(kwargs.keys())
|
||||
msg = "Unexpected keyword arguments passed to pytest.raises: "
|
||||
msg += ", ".join(kwargs.keys())
|
||||
raise TypeError(msg)
|
||||
return RaisesContext(expected_exception, message, match_expr)
|
||||
elif isinstance(args[0], str):
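The match keyword handled just above is applied as a regular-expression search against the string of the raised exception; typical usage (standard pytest API, not new in this commit):

import pytest

def test_zero_division_message():
    with pytest.raises(ZeroDivisionError, match="division"):
        1 / 0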
|
||||
|
@ -631,6 +648,7 @@ raises.Exception = fail.Exception
|
|||
|
||||
|
||||
class RaisesContext(object):
|
||||
|
||||
def __init__(self, expected_exception, message, match_expr):
|
||||
self.expected_exception = expected_exception
|
||||
self.message = message
|
||||
|
|
|
@ -23,7 +23,7 @@ def recwarn():
|
|||
"""
|
||||
wrec = WarningsRecorder()
|
||||
with wrec:
|
||||
warnings.simplefilter('default')
|
||||
warnings.simplefilter("default")
|
||||
yield wrec
|
||||
|
||||
|
||||
|
@ -76,7 +76,9 @@ class _DeprecatedCallContext(object):
|
|||
|
||||
if exc_type is None:
|
||||
deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
|
||||
if not any(issubclass(c, deprecation_categories) for c in self._captured_categories):
|
||||
if not any(
|
||||
issubclass(c, deprecation_categories) for c in self._captured_categories
|
||||
):
|
||||
__tracebackhide__ = True
|
||||
msg = "Did not produce DeprecationWarning or PendingDeprecationWarning"
|
||||
raise AssertionError(msg)
|
||||
|
@ -180,7 +182,7 @@ class WarningsRecorder(warnings.catch_warnings):
|
|||
__tracebackhide__ = True
|
||||
raise RuntimeError("Cannot enter %r twice" % self)
|
||||
self._list = super(WarningsRecorder, self).__enter__()
|
||||
warnings.simplefilter('always')
|
||||
warnings.simplefilter("always")
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
|
@ -191,11 +193,13 @@ class WarningsRecorder(warnings.catch_warnings):
|
|||
|
||||
|
||||
class WarningsChecker(WarningsRecorder):
|
||||
|
||||
def __init__(self, expected_warning=None, match_expr=None):
|
||||
super(WarningsChecker, self).__init__()
|
||||
|
||||
msg = ("exceptions must be old-style classes or "
|
||||
"derived from Warning, not %s")
|
||||
msg = (
|
||||
"exceptions must be old-style classes or " "derived from Warning, not %s"
|
||||
)
|
||||
if isinstance(expected_warning, tuple):
|
||||
for exc in expected_warning:
|
||||
if not inspect.isclass(exc):
|
||||
|
@ -214,13 +218,14 @@ class WarningsChecker(WarningsRecorder):
|
|||
# only check if we're not currently handling an exception
|
||||
if all(a is None for a in exc_info):
|
||||
if self.expected_warning is not None:
|
||||
if not any(issubclass(r.category, self.expected_warning)
|
||||
for r in self):
|
||||
if not any(issubclass(r.category, self.expected_warning) for r in self):
|
||||
__tracebackhide__ = True
|
||||
fail("DID NOT WARN. No warnings of type {} was emitted. "
|
||||
"The list of emitted warnings is: {}.".format(
|
||||
self.expected_warning,
|
||||
[each.message for each in self]))
|
||||
fail(
|
||||
"DID NOT WARN. No warnings of type {} was emitted. "
|
||||
"The list of emitted warnings is: {}.".format(
|
||||
self.expected_warning, [each.message for each in self]
|
||||
)
|
||||
)
|
||||
elif self.match_expr is not None:
|
||||
for r in self:
|
||||
if issubclass(r.category, self.expected_warning):
|
||||
|
@ -231,5 +236,8 @@ class WarningsChecker(WarningsRecorder):
|
|||
"DID NOT WARN. No warnings of type {} matching"
|
||||
" ('{}') was emitted. The list of emitted warnings"
|
||||
" is: {}.".format(
|
||||
self.expected_warning, self.match_expr,
|
||||
[each.message for each in self]))
|
||||
self.expected_warning,
|
||||
self.match_expr,
|
||||
[each.message for each in self],
|
||||
)
|
||||
)
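WarningsChecker is the context manager behind pytest.warns, whose failure message is being re-wrapped here; a minimal usage sketch (standard API):

import warnings

import pytest

def test_warns_and_matches():
    with pytest.warns(UserWarning, match="deprecated"):
        warnings.warn("this helper is deprecated", UserWarning)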
|
||||
|
|
|
@ -9,28 +9,34 @@ import os
|
|||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting", "resultlog plugin options")
|
||||
group.addoption('--resultlog', '--result-log', action="store",
|
||||
metavar="path", default=None,
|
||||
help="DEPRECATED path for machine-readable result log.")
|
||||
group.addoption(
|
||||
"--resultlog",
|
||||
"--result-log",
|
||||
action="store",
|
||||
metavar="path",
|
||||
default=None,
|
||||
help="DEPRECATED path for machine-readable result log.",
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
resultlog = config.option.resultlog
|
||||
# prevent opening resultlog on slave nodes (xdist)
|
||||
if resultlog and not hasattr(config, 'slaveinput'):
|
||||
if resultlog and not hasattr(config, "slaveinput"):
|
||||
dirname = os.path.dirname(os.path.abspath(resultlog))
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname)
|
||||
logfile = open(resultlog, 'w', 1) # line buffered
|
||||
logfile = open(resultlog, "w", 1) # line buffered
|
||||
config._resultlog = ResultLog(config, logfile)
|
||||
config.pluginmanager.register(config._resultlog)
|
||||
|
||||
from _pytest.deprecated import RESULT_LOG
|
||||
config.warn('C1', RESULT_LOG)
|
||||
|
||||
config.warn("C1", RESULT_LOG)
|
||||
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
resultlog = getattr(config, '_resultlog', None)
|
||||
resultlog = getattr(config, "_resultlog", None)
|
||||
if resultlog:
|
||||
resultlog.logfile.close()
|
||||
del config._resultlog
|
||||
|
@ -46,22 +52,23 @@ def generic_path(item):
|
|||
newfspath = node.fspath
|
||||
if newfspath == fspath:
|
||||
if fspart:
|
||||
gpath.append(':')
|
||||
gpath.append(":")
|
||||
fspart = False
|
||||
else:
|
||||
gpath.append('.')
|
||||
gpath.append(".")
|
||||
else:
|
||||
gpath.append('/')
|
||||
gpath.append("/")
|
||||
fspart = True
|
||||
name = node.name
|
||||
if name[0] in '([':
|
||||
if name[0] in "([":
|
||||
gpath.pop()
|
||||
gpath.append(name)
|
||||
fspath = newfspath
|
||||
return ''.join(gpath)
|
||||
return "".join(gpath)
|
||||
|
||||
|
||||
class ResultLog(object):
|
||||
|
||||
def __init__(self, config, logfile):
|
||||
self.config = config
|
||||
self.logfile = logfile # preferably line buffered
|
||||
|
@ -72,7 +79,7 @@ class ResultLog(object):
|
|||
print(" %s" % line, file=self.logfile)
|
||||
|
||||
def log_outcome(self, report, lettercode, longrepr):
|
||||
testpath = getattr(report, 'nodeid', None)
|
||||
testpath = getattr(report, "nodeid", None)
|
||||
if testpath is None:
|
||||
testpath = report.fspath
|
||||
self.write_log_entry(testpath, lettercode, longrepr)
|
||||
|
@ -82,10 +89,10 @@ class ResultLog(object):
|
|||
return
|
||||
res = self.config.hook.pytest_report_teststatus(report=report)
|
||||
code = res[1]
|
||||
if code == 'x':
|
||||
if code == "x":
|
||||
longrepr = str(report.longrepr)
|
||||
elif code == 'X':
|
||||
longrepr = ''
|
||||
elif code == "X":
|
||||
longrepr = ""
|
||||
elif report.passed:
|
||||
longrepr = ""
|
||||
elif report.failed:
|
||||
|
@ -106,8 +113,8 @@ class ResultLog(object):
|
|||
self.log_outcome(report, code, longrepr)
|
||||
|
||||
def pytest_internalerror(self, excrepr):
|
||||
reprcrash = getattr(excrepr, 'reprcrash', None)
|
||||
reprcrash = getattr(excrepr, "reprcrash", None)
|
||||
path = getattr(reprcrash, "path", None)
|
||||
if path is None:
|
||||
path = "cwd:%s" % py.path.local()
|
||||
self.write_log_entry(path, '!', str(excrepr))
|
||||
self.write_log_entry(path, "!", str(excrepr))
|
||||
|
|
|
@ -16,9 +16,14 @@ from _pytest.outcomes import skip, Skipped, TEST_OUTCOME
|
|||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting", "reporting", after="general")
|
||||
group.addoption('--durations',
|
||||
action="store", type=int, default=None, metavar="N",
|
||||
help="show N slowest setup/test durations (N=0 for all)."),
|
||||
group.addoption(
|
||||
"--durations",
|
||||
action="store",
|
||||
type=int,
|
||||
default=None,
|
||||
metavar="N",
|
||||
help="show N slowest setup/test durations (N=0 for all).",
|
||||
),
|
||||
|
||||
|
||||
def pytest_terminal_summary(terminalreporter):
|
||||
|
@ -29,7 +34,7 @@ def pytest_terminal_summary(terminalreporter):
|
|||
dlist = []
|
||||
for replist in tr.stats.values():
|
||||
for rep in replist:
|
||||
if hasattr(rep, 'duration'):
|
||||
if hasattr(rep, "duration"):
|
||||
dlist.append(rep)
|
||||
if not dlist:
|
||||
return
|
||||
|
@ -43,8 +48,7 @@ def pytest_terminal_summary(terminalreporter):
|
|||
|
||||
for rep in dlist:
|
||||
nodeid = rep.nodeid.replace("::()::", "::")
|
||||
tr.write_line("%02.2fs %-8s %s" %
|
||||
(rep.duration, rep.when, nodeid))
|
||||
tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, nodeid))
|
||||
|
||||
|
||||
def pytest_sessionstart(session):
|
||||
|
@ -56,13 +60,9 @@ def pytest_sessionfinish(session):
|
|||
|
||||
|
||||
def pytest_runtest_protocol(item, nextitem):
|
||||
item.ihook.pytest_runtest_logstart(
|
||||
nodeid=item.nodeid, location=item.location,
|
||||
)
|
||||
item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
|
||||
runtestprotocol(item, nextitem=nextitem)
|
||||
item.ihook.pytest_runtest_logfinish(
|
||||
nodeid=item.nodeid, location=item.location,
|
||||
)
|
||||
item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
|
||||
return True
|
||||
|
||||
|
||||
|
@ -77,8 +77,7 @@ def runtestprotocol(item, log=True, nextitem=None):
|
|||
show_test_item(item)
|
||||
if not item.config.option.setuponly:
|
||||
reports.append(call_and_report(item, "call", log))
|
||||
reports.append(call_and_report(item, "teardown", log,
|
||||
nextitem=nextitem))
|
||||
reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
|
||||
# after all teardown hooks have been called
|
||||
# want funcargs and request info to go away
|
||||
if hasrequest:
|
||||
|
@ -91,20 +90,20 @@ def show_test_item(item):
|
|||
"""Show test function, parameters and the fixtures of the test item."""
|
||||
tw = item.config.get_terminal_writer()
|
||||
tw.line()
|
||||
tw.write(' ' * 8)
|
||||
tw.write(" " * 8)
|
||||
tw.write(item._nodeid)
|
||||
used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
|
||||
if used_fixtures:
|
||||
tw.write(' (fixtures used: {})'.format(', '.join(used_fixtures)))
|
||||
tw.write(" (fixtures used: {})".format(", ".join(used_fixtures)))
|
||||
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
_update_current_test_var(item, 'setup')
|
||||
_update_current_test_var(item, "setup")
|
||||
item.session._setupstate.prepare(item)
|
||||
|
||||
|
||||
def pytest_runtest_call(item):
|
||||
_update_current_test_var(item, 'call')
|
||||
_update_current_test_var(item, "call")
|
||||
sys.last_type, sys.last_value, sys.last_traceback = (None, None, None)
|
||||
try:
|
||||
item.runtest()
|
||||
|
@ -120,7 +119,7 @@ def pytest_runtest_call(item):
|
|||
|
||||
|
||||
def pytest_runtest_teardown(item, nextitem):
|
||||
_update_current_test_var(item, 'teardown')
|
||||
_update_current_test_var(item, "teardown")
|
||||
item.session._setupstate.teardown_exact(item, nextitem)
|
||||
_update_current_test_var(item, None)
|
||||
|
||||
|
@ -131,11 +130,11 @@ def _update_current_test_var(item, when):
|
|||
|
||||
If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.
|
||||
"""
|
||||
var_name = 'PYTEST_CURRENT_TEST'
|
||||
var_name = "PYTEST_CURRENT_TEST"
|
||||
if when:
|
||||
value = '{} ({})'.format(item.nodeid, when)
|
||||
value = "{} ({})".format(item.nodeid, when)
|
||||
# don't allow null bytes on environment variables (see #2644, #2957)
|
||||
value = value.replace('\x00', '(null)')
|
||||
value = value.replace("\x00", "(null)")
|
||||
os.environ[var_name] = value
|
||||
else:
|
||||
os.environ.pop(var_name)
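_update_current_test_var above maintains the PYTEST_CURRENT_TEST environment variable; code outside pytest can read it while a test runs, for example (a sketch; the value format "<nodeid> (<phase>)" is the one built in the hunk above):

import os

def current_pytest_test():
    # e.g. "tests/test_example.py::test_answer (call)" during the call phase,
    # empty string when no test is running
    return os.environ.get("PYTEST_CURRENT_TEST", "")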
|
||||
|
@ -155,6 +154,7 @@ def pytest_report_teststatus(report):
|
|||
#
|
||||
# Implementation
|
||||
|
||||
|
||||
def call_and_report(item, when, log=True, **kwds):
|
||||
call = call_runtest_hook(item, when, **kwds)
|
||||
hook = item.ihook
|
||||
|
@ -168,16 +168,20 @@ def call_and_report(item, when, log=True, **kwds):
|
|||
|
||||
def check_interactive_exception(call, report):
|
||||
return call.excinfo and not (
|
||||
hasattr(report, "wasxfail") or
|
||||
call.excinfo.errisinstance(skip.Exception) or
|
||||
call.excinfo.errisinstance(bdb.BdbQuit))
|
||||
hasattr(report, "wasxfail")
|
||||
or call.excinfo.errisinstance(skip.Exception)
|
||||
or call.excinfo.errisinstance(bdb.BdbQuit)
|
||||
)
|
||||
|
||||
|
||||
def call_runtest_hook(item, when, **kwds):
|
||||
hookname = "pytest_runtest_" + when
|
||||
ihook = getattr(item.ihook, hookname)
|
||||
return CallInfo(lambda: ihook(item=item, **kwds), when=when,
|
||||
treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"))
|
||||
return CallInfo(
|
||||
lambda: ihook(item=item, **kwds),
|
||||
when=when,
|
||||
treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"),
|
||||
)
|
||||
|
||||
|
||||
class CallInfo(object):
|
||||
|
@ -215,9 +219,10 @@ def getslaveinfoline(node):
|
|||
return node._slaveinfocache
|
||||
except AttributeError:
|
||||
d = node.slaveinfo
|
||||
ver = "%s.%s.%s" % d['version_info'][:3]
|
||||
ver = "%s.%s.%s" % d["version_info"][:3]
|
||||
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
|
||||
d['id'], d['sysplatform'], ver, d['executable'])
|
||||
d["id"], d["sysplatform"], ver, d["executable"]
|
||||
)
|
||||
return s
|
||||
|
||||
|
||||
|
@ -227,14 +232,14 @@ class BaseReport(object):
|
|||
self.__dict__.update(kw)
|
||||
|
||||
def toterminal(self, out):
|
||||
if hasattr(self, 'node'):
|
||||
if hasattr(self, "node"):
|
||||
out.line(getslaveinfoline(self.node))
|
||||
|
||||
longrepr = self.longrepr
|
||||
if longrepr is None:
|
||||
return
|
||||
|
||||
if hasattr(longrepr, 'toterminal'):
|
||||
if hasattr(longrepr, "toterminal"):
|
||||
longrepr.toterminal(out)
|
||||
else:
|
||||
try:
|
||||
|
@ -267,7 +272,9 @@ class BaseReport(object):
|
|||
|
||||
.. versionadded:: 3.5
|
||||
"""
|
||||
return '\n'.join(content for (prefix, content) in self.get_sections('Captured log'))
|
||||
return "\n".join(
|
||||
content for (prefix, content) in self.get_sections("Captured log")
|
||||
)
|
||||
|
||||
@property
|
||||
def capstdout(self):
|
||||
|
@ -275,7 +282,9 @@ class BaseReport(object):
|
|||
|
||||
.. versionadded:: 3.0
|
||||
"""
|
||||
return ''.join(content for (prefix, content) in self.get_sections('Captured stdout'))
|
||||
return "".join(
|
||||
content for (prefix, content) in self.get_sections("Captured stdout")
|
||||
)
|
||||
|
||||
@property
|
||||
def capstderr(self):
|
||||
|
@ -283,7 +292,9 @@ class BaseReport(object):
|
|||
|
||||
.. versionadded:: 3.0
|
||||
"""
|
||||
return ''.join(content for (prefix, content) in self.get_sections('Captured stderr'))
|
||||
return "".join(
|
||||
content for (prefix, content) in self.get_sections("Captured stderr")
|
||||
)
|
||||
|
||||
passed = property(lambda x: x.outcome == "passed")
|
||||
failed = property(lambda x: x.outcome == "failed")
|
||||
|
@ -316,13 +327,22 @@ def pytest_runtest_makereport(item, call):
|
|||
if call.when == "call":
|
||||
longrepr = item.repr_failure(excinfo)
|
||||
else: # exception in setup or teardown
|
||||
longrepr = item._repr_failure_py(excinfo,
|
||||
style=item.config.option.tbstyle)
|
||||
longrepr = item._repr_failure_py(
|
||||
excinfo, style=item.config.option.tbstyle
|
||||
)
|
||||
for rwhen, key, content in item._report_sections:
|
||||
sections.append(("Captured %s %s" % (key, rwhen), content))
|
||||
return TestReport(item.nodeid, item.location,
|
||||
keywords, outcome, longrepr, when,
|
||||
sections, duration, user_properties=item.user_properties)
|
||||
return TestReport(
|
||||
item.nodeid,
|
||||
item.location,
|
||||
keywords,
|
||||
outcome,
|
||||
longrepr,
|
||||
when,
|
||||
sections,
|
||||
duration,
|
||||
user_properties=item.user_properties,
|
||||
)
|
||||
|
||||
|
||||
class TestReport(BaseReport):
|
||||
|
@ -330,8 +350,19 @@ class TestReport(BaseReport):
|
|||
they fail).
|
||||
"""
|
||||
|
||||
def __init__(self, nodeid, location, keywords, outcome,
|
||||
longrepr, when, sections=(), duration=0, user_properties=(), **extra):
|
||||
def __init__(
|
||||
self,
|
||||
nodeid,
|
||||
location,
|
||||
keywords,
|
||||
outcome,
|
||||
longrepr,
|
||||
when,
|
||||
sections=(),
|
||||
duration=0,
|
||||
user_properties=(),
|
||||
**extra
|
||||
):
|
||||
#: normalized collection node id
|
||||
self.nodeid = nodeid
|
||||
|
||||
|
@ -370,7 +401,8 @@ class TestReport(BaseReport):
|
|||
|
||||
def __repr__(self):
|
||||
return "<TestReport %r when=%r outcome=%r>" % (
|
||||
self.nodeid, self.when, self.outcome)
|
||||
self.nodeid, self.when, self.outcome
|
||||
)
|
||||
|
||||
|
||||
class TeardownErrorReport(BaseReport):
|
||||
|
@ -384,14 +416,13 @@ class TeardownErrorReport(BaseReport):
|
|||
|
||||
|
||||
def pytest_make_collect_report(collector):
|
||||
call = CallInfo(
|
||||
lambda: list(collector.collect()),
|
||||
'collect')
|
||||
call = CallInfo(lambda: list(collector.collect()), "collect")
|
||||
longrepr = None
|
||||
if not call.excinfo:
|
||||
outcome = "passed"
|
||||
else:
|
||||
from _pytest import nose
|
||||
|
||||
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
|
||||
if call.excinfo.errisinstance(skip_exceptions):
|
||||
outcome = "skipped"
|
||||
|
@ -403,15 +434,16 @@ def pytest_make_collect_report(collector):
|
|||
if not hasattr(errorinfo, "toterminal"):
|
||||
errorinfo = CollectErrorRepr(errorinfo)
|
||||
longrepr = errorinfo
|
||||
rep = CollectReport(collector.nodeid, outcome, longrepr,
|
||||
getattr(call, 'result', None))
|
||||
rep = CollectReport(
|
||||
collector.nodeid, outcome, longrepr, getattr(call, "result", None)
|
||||
)
|
||||
rep.call = call # see collect_one_node
|
||||
return rep
|
||||
|
||||
|
||||
class CollectReport(BaseReport):
|
||||
def __init__(self, nodeid, outcome, longrepr, result,
|
||||
sections=(), **extra):
|
||||
|
||||
def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
|
||||
self.nodeid = nodeid
|
||||
self.outcome = outcome
|
||||
self.longrepr = longrepr
|
||||
|
@ -425,10 +457,12 @@ class CollectReport(BaseReport):
|
|||
|
||||
def __repr__(self):
|
||||
return "<CollectReport %r lenresult=%s outcome=%r>" % (
|
||||
self.nodeid, len(self.result), self.outcome)
|
||||
self.nodeid, len(self.result), self.outcome
|
||||
)
|
||||
|
||||
|
||||
class CollectErrorRepr(TerminalRepr):
|
||||
|
||||
def __init__(self, msg):
|
||||
self.longrepr = msg
|
||||
|
||||
|
@ -477,8 +511,9 @@ class SetupState(object):
|
|||
if hasattr(colitem, "teardown"):
|
||||
colitem.teardown()
|
||||
for colitem in self._finalizers:
|
||||
assert colitem is None or colitem in self.stack \
|
||||
or isinstance(colitem, tuple)
|
||||
assert (
|
||||
colitem is None or colitem in self.stack or isinstance(colitem, tuple)
|
||||
)
|
||||
|
||||
def teardown_all(self):
|
||||
while self.stack:
|
||||
|
@ -505,7 +540,7 @@ class SetupState(object):
|
|||
|
||||
# check if the last collection node has raised an error
|
||||
for col in self.stack:
|
||||
if hasattr(col, '_prepare_exc'):
|
||||
if hasattr(col, "_prepare_exc"):
|
||||
py.builtin._reraise(*col._prepare_exc)
|
||||
for col in needed_collectors[len(self.stack):]:
|
||||
self.stack.append(col)
|
||||
|
|
|
@ -6,10 +6,18 @@ import sys

def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption('--setuponly', '--setup-only', action="store_true",
help="only setup fixtures, do not execute tests.")
group.addoption('--setupshow', '--setup-show', action="store_true",
help="show setup of fixtures while executing tests.")
group.addoption(
"--setuponly",
"--setup-only",
action="store_true",
help="only setup fixtures, do not execute tests.",
)
group.addoption(
"--setupshow",
"--setup-show",
action="store_true",
help="show setup of fixtures while executing tests.",
)

@pytest.hookimpl(hookwrapper=True)
|
||||
|
@ -17,50 +25,52 @@ def pytest_fixture_setup(fixturedef, request):
|
|||
yield
|
||||
config = request.config
|
||||
if config.option.setupshow:
|
||||
if hasattr(request, 'param'):
|
||||
if hasattr(request, "param"):
|
||||
# Save the fixture parameter so ._show_fixture_action() can
|
||||
# display it now and during the teardown (in .finish()).
|
||||
if fixturedef.ids:
|
||||
if callable(fixturedef.ids):
|
||||
fixturedef.cached_param = fixturedef.ids(request.param)
|
||||
else:
|
||||
fixturedef.cached_param = fixturedef.ids[
|
||||
request.param_index]
|
||||
fixturedef.cached_param = fixturedef.ids[request.param_index]
|
||||
else:
|
||||
fixturedef.cached_param = request.param
|
||||
_show_fixture_action(fixturedef, 'SETUP')
|
||||
_show_fixture_action(fixturedef, "SETUP")
|
||||
|
||||
|
||||
def pytest_fixture_post_finalizer(fixturedef):
|
||||
if hasattr(fixturedef, "cached_result"):
|
||||
config = fixturedef._fixturemanager.config
|
||||
if config.option.setupshow:
|
||||
_show_fixture_action(fixturedef, 'TEARDOWN')
|
||||
_show_fixture_action(fixturedef, "TEARDOWN")
|
||||
if hasattr(fixturedef, "cached_param"):
|
||||
del fixturedef.cached_param
|
||||
|
||||
|
||||
def _show_fixture_action(fixturedef, msg):
|
||||
config = fixturedef._fixturemanager.config
|
||||
capman = config.pluginmanager.getplugin('capturemanager')
|
||||
capman = config.pluginmanager.getplugin("capturemanager")
|
||||
if capman:
|
||||
out, err = capman.suspend_global_capture()
|
||||
|
||||
tw = config.get_terminal_writer()
|
||||
tw.line()
|
||||
tw.write(' ' * 2 * fixturedef.scopenum)
|
||||
tw.write('{step} {scope} {fixture}'.format(
|
||||
step=msg.ljust(8), # align the output to TEARDOWN
|
||||
scope=fixturedef.scope[0].upper(),
|
||||
fixture=fixturedef.argname))
|
||||
tw.write(" " * 2 * fixturedef.scopenum)
|
||||
tw.write(
|
||||
"{step} {scope} {fixture}".format(
|
||||
step=msg.ljust(8), # align the output to TEARDOWN
|
||||
scope=fixturedef.scope[0].upper(),
|
||||
fixture=fixturedef.argname,
|
||||
)
|
||||
)
|
||||
|
||||
if msg == 'SETUP':
|
||||
deps = sorted(arg for arg in fixturedef.argnames if arg != 'request')
|
||||
if msg == "SETUP":
|
||||
deps = sorted(arg for arg in fixturedef.argnames if arg != "request")
|
||||
if deps:
|
||||
tw.write(' (fixtures used: {})'.format(', '.join(deps)))
|
||||
tw.write(" (fixtures used: {})".format(", ".join(deps)))
|
||||
|
||||
if hasattr(fixturedef, 'cached_param'):
|
||||
tw.write('[{}]'.format(fixturedef.cached_param))
|
||||
if hasattr(fixturedef, "cached_param"):
|
||||
tw.write("[{}]".format(fixturedef.cached_param))
|
||||
|
||||
if capman:
|
||||
capman.resume_global_capture()
|
||||
|
|
|
@@ -5,9 +5,13 @@ import pytest
|
|||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("debugconfig")
|
||||
group.addoption('--setupplan', '--setup-plan', action="store_true",
|
||||
help="show what fixtures and tests would be executed but "
|
||||
"don't execute anything.")
|
||||
group.addoption(
|
||||
"--setupplan",
|
||||
"--setup-plan",
|
||||
action="store_true",
|
||||
help="show what fixtures and tests would be executed but "
|
||||
"don't execute anything.",
|
||||
)
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
|
||||
|
|
|
@@ -8,21 +8,28 @@ from _pytest.outcomes import fail, skip, xfail
|
|||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("general")
|
||||
group.addoption('--runxfail',
|
||||
action="store_true", dest="runxfail", default=False,
|
||||
help="run tests even if they are marked xfail")
|
||||
group.addoption(
|
||||
"--runxfail",
|
||||
action="store_true",
|
||||
dest="runxfail",
|
||||
default=False,
|
||||
help="run tests even if they are marked xfail",
|
||||
)
|
||||
|
||||
parser.addini("xfail_strict",
|
||||
"default for the strict parameter of xfail "
|
||||
"markers when not given explicitly (default: False)",
|
||||
default=False,
|
||||
type="bool")
|
||||
parser.addini(
|
||||
"xfail_strict",
|
||||
"default for the strict parameter of xfail "
|
||||
"markers when not given explicitly (default: False)",
|
||||
default=False,
|
||||
type="bool",
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
if config.option.runxfail:
|
||||
# yay a hack
|
||||
import pytest
|
||||
|
||||
old = pytest.xfail
|
||||
config._cleanup.append(lambda: setattr(pytest, "xfail", old))
|
||||
|
||||
|
@@ -32,48 +39,51 @@ def pytest_configure(config):
|
|||
nop.Exception = xfail.Exception
|
||||
setattr(pytest, "xfail", nop)
|
||||
|
||||
config.addinivalue_line("markers",
|
||||
"skip(reason=None): skip the given test function with an optional reason. "
|
||||
"Example: skip(reason=\"no way of currently testing this\") skips the "
|
||||
"test."
|
||||
)
|
||||
config.addinivalue_line("markers",
|
||||
"skipif(condition): skip the given test function if eval(condition) "
|
||||
"results in a True value. Evaluation happens within the "
|
||||
"module global context. Example: skipif('sys.platform == \"win32\"') "
|
||||
"skips the test if we are on the win32 platform. see "
|
||||
"http://pytest.org/latest/skipping.html"
|
||||
)
|
||||
config.addinivalue_line("markers",
|
||||
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
|
||||
"mark the test function as an expected failure if eval(condition) "
|
||||
"has a True value. Optionally specify a reason for better reporting "
|
||||
"and run=False if you don't even want to execute the test function. "
|
||||
"If only specific exception(s) are expected, you can list them in "
|
||||
"raises, and if the test fails in other ways, it will be reported as "
|
||||
"a true failure. See http://pytest.org/latest/skipping.html"
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"skip(reason=None): skip the given test function with an optional reason. "
|
||||
'Example: skip(reason="no way of currently testing this") skips the '
|
||||
"test.",
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"skipif(condition): skip the given test function if eval(condition) "
|
||||
"results in a True value. Evaluation happens within the "
|
||||
"module global context. Example: skipif('sys.platform == \"win32\"') "
|
||||
"skips the test if we are on the win32 platform. see "
|
||||
"http://pytest.org/latest/skipping.html",
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers",
|
||||
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
|
||||
"mark the test function as an expected failure if eval(condition) "
|
||||
"has a True value. Optionally specify a reason for better reporting "
|
||||
"and run=False if you don't even want to execute the test function. "
|
||||
"If only specific exception(s) are expected, you can list them in "
|
||||
"raises, and if the test fails in other ways, it will be reported as "
|
||||
"a true failure. See http://pytest.org/latest/skipping.html",
|
||||
)
|
||||
|
||||
|
||||
@hookimpl(tryfirst=True)
|
||||
def pytest_runtest_setup(item):
|
||||
# Check if skip or skipif are specified as pytest marks
|
||||
item._skipped_by_mark = False
|
||||
eval_skipif = MarkEvaluator(item, 'skipif')
|
||||
eval_skipif = MarkEvaluator(item, "skipif")
|
||||
if eval_skipif.istrue():
|
||||
item._skipped_by_mark = True
|
||||
skip(eval_skipif.getexplanation())
|
||||
|
||||
for skip_info in item.iter_markers(name='skip'):
|
||||
for skip_info in item.iter_markers(name="skip"):
|
||||
item._skipped_by_mark = True
|
||||
if 'reason' in skip_info.kwargs:
|
||||
skip(skip_info.kwargs['reason'])
|
||||
if "reason" in skip_info.kwargs:
|
||||
skip(skip_info.kwargs["reason"])
|
||||
elif skip_info.args:
|
||||
skip(skip_info.args[0])
|
||||
else:
|
||||
skip("unconditional skip")
|
||||
|
||||
item._evalxfail = MarkEvaluator(item, 'xfail')
|
||||
item._evalxfail = MarkEvaluator(item, "xfail")
|
||||
check_xfail_no_run(item)
|
||||
|
||||
|
||||
|
@@ -91,7 +101,7 @@ def check_xfail_no_run(item):
|
|||
if not item.config.option.runxfail:
|
||||
evalxfail = item._evalxfail
|
||||
if evalxfail.istrue():
|
||||
if not evalxfail.get('run', True):
|
||||
if not evalxfail.get("run", True):
|
||||
xfail("[NOTRUN] " + evalxfail.getexplanation())
|
||||
|
||||
|
||||
|
@@ -99,22 +109,23 @@ def check_strict_xfail(pyfuncitem):
|
|||
"""check xfail(strict=True) for the given PASSING test"""
|
||||
evalxfail = pyfuncitem._evalxfail
|
||||
if evalxfail.istrue():
|
||||
strict_default = pyfuncitem.config.getini('xfail_strict')
|
||||
is_strict_xfail = evalxfail.get('strict', strict_default)
|
||||
strict_default = pyfuncitem.config.getini("xfail_strict")
|
||||
is_strict_xfail = evalxfail.get("strict", strict_default)
|
||||
if is_strict_xfail:
|
||||
del pyfuncitem._evalxfail
|
||||
explanation = evalxfail.getexplanation()
|
||||
fail('[XPASS(strict)] ' + explanation, pytrace=False)
|
||||
fail("[XPASS(strict)] " + explanation, pytrace=False)
|
||||
|
||||
|
||||
@hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_makereport(item, call):
|
||||
outcome = yield
|
||||
rep = outcome.get_result()
|
||||
evalxfail = getattr(item, '_evalxfail', None)
|
||||
evalxfail = getattr(item, "_evalxfail", None)
|
||||
# unittest special case, see setting of _unexpectedsuccess
|
||||
if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
|
||||
if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
|
||||
from _pytest.compat import _is_unittest_unexpected_success_a_failure
|
||||
|
||||
if item._unexpectedsuccess:
|
||||
rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
|
||||
else:
|
||||
|
@@ -129,8 +140,7 @@ def pytest_runtest_makereport(item, call):
|
|||
elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
|
||||
rep.wasxfail = "reason: " + call.excinfo.value.msg
|
||||
rep.outcome = "skipped"
|
||||
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
|
||||
evalxfail.istrue():
|
||||
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
|
||||
if call.excinfo:
|
||||
if evalxfail.invalidraise(call.excinfo.value):
|
||||
rep.outcome = "failed"
|
||||
|
@@ -138,8 +148,8 @@ def pytest_runtest_makereport(item, call):
|
|||
rep.outcome = "skipped"
|
||||
rep.wasxfail = evalxfail.getexplanation()
|
||||
elif call.when == "call":
|
||||
strict_default = item.config.getini('xfail_strict')
|
||||
is_strict_xfail = evalxfail.get('strict', strict_default)
|
||||
strict_default = item.config.getini("xfail_strict")
|
||||
is_strict_xfail = evalxfail.get("strict", strict_default)
|
||||
explanation = evalxfail.getexplanation()
|
||||
if is_strict_xfail:
|
||||
rep.outcome = "failed"
|
||||
|
@@ -147,7 +157,9 @@ def pytest_runtest_makereport(item, call):
|
|||
else:
|
||||
rep.outcome = "passed"
|
||||
rep.wasxfail = explanation
|
||||
elif getattr(item, '_skipped_by_mark', False) and rep.skipped and type(rep.longrepr) is tuple:
|
||||
elif getattr(item, "_skipped_by_mark", False) and rep.skipped and type(
|
||||
rep.longrepr
|
||||
) is tuple:
|
||||
# skipped by mark.skipif; change the location of the failure
|
||||
# to point to the item definition, otherwise it will display
|
||||
# the location of where the skip exception was raised within pytest
|
||||
|
@@ -164,7 +176,7 @@ def pytest_report_teststatus(report):
|
|||
if report.skipped:
|
||||
return "xfailed", "x", "xfail"
|
||||
elif report.passed:
|
||||
return "xpassed", "X", ("XPASS", {'yellow': True})
|
||||
return "xpassed", "X", ("XPASS", {"yellow": True})
|
||||
|
||||
|
||||
# called by the terminalreporter instance/plugin
|
||||
|
@@ -224,12 +236,12 @@ def folded_skips(skipped):
|
|||
for event in skipped:
|
||||
key = event.longrepr
|
||||
assert len(key) == 3, (event, key)
|
||||
keywords = getattr(event, 'keywords', {})
|
||||
keywords = getattr(event, "keywords", {})
|
||||
# folding reports with global pytestmark variable
|
||||
# this is workaround, because for now we cannot identify the scope of a skip marker
|
||||
# TODO: revisit after marks scope would be fixed
|
||||
when = getattr(event, 'when', None)
|
||||
if when == 'setup' and 'skip' in keywords and 'pytestmark' not in keywords:
|
||||
when = getattr(event, "when", None)
|
||||
if when == "setup" and "skip" in keywords and "pytestmark" not in keywords:
|
||||
key = (key[0], None, key[2])
|
||||
d.setdefault(key, []).append(event)
|
||||
values = []
|
||||
|
@@ -240,7 +252,7 @@ def folded_skips(skipped):
|
|||
|
||||
def show_skipped(terminalreporter, lines):
|
||||
tr = terminalreporter
|
||||
skipped = tr.stats.get('skipped', [])
|
||||
skipped = tr.stats.get("skipped", [])
|
||||
if skipped:
|
||||
# if not tr.hasopt('skipped'):
|
||||
# tr.write_line(
|
||||
|
@@ -255,15 +267,14 @@ def show_skipped(terminalreporter, lines):
|
|||
reason = reason[9:]
|
||||
if lineno is not None:
|
||||
lines.append(
|
||||
"SKIP [%d] %s:%d: %s" %
|
||||
(num, fspath, lineno + 1, reason))
|
||||
"SKIP [%d] %s:%d: %s" % (num, fspath, lineno + 1, reason)
|
||||
)
|
||||
else:
|
||||
lines.append(
|
||||
"SKIP [%d] %s: %s" %
|
||||
(num, fspath, reason))
|
||||
lines.append("SKIP [%d] %s: %s" % (num, fspath, reason))
|
||||
|
||||
|
||||
def shower(stat, format):
|
||||
|
||||
def show_(terminalreporter, lines):
|
||||
return show_simple(terminalreporter, lines, stat, format)
|
||||
|
||||
|
@@ -271,13 +282,12 @@ def shower(stat, format):
|
|||
|
||||
|
||||
REPORTCHAR_ACTIONS = {
|
||||
'x': show_xfailed,
|
||||
'X': show_xpassed,
|
||||
'f': shower('failed', "FAIL %s"),
|
||||
'F': shower('failed', "FAIL %s"),
|
||||
's': show_skipped,
|
||||
'S': show_skipped,
|
||||
'p': shower('passed', "PASSED %s"),
|
||||
'E': shower('error', "ERROR %s")
|
||||
|
||||
"x": show_xfailed,
|
||||
"X": show_xpassed,
|
||||
"f": shower("failed", "FAIL %s"),
|
||||
"F": shower("failed", "FAIL %s"),
|
||||
"s": show_skipped,
|
||||
"S": show_skipped,
|
||||
"p": shower("passed", "PASSED %s"),
|
||||
"E": shower("error", "ERROR %s"),
|
||||
}
|
||||
|
|
|
@@ -16,8 +16,13 @@ from more_itertools import collapse
|
|||
|
||||
import pytest
|
||||
from _pytest import nodes
|
||||
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
|
||||
EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
|
||||
from _pytest.main import (
|
||||
EXIT_OK,
|
||||
EXIT_TESTSFAILED,
|
||||
EXIT_INTERRUPTED,
|
||||
EXIT_USAGEERROR,
|
||||
EXIT_NOTESTSCOLLECTED,
|
||||
)
|
||||
|
||||
|
||||
import argparse
|
||||
|
@@ -30,93 +35,140 @@ class MoreQuietAction(argparse.Action):
|
|||
|
||||
used to unify verbosity handling
|
||||
"""
|
||||
def __init__(self,
|
||||
option_strings,
|
||||
dest,
|
||||
default=None,
|
||||
required=False,
|
||||
help=None):
|
||||
|
||||
def __init__(self, option_strings, dest, default=None, required=False, help=None):
|
||||
super(MoreQuietAction, self).__init__(
|
||||
option_strings=option_strings,
|
||||
dest=dest,
|
||||
nargs=0,
|
||||
default=default,
|
||||
required=required,
|
||||
help=help)
|
||||
help=help,
|
||||
)
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
new_count = getattr(namespace, self.dest, 0) - 1
|
||||
setattr(namespace, self.dest, new_count)
|
||||
# todo Deprecate config.quiet
|
||||
namespace.quiet = getattr(namespace, 'quiet', 0) + 1
|
||||
namespace.quiet = getattr(namespace, "quiet", 0) + 1
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("terminal reporting", "reporting", after="general")
|
||||
group._addoption('-v', '--verbose', action="count", default=0,
|
||||
dest="verbose", help="increase verbosity."),
|
||||
group._addoption('-q', '--quiet', action=MoreQuietAction, default=0,
|
||||
dest="verbose", help="decrease verbosity."),
|
||||
group._addoption("--verbosity", dest='verbose', type=int, default=0,
|
||||
help="set verbosity")
|
||||
group._addoption('-r',
|
||||
action="store", dest="reportchars", default='', metavar="chars",
|
||||
help="show extra test summary info as specified by chars (f)ailed, "
|
||||
"(E)error, (s)skipped, (x)failed, (X)passed, "
|
||||
"(p)passed, (P)passed with output, (a)all except pP. "
|
||||
"Warnings are displayed at all times except when "
|
||||
"--disable-warnings is set")
|
||||
group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False,
|
||||
dest='disable_warnings', action='store_true',
|
||||
help='disable warnings summary')
|
||||
group._addoption('-l', '--showlocals',
|
||||
action="store_true", dest="showlocals", default=False,
|
||||
help="show locals in tracebacks (disabled by default).")
|
||||
group._addoption('--tb', metavar="style",
|
||||
action="store", dest="tbstyle", default='auto',
|
||||
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
|
||||
help="traceback print mode (auto/long/short/line/native/no).")
|
||||
group._addoption('--show-capture',
|
||||
action="store", dest="showcapture",
|
||||
choices=['no', 'stdout', 'stderr', 'log', 'all'], default='all',
|
||||
help="Controls how captured stdout/stderr/log is shown on failed tests. "
|
||||
"Default is 'all'.")
|
||||
group._addoption('--fulltrace', '--full-trace',
|
||||
action="store_true", default=False,
|
||||
help="don't cut any tracebacks (default is to cut).")
|
||||
group._addoption('--color', metavar="color",
|
||||
action="store", dest="color", default='auto',
|
||||
choices=['yes', 'no', 'auto'],
|
||||
help="color terminal output (yes/no/auto).")
|
||||
group._addoption(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="count",
|
||||
default=0,
|
||||
dest="verbose",
|
||||
help="increase verbosity.",
|
||||
),
|
||||
group._addoption(
|
||||
"-q",
|
||||
"--quiet",
|
||||
action=MoreQuietAction,
|
||||
default=0,
|
||||
dest="verbose",
|
||||
help="decrease verbosity.",
|
||||
),
|
||||
group._addoption(
|
||||
"--verbosity", dest="verbose", type=int, default=0, help="set verbosity"
|
||||
)
|
||||
group._addoption(
|
||||
"-r",
|
||||
action="store",
|
||||
dest="reportchars",
|
||||
default="",
|
||||
metavar="chars",
|
||||
help="show extra test summary info as specified by chars (f)ailed, "
|
||||
"(E)error, (s)skipped, (x)failed, (X)passed, "
|
||||
"(p)passed, (P)passed with output, (a)all except pP. "
|
||||
"Warnings are displayed at all times except when "
|
||||
"--disable-warnings is set",
|
||||
)
|
||||
group._addoption(
|
||||
"--disable-warnings",
|
||||
"--disable-pytest-warnings",
|
||||
default=False,
|
||||
dest="disable_warnings",
|
||||
action="store_true",
|
||||
help="disable warnings summary",
|
||||
)
|
||||
group._addoption(
|
||||
"-l",
|
||||
"--showlocals",
|
||||
action="store_true",
|
||||
dest="showlocals",
|
||||
default=False,
|
||||
help="show locals in tracebacks (disabled by default).",
|
||||
)
|
||||
group._addoption(
|
||||
"--tb",
|
||||
metavar="style",
|
||||
action="store",
|
||||
dest="tbstyle",
|
||||
default="auto",
|
||||
choices=["auto", "long", "short", "no", "line", "native"],
|
||||
help="traceback print mode (auto/long/short/line/native/no).",
|
||||
)
|
||||
group._addoption(
|
||||
"--show-capture",
|
||||
action="store",
|
||||
dest="showcapture",
|
||||
choices=["no", "stdout", "stderr", "log", "all"],
|
||||
default="all",
|
||||
help="Controls how captured stdout/stderr/log is shown on failed tests. "
|
||||
"Default is 'all'.",
|
||||
)
|
||||
group._addoption(
|
||||
"--fulltrace",
|
||||
"--full-trace",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="don't cut any tracebacks (default is to cut).",
|
||||
)
|
||||
group._addoption(
|
||||
"--color",
|
||||
metavar="color",
|
||||
action="store",
|
||||
dest="color",
|
||||
default="auto",
|
||||
choices=["yes", "no", "auto"],
|
||||
help="color terminal output (yes/no/auto).",
|
||||
)
|
||||
|
||||
parser.addini("console_output_style",
|
||||
help="console output: classic or with additional progress information (classic|progress).",
|
||||
default='progress')
|
||||
parser.addini(
|
||||
"console_output_style",
|
||||
help="console output: classic or with additional progress information (classic|progress).",
|
||||
default="progress",
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
reporter = TerminalReporter(config, sys.stdout)
|
||||
config.pluginmanager.register(reporter, 'terminalreporter')
|
||||
config.pluginmanager.register(reporter, "terminalreporter")
|
||||
if config.option.debug or config.option.traceconfig:
|
||||
|
||||
def mywriter(tags, args):
|
||||
msg = " ".join(map(str, args))
|
||||
reporter.write_line("[traceconfig] " + msg)
|
||||
|
||||
config.trace.root.setprocessor("pytest:config", mywriter)
|
||||
|
||||
|
||||
def getreportopt(config):
|
||||
reportopts = ""
|
||||
reportchars = config.option.reportchars
|
||||
if not config.option.disable_warnings and 'w' not in reportchars:
|
||||
reportchars += 'w'
|
||||
elif config.option.disable_warnings and 'w' in reportchars:
|
||||
reportchars = reportchars.replace('w', '')
|
||||
if not config.option.disable_warnings and "w" not in reportchars:
|
||||
reportchars += "w"
|
||||
elif config.option.disable_warnings and "w" in reportchars:
|
||||
reportchars = reportchars.replace("w", "")
|
||||
if reportchars:
|
||||
for char in reportchars:
|
||||
if char not in reportopts and char != 'a':
|
||||
if char not in reportopts and char != "a":
|
||||
reportopts += char
|
||||
elif char == 'a':
|
||||
reportopts = 'fEsxXw'
|
||||
elif char == "a":
|
||||
reportopts = "fEsxXw"
|
||||
return reportopts
|
||||
|
||||
|
||||
|
@@ -161,15 +213,17 @@ class WarningReport(object):
|
|||
if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
|
||||
filename, linenum = self.fslocation[:2]
|
||||
relpath = py.path.local(filename).relto(config.invocation_dir)
|
||||
return '%s:%s' % (relpath, linenum)
|
||||
return "%s:%s" % (relpath, linenum)
|
||||
else:
|
||||
return str(self.fslocation)
|
||||
return None
|
||||
|
||||
|
||||
class TerminalReporter(object):
|
||||
|
||||
def __init__(self, config, file=None):
|
||||
import _pytest.config
|
||||
|
||||
self.config = config
|
||||
self.verbosity = self.config.option.verbose
|
||||
self.showheader = self.verbosity >= 0
|
||||
|
@@ -196,15 +250,15 @@ class TerminalReporter(object):
|
|||
def _determine_show_progress_info(self):
|
||||
"""Return True if we should display progress information based on the current config"""
|
||||
# do not show progress if we are not capturing output (#3038)
|
||||
if self.config.getoption('capture') == 'no':
|
||||
if self.config.getoption("capture") == "no":
|
||||
return False
|
||||
# do not show progress if we are showing fixture setup/teardown
|
||||
if self.config.getoption('setupshow'):
|
||||
if self.config.getoption("setupshow"):
|
||||
return False
|
||||
return self.config.getini('console_output_style') == 'progress'
|
||||
return self.config.getini("console_output_style") == "progress"
|
||||
|
||||
def hasopt(self, char):
|
||||
char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
|
||||
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
|
||||
return char in self.reportchars
|
||||
|
||||
def write_fspath_result(self, nodeid, res):
|
||||
|
@@ -250,12 +304,12 @@ class TerminalReporter(object):
|
|||
|
||||
The rest of the keyword arguments are markup instructions.
|
||||
"""
|
||||
erase = markup.pop('erase', False)
|
||||
erase = markup.pop("erase", False)
|
||||
if erase:
|
||||
fill_count = self._tw.fullwidth - len(line) - 1
|
||||
fill = ' ' * fill_count
|
||||
fill = " " * fill_count
|
||||
else:
|
||||
fill = ''
|
||||
fill = ""
|
||||
line = str(line)
|
||||
self._tw.write("\r" + line + fill, **markup)
|
||||
|
||||
|
@@ -276,8 +330,9 @@ class TerminalReporter(object):
|
|||
|
||||
def pytest_logwarning(self, code, fslocation, message, nodeid):
|
||||
warnings = self.stats.setdefault("warnings", [])
|
||||
warning = WarningReport(code=code, fslocation=fslocation,
|
||||
message=message, nodeid=nodeid)
|
||||
warning = WarningReport(
|
||||
code=code, fslocation=fslocation, message=message, nodeid=nodeid
|
||||
)
|
||||
warnings.append(warning)
|
||||
|
||||
def pytest_plugin_registered(self, plugin):
|
||||
|
@@ -289,7 +344,7 @@ class TerminalReporter(object):
|
|||
self.write_line(msg)
|
||||
|
||||
def pytest_deselected(self, items):
|
||||
self.stats.setdefault('deselected', []).extend(items)
|
||||
self.stats.setdefault("deselected", []).extend(items)
|
||||
|
||||
def pytest_runtest_logstart(self, nodeid, location):
|
||||
# ensure that the path is printed before the
|
||||
|
@@ -314,7 +369,7 @@ class TerminalReporter(object):
|
|||
if not letter and not word:
|
||||
# probably passed setup/teardown
|
||||
return
|
||||
running_xdist = hasattr(rep, 'node')
|
||||
running_xdist = hasattr(rep, "node")
|
||||
if self.verbosity <= 0:
|
||||
if not running_xdist and self.showfspath:
|
||||
self.write_fspath_result(rep.nodeid, letter)
|
||||
|
@@ -324,11 +379,11 @@ class TerminalReporter(object):
|
|||
self._progress_nodeids_reported.add(rep.nodeid)
|
||||
if markup is None:
|
||||
if rep.passed:
|
||||
markup = {'green': True}
|
||||
markup = {"green": True}
|
||||
elif rep.failed:
|
||||
markup = {'red': True}
|
||||
markup = {"red": True}
|
||||
elif rep.skipped:
|
||||
markup = {'yellow': True}
|
||||
markup = {"yellow": True}
|
||||
else:
|
||||
markup = {}
|
||||
line = self._locationline(rep.nodeid, *rep.location)
|
||||
|
@@ -340,9 +395,11 @@ class TerminalReporter(object):
|
|||
self.ensure_newline()
|
||||
self._tw.write("[%s]" % rep.node.gateway.id)
|
||||
if self._show_progress_info:
|
||||
self._tw.write(self._get_progress_information_message() + " ", cyan=True)
|
||||
self._tw.write(
|
||||
self._get_progress_information_message() + " ", cyan=True
|
||||
)
|
||||
else:
|
||||
self._tw.write(' ')
|
||||
self._tw.write(" ")
|
||||
self._tw.write(word, **markup)
|
||||
self._tw.write(" " + line)
|
||||
self.currentfspath = -2
|
||||
|
@@ -350,29 +407,33 @@ class TerminalReporter(object):
|
|||
def pytest_runtest_logfinish(self, nodeid):
|
||||
if self.verbosity <= 0 and self._show_progress_info:
|
||||
self._progress_nodeids_reported.add(nodeid)
|
||||
last_item = len(self._progress_nodeids_reported) == self._session.testscollected
|
||||
last_item = len(
|
||||
self._progress_nodeids_reported
|
||||
) == self._session.testscollected
|
||||
if last_item:
|
||||
self._write_progress_information_filling_space()
|
||||
else:
|
||||
past_edge = self._tw.chars_on_current_line + self._PROGRESS_LENGTH + 1 >= self._screen_width
|
||||
if past_edge:
|
||||
msg = self._get_progress_information_message()
|
||||
self._tw.write(msg + '\n', cyan=True)
|
||||
self._tw.write(msg + "\n", cyan=True)
|
||||
|
||||
_PROGRESS_LENGTH = len(' [100%]')
|
||||
_PROGRESS_LENGTH = len(" [100%]")
|
||||
|
||||
def _get_progress_information_message(self):
|
||||
if self.config.getoption('capture') == 'no':
|
||||
return ''
|
||||
if self.config.getoption("capture") == "no":
|
||||
return ""
|
||||
collected = self._session.testscollected
|
||||
if collected:
|
||||
progress = len(self._progress_nodeids_reported) * 100 // collected
|
||||
return ' [{:3d}%]'.format(progress)
|
||||
return ' [100%]'
|
||||
return " [{:3d}%]".format(progress)
|
||||
return " [100%]"
|
||||
|
||||
def _write_progress_information_filling_space(self):
|
||||
msg = self._get_progress_information_message()
|
||||
fill = ' ' * (self._tw.fullwidth - self._tw.chars_on_current_line - len(msg) - 1)
|
||||
fill = " " * (
|
||||
self._tw.fullwidth - self._tw.chars_on_current_line - len(msg) - 1
|
||||
)
|
||||
self.write(fill + msg, cyan=True)
|
||||
|
||||
def pytest_collection(self):
|
||||
|
@@ -394,14 +455,16 @@ class TerminalReporter(object):
|
|||
if self.config.option.verbose < 0:
|
||||
return
|
||||
|
||||
errors = len(self.stats.get('error', []))
|
||||
skipped = len(self.stats.get('skipped', []))
|
||||
deselected = len(self.stats.get('deselected', []))
|
||||
errors = len(self.stats.get("error", []))
|
||||
skipped = len(self.stats.get("skipped", []))
|
||||
deselected = len(self.stats.get("deselected", []))
|
||||
if final:
|
||||
line = "collected "
|
||||
else:
|
||||
line = "collecting "
|
||||
line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's')
|
||||
line += str(self._numcollected) + " item" + (
|
||||
"" if self._numcollected == 1 else "s"
|
||||
)
|
||||
if errors:
|
||||
line += " / %d errors" % errors
|
||||
if deselected:
|
||||
|
@@ -411,7 +474,7 @@ class TerminalReporter(object):
|
|||
if self.isatty:
|
||||
self.rewrite(line, bold=True, erase=True)
|
||||
if final:
|
||||
self.write('\n')
|
||||
self.write("\n")
|
||||
else:
|
||||
self.write_line(line)
|
||||
|
||||
|
@@ -428,17 +491,22 @@ class TerminalReporter(object):
|
|||
self.write_sep("=", "test session starts", bold=True)
|
||||
verinfo = platform.python_version()
|
||||
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
|
||||
if hasattr(sys, 'pypy_version_info'):
|
||||
if hasattr(sys, "pypy_version_info"):
|
||||
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
|
||||
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
|
||||
msg += ", pytest-%s, py-%s, pluggy-%s" % (
|
||||
pytest.__version__, py.__version__, pluggy.__version__)
|
||||
if self.verbosity > 0 or self.config.option.debug or \
|
||||
getattr(self.config.option, 'pastebin', None):
|
||||
pytest.__version__, py.__version__, pluggy.__version__
|
||||
)
|
||||
if (
|
||||
self.verbosity > 0
|
||||
or self.config.option.debug
|
||||
or getattr(self.config.option, "pastebin", None)
|
||||
):
|
||||
msg += " -- " + str(sys.executable)
|
||||
self.write_line(msg)
|
||||
lines = self.config.hook.pytest_report_header(
|
||||
config=self.config, startdir=self.startdir)
|
||||
config=self.config, startdir=self.startdir
|
||||
)
|
||||
self._write_report_lines_from_hooks(lines)
|
||||
|
||||
def _write_report_lines_from_hooks(self, lines):
|
||||
|
@@ -455,21 +523,21 @@ class TerminalReporter(object):
|
|||
plugininfo = config.pluginmanager.list_plugin_distinfo()
|
||||
if plugininfo:
|
||||
|
||||
lines.append(
|
||||
"plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
|
||||
lines.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
|
||||
return lines
|
||||
|
||||
def pytest_collection_finish(self, session):
|
||||
if self.config.option.collectonly:
|
||||
self._printcollecteditems(session.items)
|
||||
if self.stats.get('failed'):
|
||||
if self.stats.get("failed"):
|
||||
self._tw.sep("!", "collection failures")
|
||||
for rep in self.stats.get('failed'):
|
||||
for rep in self.stats.get("failed"):
|
||||
rep.toterminal(self._tw)
|
||||
return 1
|
||||
return 0
|
||||
lines = self.config.hook.pytest_report_collectionfinish(
|
||||
config=self.config, startdir=self.startdir, items=session.items)
|
||||
config=self.config, startdir=self.startdir, items=session.items
|
||||
)
|
||||
self._write_report_lines_from_hooks(lines)
|
||||
|
||||
def _printcollecteditems(self, items):
|
||||
|
@@ -480,7 +548,7 @@ class TerminalReporter(object):
|
|||
if self.config.option.verbose < -1:
|
||||
counts = {}
|
||||
for item in items:
|
||||
name = item.nodeid.split('::', 1)[0]
|
||||
name = item.nodeid.split("::", 1)[0]
|
||||
counts[name] = counts.get(name, 0) + 1
|
||||
for name, count in sorted(counts.items()):
|
||||
self._tw.line("%s: %d" % (name, count))
|
||||
|
@@ -511,11 +579,16 @@ class TerminalReporter(object):
|
|||
outcome.get_result()
|
||||
self._tw.line("")
|
||||
summary_exit_codes = (
|
||||
EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
|
||||
EXIT_NOTESTSCOLLECTED)
|
||||
EXIT_OK,
|
||||
EXIT_TESTSFAILED,
|
||||
EXIT_INTERRUPTED,
|
||||
EXIT_USAGEERROR,
|
||||
EXIT_NOTESTSCOLLECTED,
|
||||
)
|
||||
if exitstatus in summary_exit_codes:
|
||||
self.config.hook.pytest_terminal_summary(terminalreporter=self,
|
||||
exitstatus=exitstatus)
|
||||
self.config.hook.pytest_terminal_summary(
|
||||
terminalreporter=self, exitstatus=exitstatus
|
||||
)
|
||||
if exitstatus == EXIT_INTERRUPTED:
|
||||
self._report_keyboardinterrupt()
|
||||
del self._keyboardinterrupt_memo
|
||||
|
@@ -533,7 +606,7 @@ class TerminalReporter(object):
|
|||
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
|
||||
|
||||
def pytest_unconfigure(self):
|
||||
if hasattr(self, '_keyboardinterrupt_memo'):
|
||||
if hasattr(self, "_keyboardinterrupt_memo"):
|
||||
self._report_keyboardinterrupt()
|
||||
|
||||
def _report_keyboardinterrupt(self):
|
||||
|
@@ -544,18 +617,23 @@ class TerminalReporter(object):
|
|||
if self.config.option.fulltrace:
|
||||
excrepr.toterminal(self._tw)
|
||||
else:
|
||||
self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
|
||||
self._tw.line(
|
||||
"to show a full traceback on KeyboardInterrupt use --fulltrace",
|
||||
yellow=True,
|
||||
)
|
||||
excrepr.reprcrash.toterminal(self._tw)
|
||||
|
||||
def _locationline(self, nodeid, fspath, lineno, domain):
|
||||
|
||||
def mkrel(nodeid):
|
||||
line = self.config.cwd_relative_nodeid(nodeid)
|
||||
if domain and line.endswith(domain):
|
||||
line = line[:-len(domain)]
|
||||
values = domain.split("[")
|
||||
values[0] = values[0].replace('.', '::') # don't replace '.' in params
|
||||
values[0] = values[0].replace(".", "::") # don't replace '.' in params
|
||||
line += "[".join(values)
|
||||
return line
|
||||
|
||||
# collect_fspath comes from testid which has a "/"-normalized path
|
||||
|
||||
if fspath:
|
||||
|
@@ -567,7 +645,7 @@ class TerminalReporter(object):
|
|||
return res + " "
|
||||
|
||||
def _getfailureheadline(self, rep):
|
||||
if hasattr(rep, 'location'):
|
||||
if hasattr(rep, "location"):
|
||||
fspath, lineno, domain = rep.location
|
||||
return domain
|
||||
else:
|
||||
|
@@ -588,7 +666,7 @@ class TerminalReporter(object):
|
|||
def getreports(self, name):
|
||||
values = []
|
||||
for x in self.stats.get(name, []):
|
||||
if not hasattr(x, '_pdbshown'):
|
||||
if not hasattr(x, "_pdbshown"):
|
||||
values.append(x)
|
||||
return values
|
||||
|
||||
|
@@ -598,22 +676,24 @@ class TerminalReporter(object):
|
|||
if not all_warnings:
|
||||
return
|
||||
|
||||
grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config))
|
||||
grouped = itertools.groupby(
|
||||
all_warnings, key=lambda wr: wr.get_location(self.config)
|
||||
)
|
||||
|
||||
self.write_sep("=", "warnings summary", yellow=True, bold=False)
|
||||
for location, warning_records in grouped:
|
||||
self._tw.line(str(location) or '<undetermined location>')
|
||||
self._tw.line(str(location) or "<undetermined location>")
|
||||
for w in warning_records:
|
||||
lines = w.message.splitlines()
|
||||
indented = '\n'.join(' ' + x for x in lines)
|
||||
indented = "\n".join(" " + x for x in lines)
|
||||
self._tw.line(indented)
|
||||
self._tw.line()
|
||||
self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html')
|
||||
self._tw.line("-- Docs: http://doc.pytest.org/en/latest/warnings.html")
|
||||
|
||||
def summary_passes(self):
|
||||
if self.config.option.tbstyle != "no":
|
||||
if self.hasopt("P"):
|
||||
reports = self.getreports('passed')
|
||||
reports = self.getreports("passed")
|
||||
if not reports:
|
||||
return
|
||||
self.write_sep("=", "PASSES")
|
||||
|
@@ -624,15 +704,15 @@ class TerminalReporter(object):
|
|||
|
||||
def print_teardown_sections(self, rep):
|
||||
for secname, content in rep.sections:
|
||||
if 'teardown' in secname:
|
||||
self._tw.sep('-', secname)
|
||||
if "teardown" in secname:
|
||||
self._tw.sep("-", secname)
|
||||
if content[-1:] == "\n":
|
||||
content = content[:-1]
|
||||
self._tw.line(content)
|
||||
|
||||
def summary_failures(self):
|
||||
if self.config.option.tbstyle != "no":
|
||||
reports = self.getreports('failed')
|
||||
reports = self.getreports("failed")
|
||||
if not reports:
|
||||
return
|
||||
self.write_sep("=", "FAILURES")
|
||||
|
@@ -642,22 +722,22 @@ class TerminalReporter(object):
|
|||
self.write_line(line)
|
||||
else:
|
||||
msg = self._getfailureheadline(rep)
|
||||
markup = {'red': True, 'bold': True}
|
||||
markup = {"red": True, "bold": True}
|
||||
self.write_sep("_", msg, **markup)
|
||||
self._outrep_summary(rep)
|
||||
for report in self.getreports(''):
|
||||
if report.nodeid == rep.nodeid and report.when == 'teardown':
|
||||
for report in self.getreports(""):
|
||||
if report.nodeid == rep.nodeid and report.when == "teardown":
|
||||
self.print_teardown_sections(report)
|
||||
|
||||
def summary_errors(self):
|
||||
if self.config.option.tbstyle != "no":
|
||||
reports = self.getreports('error')
|
||||
reports = self.getreports("error")
|
||||
if not reports:
|
||||
return
|
||||
self.write_sep("=", "ERRORS")
|
||||
for rep in self.stats['error']:
|
||||
for rep in self.stats["error"]:
|
||||
msg = self._getfailureheadline(rep)
|
||||
if not hasattr(rep, 'when'):
|
||||
if not hasattr(rep, "when"):
|
||||
# collect
|
||||
msg = "ERROR collecting " + msg
|
||||
elif rep.when == "setup":
|
||||
|
@@ -670,10 +750,10 @@ class TerminalReporter(object):
|
|||
def _outrep_summary(self, rep):
|
||||
rep.toterminal(self._tw)
|
||||
showcapture = self.config.option.showcapture
|
||||
if showcapture == 'no':
|
||||
if showcapture == "no":
|
||||
return
|
||||
for secname, content in rep.sections:
|
||||
if showcapture != 'all' and showcapture not in secname:
|
||||
if showcapture != "all" and showcapture not in secname:
|
||||
continue
|
||||
self._tw.sep("-", secname)
|
||||
if content[-1:] == "\n":
|
||||
|
@@ -684,7 +764,7 @@ class TerminalReporter(object):
|
|||
session_duration = time.time() - self._sessionstarttime
|
||||
(line, color) = build_summary_stats_line(self.stats)
|
||||
msg = "%s in %.2f seconds" % (line, session_duration)
|
||||
markup = {color: True, 'bold': True}
|
||||
markup = {color: True, "bold": True}
|
||||
|
||||
if self.verbosity >= 0:
|
||||
self.write_sep("=", msg, **markup)
|
||||
|
@@ -702,8 +782,9 @@ def repr_pythonversion(v=None):
|
|||
|
||||
|
||||
def build_summary_stats_line(stats):
|
||||
keys = ("failed passed skipped deselected "
|
||||
"xfailed xpassed warnings error").split()
|
||||
keys = (
|
||||
"failed passed skipped deselected " "xfailed xpassed warnings error"
|
||||
).split()
|
||||
unknown_key_seen = False
|
||||
for key in stats.keys():
|
||||
if key not in keys:
|
||||
|
@@ -721,14 +802,14 @@ def build_summary_stats_line(stats):
|
|||
else:
|
||||
line = "no tests ran"
|
||||
|
||||
if 'failed' in stats or 'error' in stats:
|
||||
color = 'red'
|
||||
elif 'warnings' in stats or unknown_key_seen:
|
||||
color = 'yellow'
|
||||
elif 'passed' in stats:
|
||||
color = 'green'
|
||||
if "failed" in stats or "error" in stats:
|
||||
color = "red"
|
||||
elif "warnings" in stats or unknown_key_seen:
|
||||
color = "yellow"
|
||||
elif "passed" in stats:
|
||||
color = "green"
|
||||
else:
|
||||
color = 'yellow'
|
||||
color = "yellow"
|
||||
|
||||
return (line, color)
|
||||
|
||||
|
@@ -737,7 +818,7 @@ def _plugin_nameversions(plugininfo):
|
|||
values = []
|
||||
for plugin, dist in plugininfo:
|
||||
# gets us name and version!
|
||||
name = '{dist.project_name}-{dist.version}'.format(dist=dist)
|
||||
name = "{dist.project_name}-{dist.version}".format(dist=dist)
|
||||
# questionable convenience, but it keeps things short
|
||||
if name.startswith("pytest-"):
|
||||
name = name[7:]
|
||||
|
|
|
@@ -37,8 +37,9 @@ class TempdirFactory(object):
|
|||
if not numbered:
|
||||
p = basetemp.mkdir(basename)
|
||||
else:
|
||||
p = py.path.local.make_numbered_dir(prefix=basename,
|
||||
keep=0, rootdir=basetemp, lock_timeout=None)
|
||||
p = py.path.local.make_numbered_dir(
|
||||
prefix=basename, keep=0, rootdir=basetemp, lock_timeout=None
|
||||
)
|
||||
self.trace("mktemp", p)
|
||||
return p
|
||||
|
||||
|
@@ -59,12 +60,13 @@ class TempdirFactory(object):
|
|||
if user:
|
||||
# use a sub-directory in the temproot to speed-up
|
||||
# make_numbered_dir() call
|
||||
rootdir = temproot.join('pytest-of-%s' % user)
|
||||
rootdir = temproot.join("pytest-of-%s" % user)
|
||||
else:
|
||||
rootdir = temproot
|
||||
rootdir.ensure(dir=1)
|
||||
basetemp = py.path.local.make_numbered_dir(prefix='pytest-',
|
||||
rootdir=rootdir)
|
||||
basetemp = py.path.local.make_numbered_dir(
|
||||
prefix="pytest-", rootdir=rootdir
|
||||
)
|
||||
self._basetemp = t = basetemp.realpath()
|
||||
self.trace("new basetemp", t)
|
||||
return t
|
||||
|
@@ -78,6 +80,7 @@ def get_user():
|
|||
in the current environment (see #1010).
|
||||
"""
|
||||
import getpass
|
||||
|
||||
try:
|
||||
return getpass.getuser()
|
||||
except (ImportError, KeyError):
|
||||
|
@@ -98,11 +101,11 @@ def pytest_configure(config):
|
|||
mp = MonkeyPatch()
|
||||
t = TempdirFactory(config)
|
||||
config._cleanup.extend([mp.undo, t.finish])
|
||||
mp.setattr(config, '_tmpdirhandler', t, raising=False)
|
||||
mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
|
||||
mp.setattr(config, "_tmpdirhandler", t, raising=False)
|
||||
mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False)
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@pytest.fixture(scope="session")
|
||||
def tmpdir_factory(request):
|
||||
"""Return a TempdirFactory instance for the test session.
|
||||
"""
|
||||
|
|
|
@@ -29,18 +29,19 @@ class UnitTestCase(Class):
|
|||
|
||||
def setup(self):
|
||||
cls = self.obj
|
||||
if getattr(cls, '__unittest_skip__', False):
|
||||
if getattr(cls, "__unittest_skip__", False):
|
||||
return # skipped
|
||||
setup = getattr(cls, 'setUpClass', None)
|
||||
setup = getattr(cls, "setUpClass", None)
|
||||
if setup is not None:
|
||||
setup()
|
||||
teardown = getattr(cls, 'tearDownClass', None)
|
||||
teardown = getattr(cls, "tearDownClass", None)
|
||||
if teardown is not None:
|
||||
self.addfinalizer(teardown)
|
||||
super(UnitTestCase, self).setup()
|
||||
|
||||
def collect(self):
|
||||
from unittest import TestLoader
|
||||
|
||||
cls = self.obj
|
||||
if not getattr(cls, "__test__", True):
|
||||
return
|
||||
|
@@ -50,19 +51,19 @@ class UnitTestCase(Class):
|
|||
foundsomething = False
|
||||
for name in loader.getTestCaseNames(self.obj):
|
||||
x = getattr(self.obj, name)
|
||||
if not getattr(x, '__test__', True):
|
||||
if not getattr(x, "__test__", True):
|
||||
continue
|
||||
funcobj = getattr(x, 'im_func', x)
|
||||
funcobj = getattr(x, "im_func", x)
|
||||
transfer_markers(funcobj, cls, module)
|
||||
yield TestCaseFunction(name, parent=self)
|
||||
foundsomething = True
|
||||
|
||||
if not foundsomething:
|
||||
runtest = getattr(self.obj, 'runTest', None)
|
||||
runtest = getattr(self.obj, "runTest", None)
|
||||
if runtest is not None:
|
||||
ut = sys.modules.get("twisted.trial.unittest", None)
|
||||
if ut is None or runtest != ut.TestCase.runTest:
|
||||
yield TestCaseFunction('runTest', parent=self)
|
||||
yield TestCaseFunction("runTest", parent=self)
|
||||
|
||||
|
||||
class TestCaseFunction(Function):
|
||||
|
@@ -72,7 +73,7 @@ class TestCaseFunction(Function):
|
|||
self._testcase = self.parent.obj(self.name)
|
||||
self._fix_unittest_skip_decorator()
|
||||
self._obj = getattr(self._testcase, self.name)
|
||||
if hasattr(self._testcase, 'setup_method'):
|
||||
if hasattr(self._testcase, "setup_method"):
|
||||
self._testcase.setup_method(self._obj)
|
||||
if hasattr(self, "_request"):
|
||||
self._request._fillfixtures()
|
||||
|
@@ -91,7 +92,7 @@ class TestCaseFunction(Function):
|
|||
setattr(self._testcase, "__name__", self.name)
|
||||
|
||||
def teardown(self):
|
||||
if hasattr(self._testcase, 'teardown_method'):
|
||||
if hasattr(self._testcase, "teardown_method"):
|
||||
self._testcase.teardown_method(self._obj)
|
||||
# Allow garbage collection on TestCase instance attributes.
|
||||
self._testcase = None
|
||||
|
@@ -102,26 +103,32 @@ class TestCaseFunction(Function):
|
|||
|
||||
def _addexcinfo(self, rawexcinfo):
|
||||
# unwrap potential exception info (see twisted trial support below)
|
||||
rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo)
|
||||
rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
|
||||
try:
|
||||
excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
|
||||
except TypeError:
|
||||
try:
|
||||
try:
|
||||
values = traceback.format_exception(*rawexcinfo)
|
||||
values.insert(0, "NOTE: Incompatible Exception Representation, "
|
||||
"displaying natively:\n\n")
|
||||
values.insert(
|
||||
0,
|
||||
"NOTE: Incompatible Exception Representation, "
|
||||
"displaying natively:\n\n",
|
||||
)
|
||||
fail("".join(values), pytrace=False)
|
||||
except (fail.Exception, KeyboardInterrupt):
|
||||
raise
|
||||
except: # noqa
|
||||
fail("ERROR: Unknown Incompatible Exception "
|
||||
"representation:\n%r" % (rawexcinfo,), pytrace=False)
|
||||
fail(
|
||||
"ERROR: Unknown Incompatible Exception "
|
||||
"representation:\n%r" % (rawexcinfo,),
|
||||
pytrace=False,
|
||||
)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except fail.Exception:
|
||||
excinfo = _pytest._code.ExceptionInfo()
|
||||
self.__dict__.setdefault('_excinfo', []).append(excinfo)
|
||||
self.__dict__.setdefault("_excinfo", []).append(excinfo)
|
||||
|
||||
def addError(self, testcase, rawexcinfo):
|
||||
self._addexcinfo(rawexcinfo)
|
||||
|
@@ -155,11 +162,15 @@ class TestCaseFunction(Function):
|
|||
# implements the skipping machinery (see #2137)
|
||||
# analog to pythons Lib/unittest/case.py:run
|
||||
testMethod = getattr(self._testcase, self._testcase._testMethodName)
|
||||
if (getattr(self._testcase.__class__, "__unittest_skip__", False) or
|
||||
getattr(testMethod, "__unittest_skip__", False)):
|
||||
if (
|
||||
getattr(self._testcase.__class__, "__unittest_skip__", False)
|
||||
or getattr(testMethod, "__unittest_skip__", False)
|
||||
):
|
||||
# If the class or method was skipped.
|
||||
skip_why = (getattr(self._testcase.__class__, '__unittest_skip_why__', '') or
|
||||
getattr(testMethod, '__unittest_skip_why__', ''))
|
||||
skip_why = (
|
||||
getattr(self._testcase.__class__, "__unittest_skip_why__", "")
|
||||
or getattr(testMethod, "__unittest_skip_why__", "")
|
||||
)
|
||||
try: # PY3, unittest2 on PY2
|
||||
self._testcase._addSkip(self, self._testcase, skip_why)
|
||||
except TypeError: # PY2
|
||||
|
@@ -181,7 +192,8 @@ class TestCaseFunction(Function):
|
|||
def _prunetraceback(self, excinfo):
|
||||
Function._prunetraceback(self, excinfo)
|
||||
traceback = excinfo.traceback.filter(
|
||||
lambda x: not x.frame.f_globals.get('__unittest'))
|
||||
lambda x: not x.frame.f_globals.get("__unittest")
|
||||
)
|
||||
if traceback:
|
||||
excinfo.traceback = traceback
|
||||
|
||||
|
@@ -196,19 +208,20 @@ def pytest_runtest_makereport(item, call):
|
|||
except AttributeError:
|
||||
pass
|
||||
|
||||
|
||||
# twisted trial support
|
||||
|
||||
|
||||
@hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_protocol(item):
|
||||
if isinstance(item, TestCaseFunction) and \
|
||||
'twisted.trial.unittest' in sys.modules:
|
||||
ut = sys.modules['twisted.python.failure']
|
||||
if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
|
||||
ut = sys.modules["twisted.python.failure"]
|
||||
Failure__init__ = ut.Failure.__init__
|
||||
check_testcase_implements_trial_reporter()
|
||||
|
||||
def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
|
||||
captureVars=None):
|
||||
def excstore(
|
||||
self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None
|
||||
):
|
||||
if exc_value is None:
|
||||
self._rawexcinfo = sys.exc_info()
|
||||
else:
|
||||
|
@@ -216,8 +229,9 @@ def pytest_runtest_protocol(item):
|
|||
exc_type = type(exc_value)
|
||||
self._rawexcinfo = (exc_type, exc_value, exc_tb)
|
||||
try:
|
||||
Failure__init__(self, exc_value, exc_type, exc_tb,
|
||||
captureVars=captureVars)
|
||||
Failure__init__(
|
||||
self, exc_value, exc_type, exc_tb, captureVars=captureVars
|
||||
)
|
||||
except TypeError:
|
||||
Failure__init__(self, exc_value, exc_type, exc_tb)
|
||||
|
||||
|
@@ -233,5 +247,6 @@ def check_testcase_implements_trial_reporter(done=[]):
|
|||
return
|
||||
from zope.interface import classImplements
|
||||
from twisted.trial.itrial import IReporter
|
||||
|
||||
classImplements(TestCaseFunction, IReporter)
|
||||
done.append(1)
|
||||
|
|
|
@@ -12,13 +12,12 @@ def _setoption(wmod, arg):
|
|||
"""
|
||||
Copy of the warning._setoption function but does not escape arguments.
|
||||
"""
|
||||
parts = arg.split(':')
|
||||
parts = arg.split(":")
|
||||
if len(parts) > 5:
|
||||
raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
|
||||
while len(parts) < 5:
|
||||
parts.append('')
|
||||
action, message, category, module, lineno = [s.strip()
|
||||
for s in parts]
|
||||
parts.append("")
|
||||
action, message, category, module, lineno = [s.strip() for s in parts]
|
||||
action = wmod._getaction(action)
|
||||
category = wmod._getcategory(category)
|
||||
if lineno:
|
||||
|
@@ -36,12 +35,18 @@ def _setoption(wmod, arg):
|
|||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("pytest-warnings")
|
||||
group.addoption(
|
||||
'-W', '--pythonwarnings', action='append',
|
||||
help="set which warnings to report, see -W option of python itself.")
|
||||
parser.addini("filterwarnings", type="linelist",
|
||||
help="Each line specifies a pattern for "
|
||||
"warnings.filterwarnings. "
|
||||
"Processed after -W and --pythonwarnings.")
|
||||
"-W",
|
||||
"--pythonwarnings",
|
||||
action="append",
|
||||
help="set which warnings to report, see -W option of python itself.",
|
||||
)
|
||||
parser.addini(
|
||||
"filterwarnings",
|
||||
type="linelist",
|
||||
help="Each line specifies a pattern for "
|
||||
"warnings.filterwarnings. "
|
||||
"Processed after -W and --pythonwarnings.",
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
|
@@ -51,7 +56,7 @@ def catch_warnings_for_item(item):
|
|||
of the given item and after it is done posts them as warnings to this
|
||||
item.
|
||||
"""
|
||||
args = item.config.getoption('pythonwarnings') or []
|
||||
args = item.config.getoption("pythonwarnings") or []
|
||||
inifilters = item.config.getini("filterwarnings")
|
||||
with warnings.catch_warnings(record=True) as log:
|
||||
for arg in args:
|
||||
|
@@ -60,7 +65,7 @@ def catch_warnings_for_item(item):
|
|||
for arg in inifilters:
|
||||
_setoption(warnings, arg)
|
||||
|
||||
for mark in item.iter_markers(name='filterwarnings'):
|
||||
for mark in item.iter_markers(name="filterwarnings"):
|
||||
for arg in mark.args:
|
||||
warnings._setoption(arg)
|
||||
|
||||
|
@@ -70,23 +75,35 @@ def catch_warnings_for_item(item):
|
|||
warn_msg = warning.message
|
||||
unicode_warning = False
|
||||
|
||||
if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):
|
||||
if (
|
||||
compat._PY2
|
||||
and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args)
|
||||
):
|
||||
new_args = []
|
||||
for m in warn_msg.args:
|
||||
new_args.append(compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m)
|
||||
new_args.append(
|
||||
compat.ascii_escaped(m)
|
||||
if isinstance(m, compat.UNICODE_TYPES)
|
||||
else m
|
||||
)
|
||||
unicode_warning = list(warn_msg.args) != new_args
|
||||
warn_msg.args = new_args
|
||||
|
||||
msg = warnings.formatwarning(
|
||||
warn_msg, warning.category,
|
||||
warning.filename, warning.lineno, warning.line)
|
||||
warn_msg,
|
||||
warning.category,
|
||||
warning.filename,
|
||||
warning.lineno,
|
||||
warning.line,
|
||||
)
|
||||
item.warn("unused", msg)
|
||||
|
||||
if unicode_warning:
|
||||
warnings.warn(
|
||||
"Warning is using unicode non convertible to ascii, "
|
||||
"converting to a safe representation:\n %s" % msg,
|
||||
UnicodeWarning)
|
||||
UnicodeWarning,
|
||||
)
|
||||
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
|
|
|
@@ -1,12 +1,13 @@
|
|||
import sys
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
import cProfile
|
||||
import pytest
|
||||
import pstats
|
||||
|
||||
script = sys.argv[1:] if len(sys.argv) > 1 else "empty.py"
|
||||
stats = cProfile.run('pytest.cmdline.main(%r)' % script, 'prof')
|
||||
stats = cProfile.run("pytest.cmdline.main(%r)" % script, "prof")
|
||||
p = pstats.Stats("prof")
|
||||
p.strip_dirs()
|
||||
p.sort_stats('cumulative')
|
||||
p.sort_stats("cumulative")
|
||||
print(p.print_stats(500))
|
||||
|
|
|
@@ -6,14 +6,16 @@
|
|||
# FastFilesCompleter 0.7383 1.0760
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
import timeit
|
||||
from argcomplete.completers import FilesCompleter
|
||||
from _pytest._argcomplete import FastFilesCompleter
|
||||
count = 1000 # only a few seconds
|
||||
setup = 'from __main__ import FastFilesCompleter\nfc = FastFilesCompleter()'
|
||||
|
||||
count = 1000 # only a few seconds
|
||||
setup = "from __main__ import FastFilesCompleter\nfc = FastFilesCompleter()"
|
||||
run = 'fc("/d")'
|
||||
sys.stdout.write('%s\n' % (timeit.timeit(run,
|
||||
setup=setup.replace('Fast', ''), number=count)))
|
||||
sys.stdout.write('%s\n' % (timeit.timeit(run, setup=setup, number=count)))
|
||||
sys.stdout.write(
|
||||
"%s\n" % (timeit.timeit(run, setup=setup.replace("Fast", ""), number=count))
|
||||
)
|
||||
sys.stdout.write("%s\n" % (timeit.timeit(run, setup=setup, number=count)))
|
||||
|
|
|
@@ -1,3 +1,4 @@
|
|||
import py
|
||||
|
||||
for i in range(1000):
|
||||
py.builtin.exec_("def test_func_%d(): pass" % i)
|
||||
|
|
|
@@ -1,11 +1,15 @@
|
|||
|
||||
import pytest
|
||||
|
||||
@pytest.fixture(scope='module', params=range(966))
|
||||
|
||||
@pytest.fixture(scope="module", params=range(966))
|
||||
def foo(request):
|
||||
return request.param
|
||||
|
||||
|
||||
def test_it(foo):
|
||||
pass
|
||||
|
||||
|
||||
def test_it2(foo):
|
||||
pass
|
||||
|
|
|
@@ -4,6 +4,7 @@ import pytest
|
|||
|
||||
SKIP = True
|
||||
|
||||
|
||||
@pytest.mark.parametrize("x", xrange(5000))
|
||||
def test_foo(x):
|
||||
if SKIP:
|
||||
|
|
|
@@ -1,7 +1,19 @@
|
|||
# flasky extensions. flasky pygments style based on tango style
|
||||
from pygments.style import Style
|
||||
from pygments.token import Keyword, Name, Comment, String, Error, \
|
||||
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
|
||||
from pygments.token import (
|
||||
Keyword,
|
||||
Name,
|
||||
Comment,
|
||||
String,
|
||||
Error,
|
||||
Number,
|
||||
Operator,
|
||||
Generic,
|
||||
Whitespace,
|
||||
Punctuation,
|
||||
Other,
|
||||
Literal,
|
||||
)
|
||||
|
||||
|
||||
class FlaskyStyle(Style):
|
||||
|
@@ -10,77 +22,68 @@ class FlaskyStyle(Style):
|
|||

styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'

Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'

Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'

Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords

Punctuation: "bold #000000", # class: 'p'

# Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised

Number: "#990000", # class: 'm'

Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'

String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'

Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
|
|
|
@ -1,15 +1,18 @@
|
|||
import py
import subprocess


def test_build_docs(tmpdir):
doctrees = tmpdir.join("doctrees")
htmldir = tmpdir.join("html")
subprocess.check_call([
"sphinx-build", "-W", "-bhtml",
"-d", str(doctrees), ".", str(htmldir)])
subprocess.check_call(
["sphinx-build", "-W", "-bhtml", "-d", str(doctrees), ".", str(htmldir)]
)


def test_linkcheck(tmpdir):
doctrees = tmpdir.join("doctrees")
htmldir = tmpdir.join("html")
subprocess.check_call(
["sphinx-build", "-blinkcheck",
"-d", str(doctrees), ".", str(htmldir)])
["sphinx-build", "-blinkcheck", "-d", str(doctrees), ".", str(htmldir)]
)
|
|
doc/en/conf.py
|
@ -29,7 +29,7 @@ release = ".".join(version.split(".")[:2])
|
|||
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('.'))

autodoc_member_order = "bysource"
todo_include_todos = 1
|
@ -37,59 +37,68 @@ todo_include_todos = 1
|
|||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.autosummary',
|
||||
'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinxcontrib_trio']
|
||||
extensions = [
|
||||
"sphinx.ext.autodoc",
|
||||
"sphinx.ext.todo",
|
||||
"sphinx.ext.autosummary",
|
||||
"sphinx.ext.intersphinx",
|
||||
"sphinx.ext.viewcode",
|
||||
"sphinxcontrib_trio",
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
templates_path = ["_templates"]
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
source_suffix = ".rst"
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
# source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'contents'
|
||||
master_doc = "contents"
|
||||
|
||||
# General information about the project.
|
||||
project = u'pytest'
|
||||
project = u"pytest"
|
||||
year = datetime.datetime.utcnow().year
|
||||
copyright = u'2015–{} , holger krekel and pytest-dev team'.format(year)
|
||||
|
||||
copyright = u"2015–{} , holger krekel and pytest-dev team".format(year)
|
||||
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
# language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
# today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['links.inc', '_build', 'naming20.rst', 'test/*',
|
||||
exclude_patterns = [
|
||||
"links.inc",
|
||||
"_build",
|
||||
"naming20.rst",
|
||||
"test/*",
|
||||
"old_*",
|
||||
'*attic*',
|
||||
'*/attic*',
|
||||
'funcargs.rst',
|
||||
'setup.rst',
|
||||
'example/remoteinterp.rst',
|
||||
]
|
||||
"*attic*",
|
||||
"*/attic*",
|
||||
"funcargs.rst",
|
||||
"setup.rst",
|
||||
"example/remoteinterp.rst",
|
||||
]
|
||||
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
# default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
# add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
|
@ -97,39 +106,36 @@ add_module_names = False
|
|||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
# show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
pygments_style = "sphinx"
|
||||
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
# modindex_common_prefix = []
|
||||
|
||||
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
|
||||
sys.path.append(os.path.abspath('_themes'))
|
||||
html_theme_path = ['_themes']
|
||||
sys.path.append(os.path.abspath("_themes"))
|
||||
html_theme_path = ["_themes"]
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'flask'
|
||||
html_theme = "flask"
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
html_theme_options = {
|
||||
'index_logo': None
|
||||
}
|
||||
html_theme_options = {"index_logo": None}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
# html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
html_title = 'pytest documentation'
|
||||
html_title = "pytest documentation"
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
html_short_title = "pytest-%s" % release
|
||||
|
@ -150,37 +156,37 @@ html_favicon = "img/pytest1favi.ico"
|
|||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
# html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
#html_sidebars = {'index': 'indexsidebar.html'}
|
||||
# html_sidebars = {}
|
||||
# html_sidebars = {'index': 'indexsidebar.html'}
|
||||
|
||||
html_sidebars = {
|
||||
'index': [
|
||||
'sidebarintro.html',
|
||||
'globaltoc.html',
|
||||
'links.html',
|
||||
'sourcelink.html',
|
||||
'searchbox.html'
|
||||
"index": [
|
||||
"sidebarintro.html",
|
||||
"globaltoc.html",
|
||||
"links.html",
|
||||
"sourcelink.html",
|
||||
"searchbox.html",
|
||||
],
|
||||
"**": [
|
||||
"globaltoc.html",
|
||||
"relations.html",
|
||||
"links.html",
|
||||
"sourcelink.html",
|
||||
"searchbox.html",
|
||||
],
|
||||
'**': [
|
||||
'globaltoc.html',
|
||||
'relations.html',
|
||||
'links.html',
|
||||
'sourcelink.html',
|
||||
'searchbox.html'
|
||||
]
|
||||
}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
#html_additional_pages = {'index': 'index.html'}
|
||||
# html_additional_pages = {}
|
||||
# html_additional_pages = {'index': 'index.html'}
|
||||
|
||||
|
||||
# If false, no module index is generated.
|
||||
|
@ -190,63 +196,68 @@ html_domain_indices = True
|
|||
html_use_index = False
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
# html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
html_show_sourcelink = False
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
# html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
# html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
# html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
# html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'pytestdoc'
|
||||
htmlhelp_basename = "pytestdoc"
|
||||
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
# The paper size ('letter' or 'a4').
|
||||
#latex_paper_size = 'letter'
|
||||
# latex_paper_size = 'letter'
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#latex_font_size = '10pt'
|
||||
# latex_font_size = '10pt'
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
('contents', 'pytest.tex', u'pytest Documentation',
|
||||
u'holger krekel, trainer and consultant, http://merlinux.eu', 'manual'),
|
||||
(
|
||||
"contents",
|
||||
"pytest.tex",
|
||||
u"pytest Documentation",
|
||||
u"holger krekel, trainer and consultant, http://merlinux.eu",
|
||||
"manual",
|
||||
)
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
latex_logo = 'img/pytest1.png'
|
||||
latex_logo = "img/pytest1.png"
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
# latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
# latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
# latex_show_urls = False
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#latex_preamble = ''
|
||||
# latex_preamble = ''
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
# latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
latex_domain_indices = False
|
||||
|
@ -255,72 +266,78 @@ latex_domain_indices = False
|
|||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('usage', 'pytest', u'pytest usage',
|
||||
[u'holger krekel at merlinux eu'], 1)
|
||||
]
|
||||
man_pages = [("usage", "pytest", u"pytest usage", [u"holger krekel at merlinux eu"], 1)]
|
||||
|
||||
|
||||
# -- Options for Epub output ---------------------------------------------------
|
||||
|
||||
# Bibliographic Dublin Core info.
|
||||
epub_title = u'pytest'
|
||||
epub_author = u'holger krekel at merlinux eu'
|
||||
epub_publisher = u'holger krekel at merlinux eu'
|
||||
epub_copyright = u'2013, holger krekel et alii'
|
||||
epub_title = u"pytest"
|
||||
epub_author = u"holger krekel at merlinux eu"
|
||||
epub_publisher = u"holger krekel at merlinux eu"
|
||||
epub_copyright = u"2013, holger krekel et alii"
|
||||
|
||||
# The language of the text. It defaults to the language option
|
||||
# or en if the language is not set.
|
||||
#epub_language = ''
|
||||
# epub_language = ''
|
||||
|
||||
# The scheme of the identifier. Typical schemes are ISBN or URL.
|
||||
#epub_scheme = ''
|
||||
# epub_scheme = ''
|
||||
|
||||
# The unique identifier of the text. This can be a ISBN number
|
||||
# or the project homepage.
|
||||
#epub_identifier = ''
|
||||
# epub_identifier = ''
|
||||
|
||||
# A unique identification for the text.
|
||||
#epub_uid = ''
|
||||
# epub_uid = ''
|
||||
|
||||
# HTML files that should be inserted before the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_pre_files = []
|
||||
# epub_pre_files = []
|
||||
|
||||
# HTML files shat should be inserted after the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_post_files = []
|
||||
# epub_post_files = []
|
||||
|
||||
# A list of files that should not be packed into the epub file.
|
||||
#epub_exclude_files = []
|
||||
# epub_exclude_files = []
|
||||
|
||||
# The depth of the table of contents in toc.ncx.
|
||||
#epub_tocdepth = 3
|
||||
# epub_tocdepth = 3
|
||||
|
||||
# Allow duplicate toc entries.
|
||||
#epub_tocdup = True
|
||||
# epub_tocdup = True
|
||||
|
||||
|
||||
# -- Options for texinfo output ------------------------------------------------
|
||||
|
||||
texinfo_documents = [
|
||||
(master_doc, 'pytest', 'pytest Documentation',
|
||||
('Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*'
|
||||
'Floris Bruynooghe@*others'),
|
||||
'pytest',
|
||||
'simple powerful testing with Python',
|
||||
'Programming',
|
||||
1),
|
||||
(
|
||||
master_doc,
|
||||
"pytest",
|
||||
"pytest Documentation",
|
||||
(
|
||||
"Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*"
|
||||
"Floris Bruynooghe@*others"
|
||||
),
|
||||
"pytest",
|
||||
"simple powerful testing with Python",
|
||||
"Programming",
|
||||
1,
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
# Example configuration for intersphinx: refer to the Python standard library.
|
||||
intersphinx_mapping = {'python': ('http://docs.python.org/3', None)}
|
||||
intersphinx_mapping = {"python": ("http://docs.python.org/3", None)}
|
||||
|
||||
|
||||
def setup(app):
|
||||
#from sphinx.ext.autodoc import cut_lines
|
||||
#app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
|
||||
app.add_description_unit('confval', 'confval',
|
||||
objname='configuration value',
|
||||
indextemplate='pair: %s; configuration value')
|
||||
# from sphinx.ext.autodoc import cut_lines
|
||||
# app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
|
||||
app.add_description_unit(
|
||||
"confval",
|
||||
"confval",
|
||||
objname="configuration value",
|
||||
indextemplate="pair: %s; configuration value",
|
||||
)
|
||||
|
|
|
@ -2,135 +2,158 @@ from pytest import raises
|
|||
import _pytest._code
|
||||
import py
|
||||
|
||||
def otherfunc(a,b):
|
||||
assert a==b
|
||||
|
||||
def somefunc(x,y):
|
||||
otherfunc(x,y)
|
||||
def otherfunc(a, b):
|
||||
assert a == b
|
||||
|
||||
|
||||
def somefunc(x, y):
|
||||
otherfunc(x, y)
|
||||
|
||||
|
||||
def otherfunc_multi(a, b):
|
||||
assert a == b
|
||||
|
||||
def otherfunc_multi(a,b):
|
||||
assert (a ==
|
||||
b)
|
||||
|
||||
def test_generative(param1, param2):
|
||||
assert param1 * 2 < param2
|
||||
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
if 'param1' in metafunc.fixturenames:
|
||||
if "param1" in metafunc.fixturenames:
|
||||
metafunc.addcall(funcargs=dict(param1=3, param2=6))
|
||||
|
||||
|
||||
class TestFailing(object):
|
||||
|
||||
def test_simple(self):
|
||||
|
||||
def f():
|
||||
return 42
|
||||
|
||||
def g():
|
||||
return 43
|
||||
|
||||
assert f() == g()
|
||||
|
||||
def test_simple_multiline(self):
|
||||
otherfunc_multi(
|
||||
42,
|
||||
6*9)
|
||||
otherfunc_multi(42, 6 * 9)
|
||||
|
||||
def test_not(self):
|
||||
|
||||
def f():
|
||||
return 42
|
||||
|
||||
assert not f()
|
||||
|
||||
|
||||
class TestSpecialisedExplanations(object):
|
||||
|
||||
def test_eq_text(self):
|
||||
assert 'spam' == 'eggs'
|
||||
assert "spam" == "eggs"
|
||||
|
||||
def test_eq_similar_text(self):
|
||||
assert 'foo 1 bar' == 'foo 2 bar'
|
||||
assert "foo 1 bar" == "foo 2 bar"
|
||||
|
||||
def test_eq_multiline_text(self):
|
||||
assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
|
||||
assert "foo\nspam\nbar" == "foo\neggs\nbar"
|
||||
|
||||
def test_eq_long_text(self):
|
||||
a = '1'*100 + 'a' + '2'*100
|
||||
b = '1'*100 + 'b' + '2'*100
|
||||
a = "1" * 100 + "a" + "2" * 100
|
||||
b = "1" * 100 + "b" + "2" * 100
|
||||
assert a == b
|
||||
|
||||
def test_eq_long_text_multiline(self):
|
||||
a = '1\n'*100 + 'a' + '2\n'*100
|
||||
b = '1\n'*100 + 'b' + '2\n'*100
|
||||
a = "1\n" * 100 + "a" + "2\n" * 100
|
||||
b = "1\n" * 100 + "b" + "2\n" * 100
|
||||
assert a == b
|
||||
|
||||
def test_eq_list(self):
|
||||
assert [0, 1, 2] == [0, 1, 3]
|
||||
|
||||
def test_eq_list_long(self):
|
||||
a = [0]*100 + [1] + [3]*100
|
||||
b = [0]*100 + [2] + [3]*100
|
||||
a = [0] * 100 + [1] + [3] * 100
|
||||
b = [0] * 100 + [2] + [3] * 100
|
||||
assert a == b
|
||||
|
||||
def test_eq_dict(self):
|
||||
assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
|
||||
assert {"a": 0, "b": 1, "c": 0} == {"a": 0, "b": 2, "d": 0}
|
||||
|
||||
def test_eq_set(self):
|
||||
assert {0, 10, 11, 12} == {0, 20, 21}
|
||||
|
||||
def test_eq_longer_list(self):
|
||||
assert [1,2] == [1,2,3]
|
||||
assert [1, 2] == [1, 2, 3]
|
||||
|
||||
def test_in_list(self):
|
||||
assert 1 in [0, 2, 3, 4, 5]
|
||||
|
||||
def test_not_in_text_multiline(self):
|
||||
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
|
||||
assert 'foo' not in text
|
||||
text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail"
|
||||
assert "foo" not in text
|
||||
|
||||
def test_not_in_text_single(self):
|
||||
text = 'single foo line'
|
||||
assert 'foo' not in text
|
||||
text = "single foo line"
|
||||
assert "foo" not in text
|
||||
|
||||
def test_not_in_text_single_long(self):
|
||||
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
|
||||
assert 'foo' not in text
|
||||
text = "head " * 50 + "foo " + "tail " * 20
|
||||
assert "foo" not in text
|
||||
|
||||
def test_not_in_text_single_long_term(self):
|
||||
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
|
||||
assert 'f'*70 not in text
|
||||
text = "head " * 50 + "f" * 70 + "tail " * 20
|
||||
assert "f" * 70 not in text
|
||||
|
||||
|
||||
def test_attribute():
|
||||
|
||||
class Foo(object):
|
||||
b = 1
|
||||
|
||||
i = Foo()
|
||||
assert i.b == 2
|
||||
|
||||
|
||||
def test_attribute_instance():
|
||||
|
||||
class Foo(object):
|
||||
b = 1
|
||||
|
||||
assert Foo().b == 2
|
||||
|
||||
|
||||
def test_attribute_failure():
|
||||
|
||||
class Foo(object):
|
||||
|
||||
def _get_b(self):
|
||||
raise Exception('Failed to get attrib')
|
||||
raise Exception("Failed to get attrib")
|
||||
|
||||
b = property(_get_b)
|
||||
|
||||
i = Foo()
|
||||
assert i.b == 2
|
||||
|
||||
|
||||
def test_attribute_multiple():
|
||||
|
||||
class Foo(object):
|
||||
b = 1
|
||||
|
||||
class Bar(object):
|
||||
b = 2
|
||||
|
||||
assert Foo().b == Bar().b
|
||||
|
||||
|
||||
def globf(x):
|
||||
return x+1
|
||||
return x + 1
|
||||
|
||||
|
||||
class TestRaises(object):
|
||||
|
||||
def test_raises(self):
|
||||
s = 'qwe'
|
||||
s = "qwe"
|
||||
raises(TypeError, "int(s)")
|
||||
|
||||
def test_raises_doesnt(self):
|
||||
|
@ -140,12 +163,12 @@ class TestRaises(object):
|
|||
raise ValueError("demo error")
|
||||
|
||||
def test_tupleerror(self):
|
||||
a,b = [1]
|
||||
a, b = [1]
|
||||
|
||||
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
|
||||
l = [1,2,3]
|
||||
print ("l is %r" % l)
|
||||
a,b = l.pop()
|
||||
l = [1, 2, 3]
|
||||
print("l is %r" % l)
|
||||
a, b = l.pop()
|
||||
|
||||
def test_some_error(self):
|
||||
if namenotexi:
|
||||
|
@ -159,31 +182,35 @@ class TestRaises(object):
|
|||
def test_dynamic_compile_shows_nicely():
|
||||
import imp
|
||||
import sys
|
||||
src = 'def foo():\n assert 1 == 0\n'
|
||||
name = 'abc-123'
|
||||
|
||||
src = "def foo():\n assert 1 == 0\n"
|
||||
name = "abc-123"
|
||||
module = imp.new_module(name)
|
||||
code = _pytest._code.compile(src, name, 'exec')
|
||||
code = _pytest._code.compile(src, name, "exec")
|
||||
py.builtin.exec_(code, module.__dict__)
|
||||
sys.modules[name] = module
|
||||
module.foo()
|
||||
|
||||
|
||||
|
||||
class TestMoreErrors(object):
|
||||
|
||||
def test_complex_error(self):
|
||||
|
||||
def f():
|
||||
return 44
|
||||
|
||||
def g():
|
||||
return 43
|
||||
|
||||
somefunc(f(), g())
|
||||
|
||||
def test_z1_unpack_error(self):
|
||||
l = []
|
||||
a,b = l
|
||||
a, b = l
|
||||
|
||||
def test_z2_type_error(self):
|
||||
l = 3
|
||||
a,b = l
|
||||
a, b = l
|
||||
|
||||
def test_startswith(self):
|
||||
s = "123"
|
||||
|
@ -191,17 +218,20 @@ class TestMoreErrors(object):
|
|||
assert s.startswith(g)
|
||||
|
||||
def test_startswith_nested(self):
|
||||
|
||||
def f():
|
||||
return "123"
|
||||
|
||||
def g():
|
||||
return "456"
|
||||
|
||||
assert f().startswith(g())
|
||||
|
||||
def test_global_func(self):
|
||||
assert isinstance(globf(42), float)
|
||||
|
||||
def test_instance(self):
|
||||
self.x = 6*7
|
||||
self.x = 6 * 7
|
||||
assert self.x != 42
|
||||
|
||||
def test_compare(self):
|
||||
|
@ -218,23 +248,31 @@ class TestMoreErrors(object):
|
|||
class TestCustomAssertMsg(object):
|
||||
|
||||
def test_single_line(self):
|
||||
|
||||
class A(object):
|
||||
a = 1
|
||||
|
||||
b = 2
|
||||
assert A.a == b, "A.a appears not to be b"
|
||||
|
||||
def test_multiline(self):
|
||||
|
||||
class A(object):
|
||||
a = 1
|
||||
|
||||
b = 2
|
||||
assert A.a == b, "A.a appears not to be b\n" \
|
||||
"or does not appear to be b\none of those"
|
||||
assert (
|
||||
A.a == b
|
||||
), "A.a appears not to be b\n" "or does not appear to be b\none of those"
|
||||
|
||||
def test_custom_repr(self):
|
||||
|
||||
class JSON(object):
|
||||
a = 1
|
||||
|
||||
def __repr__(self):
|
||||
return "This is JSON\n{\n 'foo': 'bar'\n}"
|
||||
|
||||
a = JSON()
|
||||
b = 2
|
||||
assert a.a == b, a
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
import pytest, py

mydir = py.path.local(__file__).dirpath()


def pytest_runtest_setup(item):
if isinstance(item, pytest.Function):
if not item.fspath.relto(mydir):
return
mod = item.getparent(pytest.Module).obj
if hasattr(mod, 'hello'):
print ("mod.hello %r" % (mod.hello,))
if hasattr(mod, "hello"):
print("mod.hello %r" % (mod.hello,))
|
|
|
@ -1,5 +1,6 @@
|
|||

hello = "world"


def test_func():
pass
|
|
|
@ -1,14 +1,14 @@
|
|||

import py
failure_demo = py.path.local(__file__).dirpath('failure_demo.py')
pytest_plugins = 'pytester',

failure_demo = py.path.local(__file__).dirpath("failure_demo.py")
pytest_plugins = "pytester",


def test_failure_demo_fails_properly(testdir):
target = testdir.tmpdir.join(failure_demo.basename)
failure_demo.copy(target)
failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
result = testdir.runpytest(target, syspathinsert=True)
result.stdout.fnmatch_lines([
"*42 failed*"
])
result.stdout.fnmatch_lines(["*42 failed*"])
assert result.ret != 0
|
|
|
@ -1,7 +1,9 @@
|
|||
def setup_module(module):
|
||||
module.TestStateFullThing.classcount = 0
|
||||
|
||||
|
||||
class TestStateFullThing(object):
|
||||
|
||||
def setup_class(cls):
|
||||
cls.classcount += 1
|
||||
|
||||
|
@ -19,9 +21,11 @@ class TestStateFullThing(object):
|
|||
assert self.classcount == 1
|
||||
assert self.id == 23
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
assert module.TestStateFullThing.classcount == 0
|
||||
|
||||
|
||||
""" For this example the control flow happens as follows::
|
||||
import test_setup_flow_example
|
||||
setup_module(test_setup_flow_example)
|
||||
|
|
|
@ -1,16 +1,20 @@
|
|||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture("session")
|
||||
def setup(request):
|
||||
setup = CostlySetup()
|
||||
yield setup
|
||||
setup.finalize()
|
||||
|
||||
|
||||
class CostlySetup(object):
|
||||
|
||||
def __init__(self):
|
||||
import time
|
||||
print ("performing costly setup")
|
||||
|
||||
print("performing costly setup")
|
||||
time.sleep(5)
|
||||
self.timecostly = 1
|
||||
|
||||
|
|
|
@ -1,3 +1,2 @@
|
|||

def test_quick(setup):
pass
|
|
|
@ -1,5 +1,6 @@
|
|||
def test_something(setup):
assert setup.timecostly == 1


def test_something_more(setup):
assert setup.timecostly == 1
|
|
|
@ -6,35 +6,48 @@ import py
|
|||
import pytest
|
||||
import _pytest._code
|
||||
|
||||
pythonlist = ['python2.7', 'python3.4', 'python3.5']
|
||||
pythonlist = ["python2.7", "python3.4", "python3.5"]
|
||||
|
||||
|
||||
@pytest.fixture(params=pythonlist)
|
||||
def python1(request, tmpdir):
|
||||
picklefile = tmpdir.join("data.pickle")
|
||||
return Python(request.param, picklefile)
|
||||
|
||||
|
||||
@pytest.fixture(params=pythonlist)
|
||||
def python2(request, python1):
|
||||
return Python(request.param, python1.picklefile)
|
||||
|
||||
|
||||
class Python(object):
|
||||
|
||||
def __init__(self, version, picklefile):
|
||||
self.pythonpath = py.path.local.sysfind(version)
|
||||
if not self.pythonpath:
|
||||
pytest.skip("%r not found" %(version,))
|
||||
pytest.skip("%r not found" % (version,))
|
||||
self.picklefile = picklefile
|
||||
|
||||
def dumps(self, obj):
|
||||
dumpfile = self.picklefile.dirpath("dump.py")
|
||||
dumpfile.write(_pytest._code.Source("""
|
||||
dumpfile.write(
|
||||
_pytest._code.Source(
|
||||
"""
|
||||
import pickle
|
||||
f = open(%r, 'wb')
|
||||
s = pickle.dump(%r, f, protocol=2)
|
||||
f.close()
|
||||
""" % (str(self.picklefile), obj)))
|
||||
py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))
|
||||
"""
|
||||
% (str(self.picklefile), obj)
|
||||
)
|
||||
)
|
||||
py.process.cmdexec("%s %s" % (self.pythonpath, dumpfile))
|
||||
|
||||
def load_and_is_true(self, expression):
|
||||
loadfile = self.picklefile.dirpath("load.py")
|
||||
loadfile.write(_pytest._code.Source("""
|
||||
loadfile.write(
|
||||
_pytest._code.Source(
|
||||
"""
|
||||
import pickle
|
||||
f = open(%r, 'rb')
|
||||
obj = pickle.load(f)
|
||||
|
@ -42,11 +55,15 @@ class Python(object):
|
|||
res = eval(%r)
|
||||
if not res:
|
||||
raise SystemExit(1)
|
||||
""" % (str(self.picklefile), expression)))
|
||||
print (loadfile)
|
||||
py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))
|
||||
"""
|
||||
% (str(self.picklefile), expression)
|
||||
)
|
||||
)
|
||||
print(loadfile)
|
||||
py.process.cmdexec("%s %s" % (self.pythonpath, loadfile))
|
||||
|
||||
@pytest.mark.parametrize("obj", [42, {}, {1:3},])
|
||||
|
||||
@pytest.mark.parametrize("obj", [42, {}, {1: 3}])
|
||||
def test_basic_objects(python1, python2, obj):
|
||||
python1.dumps(obj)
|
||||
python2.load_and_is_true("obj == %s" % obj)
|
||||
|
|
|
@ -2,18 +2,24 @@
|
|||
|
||||
import pytest
|
||||
|
||||
|
||||
def pytest_collect_file(parent, path):
|
||||
if path.ext == ".yml" and path.basename.startswith("test"):
|
||||
return YamlFile(path, parent)
|
||||
|
||||
|
||||
class YamlFile(pytest.File):
|
||||
|
||||
def collect(self):
|
||||
import yaml # we need a yaml parser, e.g. PyYAML
|
||||
import yaml # we need a yaml parser, e.g. PyYAML
|
||||
|
||||
raw = yaml.safe_load(self.fspath.open())
|
||||
for name, spec in sorted(raw.items()):
|
||||
yield YamlItem(name, self, spec)
|
||||
|
||||
|
||||
class YamlItem(pytest.Item):
|
||||
|
||||
def __init__(self, name, parent, spec):
|
||||
super(YamlItem, self).__init__(name, parent)
|
||||
self.spec = spec
|
||||
|
@ -27,14 +33,17 @@ class YamlItem(pytest.Item):
|
|||
def repr_failure(self, excinfo):
|
||||
""" called when self.runtest() raises an exception. """
|
||||
if isinstance(excinfo.value, YamlException):
|
||||
return "\n".join([
|
||||
"usecase execution failed",
|
||||
" spec failed: %r: %r" % excinfo.value.args[1:3],
|
||||
" no further details known at this point."
|
||||
])
|
||||
return "\n".join(
|
||||
[
|
||||
"usecase execution failed",
|
||||
" spec failed: %r: %r" % excinfo.value.args[1:3],
|
||||
" no further details known at this point.",
|
||||
]
|
||||
)
|
||||
|
||||
def reportinfo(self):
|
||||
return self.fspath, 0, "usecase: %s" % self.name
|
||||
|
||||
|
||||
class YamlException(Exception):
|
||||
""" custom exception for error reporting. """
|
||||
|
|
|
@ -3,10 +3,13 @@ import pytest
|
|||
|
||||
py3 = sys.version_info[0] >= 3
|
||||
|
||||
|
||||
class DummyCollector(pytest.collect.File):
|
||||
|
||||
def collect(self):
|
||||
return []
|
||||
|
||||
|
||||
def pytest_pycollect_makemodule(path, parent):
|
||||
bn = path.basename
|
||||
if "py3" in bn and not py3 or ("py2" in bn and py3):
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
|
||||
def test_exception_syntax():
|
||||
try:
|
||||
0/0
|
||||
0 / 0
|
||||
except ZeroDivisionError as e:
|
||||
pass
|
||||
|
|
|
@ -4,8 +4,11 @@
|
|||
def test_function():
|
||||
pass
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
|
||||
def test_method(self):
|
||||
pass
|
||||
|
||||
def test_anothermethod(self):
|
||||
pass
|
||||
|
|
|
@ -1,29 +1,37 @@
|
|||
import pytest
|
||||
|
||||
xfail = pytest.mark.xfail
|
||||
|
||||
|
||||
@xfail
|
||||
def test_hello():
|
||||
assert 0
|
||||
|
||||
|
||||
@xfail(run=False)
|
||||
def test_hello2():
|
||||
assert 0
|
||||
|
||||
|
||||
@xfail("hasattr(os, 'sep')")
|
||||
def test_hello3():
|
||||
assert 0
|
||||
|
||||
|
||||
@xfail(reason="bug 110")
|
||||
def test_hello4():
|
||||
assert 0
|
||||
|
||||
|
||||
@xfail('pytest.__version__[0] != "17"')
|
||||
def test_hello5():
|
||||
assert 0
|
||||
|
||||
|
||||
def test_hello6():
|
||||
pytest.xfail("reason")
|
||||
|
||||
|
||||
@xfail(raises=IndexError)
|
||||
def test_hello7():
|
||||
x = []
|
||||
|
|
|
@ -4,6 +4,7 @@ import inspect
|
|||
|
||||
|
||||
class Writer(object):
|
||||
|
||||
def __init__(self, clsname):
|
||||
self.clsname = clsname
|
||||
|
||||
|
@ -21,13 +22,11 @@ class Writer(object):
|
|||
def docmethod(self, method):
|
||||
doc = " ".join(method.__doc__.split())
|
||||
indent = " "
|
||||
w = textwrap.TextWrapper(initial_indent=indent,
|
||||
subsequent_indent=indent)
|
||||
w = textwrap.TextWrapper(initial_indent=indent, subsequent_indent=indent)
|
||||
|
||||
spec = inspect.getargspec(method)
|
||||
del spec.args[0]
|
||||
self.line(".. py:method:: " + method.__name__ +
|
||||
inspect.formatargspec(*spec))
|
||||
self.line(".. py:method:: " + method.__name__ + inspect.formatargspec(*spec))
|
||||
self.line("")
|
||||
self.line(w.fill(doc))
|
||||
self.line("")
|
||||
|
|
|
@ -15,16 +15,16 @@ def get_issues():
|
|||
data = r.json()
|
||||
if r.status_code == 403:
|
||||
# API request limit exceeded
|
||||
print(data['message'])
|
||||
print(data["message"])
|
||||
exit(1)
|
||||
issues.extend(data)
|
||||
|
||||
# Look for next page
|
||||
links = requests.utils.parse_header_links(r.headers['Link'])
|
||||
links = requests.utils.parse_header_links(r.headers["Link"])
|
||||
another_page = False
|
||||
for link in links:
|
||||
if link['rel'] == 'next':
|
||||
url = link['url']
|
||||
if link["rel"] == "next":
|
||||
url = link["url"]
|
||||
another_page = True
|
||||
if not another_page:
|
||||
return issues
|
||||
|
@ -45,11 +45,11 @@ def main(args):
|
|||
|
||||
|
||||
def _get_kind(issue):
|
||||
labels = [l['name'] for l in issue['labels']]
|
||||
for key in ('bug', 'enhancement', 'proposal'):
|
||||
labels = [l["name"] for l in issue["labels"]]
|
||||
for key in ("bug", "enhancement", "proposal"):
|
||||
if key in labels:
|
||||
return key
|
||||
return 'issue'
|
||||
return "issue"
|
||||
|
||||
|
||||
def report(issues):
|
||||
|
@ -63,20 +63,23 @@ def report(issues):
|
|||
print("----")
|
||||
print(status, kind, link)
|
||||
print(title)
|
||||
#print()
|
||||
#lines = body.split("\n")
|
||||
#print ("\n".join(lines[:3]))
|
||||
#if len(lines) > 3 or len(body) > 240:
|
||||
# print()
|
||||
# lines = body.split("\n")
|
||||
# print ("\n".join(lines[:3]))
|
||||
# if len(lines) > 3 or len(body) > 240:
|
||||
# print ("...")
|
||||
print("\n\nFound %s open issues" % len(issues))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser("process bitbucket issues")
|
||||
parser.add_argument("--refresh", action="store_true",
|
||||
help="invalidate cache, refresh issues")
|
||||
parser.add_argument("--cache", action="store", default="issues.json",
|
||||
help="cache file")
|
||||
parser.add_argument(
|
||||
"--refresh", action="store_true", help="invalidate cache, refresh issues"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--cache", action="store", default="issues.json", help="cache file"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
main(args)
|
||||
|
|
pytest.py
|
@ -6,10 +6,7 @@ pytest: unit and functional testing with Python.
|
|||
|
||||
# else we are imported
|
||||
|
||||
from _pytest.config import (
|
||||
main, UsageError, cmdline,
|
||||
hookspec, hookimpl
|
||||
)
|
||||
from _pytest.config import main, UsageError, cmdline, hookspec, hookimpl
|
||||
from _pytest.fixtures import fixture, yield_fixture
|
||||
from _pytest.assertion import register_assert_rewrite
|
||||
from _pytest.freeze_support import freeze_includes
|
||||
|
@ -21,58 +18,55 @@ from _pytest.mark import MARK_GEN as mark, param
|
|||
from _pytest.main import Session
|
||||
from _pytest.nodes import Item, Collector, File
|
||||
from _pytest.fixtures import fillfixtures as _fillfuncargs
|
||||
from _pytest.python import (
|
||||
Module, Class, Instance, Function, Generator,
|
||||
)
|
||||
from _pytest.python import Module, Class, Instance, Function, Generator
|
||||
|
||||
from _pytest.python_api import approx, raises
|
||||
|
||||
set_trace = __pytestPDB.set_trace
|
||||
|
||||
__all__ = [
|
||||
'main',
|
||||
'UsageError',
|
||||
'cmdline',
|
||||
'hookspec',
|
||||
'hookimpl',
|
||||
'__version__',
|
||||
'register_assert_rewrite',
|
||||
'freeze_includes',
|
||||
'set_trace',
|
||||
'warns',
|
||||
'deprecated_call',
|
||||
'fixture',
|
||||
'yield_fixture',
|
||||
'fail',
|
||||
'skip',
|
||||
'xfail',
|
||||
'importorskip',
|
||||
'exit',
|
||||
'mark',
|
||||
'param',
|
||||
'approx',
|
||||
'_fillfuncargs',
|
||||
|
||||
'Item',
|
||||
'File',
|
||||
'Collector',
|
||||
'Session',
|
||||
'Module',
|
||||
'Class',
|
||||
'Instance',
|
||||
'Function',
|
||||
'Generator',
|
||||
'raises',
|
||||
|
||||
|
||||
"main",
|
||||
"UsageError",
|
||||
"cmdline",
|
||||
"hookspec",
|
||||
"hookimpl",
|
||||
"__version__",
|
||||
"register_assert_rewrite",
|
||||
"freeze_includes",
|
||||
"set_trace",
|
||||
"warns",
|
||||
"deprecated_call",
|
||||
"fixture",
|
||||
"yield_fixture",
|
||||
"fail",
|
||||
"skip",
|
||||
"xfail",
|
||||
"importorskip",
|
||||
"exit",
|
||||
"mark",
|
||||
"param",
|
||||
"approx",
|
||||
"_fillfuncargs",
|
||||
"Item",
|
||||
"File",
|
||||
"Collector",
|
||||
"Session",
|
||||
"Module",
|
||||
"Class",
|
||||
"Instance",
|
||||
"Function",
|
||||
"Generator",
|
||||
"raises",
|
||||
]
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
# if run as a script or by 'python -m pytest'
|
||||
# we trigger the below "else" condition by the following import
|
||||
import pytest
|
||||
|
||||
raise SystemExit(pytest.main())
|
||||
else:
|
||||
|
||||
from _pytest.compat import _setup_collect_fakemodule
|
||||
|
||||
_setup_collect_fakemodule()
|
||||
|
|
|
@ -5,7 +5,16 @@ import subprocess
|
|||
import glob
import sys

sys.exit(subprocess.call([
'rst-lint', '--encoding', 'utf-8',
'CHANGELOG.rst', 'HOWTORELEASE.rst', 'README.rst',
] + glob.glob('changelog/[0-9]*.*')))
sys.exit(
subprocess.call(
[
"rst-lint",
"--encoding",
"utf-8",
"CHANGELOG.rst",
"HOWTORELEASE.rst",
"README.rst",
]
+ glob.glob("changelog/[0-9]*.*")
)
)
|
|
setup.py
|
@ -5,21 +5,21 @@ import pkg_resources
|
|||
from setuptools import setup, Command
|
||||
|
||||
classifiers = [
|
||||
'Development Status :: 6 - Mature',
|
||||
'Intended Audience :: Developers',
|
||||
'License :: OSI Approved :: MIT License',
|
||||
'Operating System :: POSIX',
|
||||
'Operating System :: Microsoft :: Windows',
|
||||
'Operating System :: MacOS :: MacOS X',
|
||||
'Topic :: Software Development :: Testing',
|
||||
'Topic :: Software Development :: Libraries',
|
||||
'Topic :: Utilities',
|
||||
"Development Status :: 6 - Mature",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Operating System :: POSIX",
|
||||
"Operating System :: Microsoft :: Windows",
|
||||
"Operating System :: MacOS :: MacOS X",
|
||||
"Topic :: Software Development :: Testing",
|
||||
"Topic :: Software Development :: Libraries",
|
||||
"Topic :: Utilities",
|
||||
] + [
|
||||
('Programming Language :: Python :: %s' % x)
|
||||
for x in '2 2.7 3 3.4 3.5 3.6 3.7'.split()
|
||||
("Programming Language :: Python :: %s" % x)
|
||||
for x in "2 2.7 3 3.4 3.5 3.6 3.7".split()
|
||||
]
|
||||
|
||||
with open('README.rst') as fd:
|
||||
with open("README.rst") as fd:
|
||||
long_description = fd.read()
|
||||
|
||||
|
||||
|
@ -44,9 +44,9 @@ def get_environment_marker_support_level():
|
|||
"""
|
||||
try:
|
||||
version = pkg_resources.parse_version(setuptools.__version__)
|
||||
if version >= pkg_resources.parse_version('36.2.2'):
|
||||
if version >= pkg_resources.parse_version("36.2.2"):
|
||||
return 2
|
||||
if version >= pkg_resources.parse_version('0.7.2'):
|
||||
if version >= pkg_resources.parse_version("0.7.2"):
|
||||
return 1
|
||||
except Exception as exc:
|
||||
sys.stderr.write("Could not test setuptool's version: %s\n" % exc)
|
||||
|
@ -56,59 +56,57 @@ def get_environment_marker_support_level():
|
|||
def main():
|
||||
extras_require = {}
|
||||
install_requires = [
|
||||
'py>=1.5.0',
|
||||
'six>=1.10.0',
|
||||
'setuptools',
|
||||
'attrs>=17.4.0',
|
||||
'more-itertools>=4.0.0',
|
||||
'atomicwrites>=1.0',
|
||||
"py>=1.5.0",
|
||||
"six>=1.10.0",
|
||||
"setuptools",
|
||||
"attrs>=17.4.0",
|
||||
"more-itertools>=4.0.0",
|
||||
"atomicwrites>=1.0",
|
||||
]
|
||||
# if _PYTEST_SETUP_SKIP_PLUGGY_DEP is set, skip installing pluggy;
|
||||
# used by tox.ini to test with pluggy master
|
||||
if '_PYTEST_SETUP_SKIP_PLUGGY_DEP' not in os.environ:
|
||||
install_requires.append('pluggy>=0.5,<0.7')
|
||||
if "_PYTEST_SETUP_SKIP_PLUGGY_DEP" not in os.environ:
|
||||
install_requires.append("pluggy>=0.5,<0.7")
|
||||
environment_marker_support_level = get_environment_marker_support_level()
|
||||
if environment_marker_support_level >= 2:
|
||||
install_requires.append('funcsigs;python_version<"3.0"')
|
||||
install_requires.append('colorama;sys_platform=="win32"')
|
||||
elif environment_marker_support_level == 1:
|
||||
extras_require[':python_version<"3.0"'] = ['funcsigs']
|
||||
extras_require[':sys_platform=="win32"'] = ['colorama']
|
||||
extras_require[':python_version<"3.0"'] = ["funcsigs"]
|
||||
extras_require[':sys_platform=="win32"'] = ["colorama"]
|
||||
else:
|
||||
if sys.platform == 'win32':
|
||||
install_requires.append('colorama')
|
||||
if sys.platform == "win32":
|
||||
install_requires.append("colorama")
|
||||
if sys.version_info < (3, 0):
|
||||
install_requires.append('funcsigs')
|
||||
install_requires.append("funcsigs")
|
||||
|
||||
setup(
|
||||
name='pytest',
|
||||
description='pytest: simple powerful testing with Python',
|
||||
name="pytest",
|
||||
description="pytest: simple powerful testing with Python",
|
||||
long_description=long_description,
|
||||
use_scm_version={
|
||||
'write_to': '_pytest/_version.py',
|
||||
},
|
||||
url='http://pytest.org',
|
||||
use_scm_version={"write_to": "_pytest/_version.py"},
|
||||
url="http://pytest.org",
|
||||
project_urls={
|
||||
'Source': 'https://github.com/pytest-dev/pytest',
|
||||
'Tracker': 'https://github.com/pytest-dev/pytest/issues',
|
||||
"Source": "https://github.com/pytest-dev/pytest",
|
||||
"Tracker": "https://github.com/pytest-dev/pytest/issues",
|
||||
},
|
||||
license='MIT license',
|
||||
platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
|
||||
license="MIT license",
|
||||
platforms=["unix", "linux", "osx", "cygwin", "win32"],
|
||||
author=(
|
||||
'Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, '
|
||||
'Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others'),
|
||||
entry_points={'console_scripts': [
|
||||
'pytest=pytest:main', 'py.test=pytest:main']},
|
||||
"Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, "
|
||||
"Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others"
|
||||
),
|
||||
entry_points={"console_scripts": ["pytest=pytest:main", "py.test=pytest:main"]},
|
||||
classifiers=classifiers,
|
||||
keywords="test unittest",
|
||||
cmdclass={'test': PyTest},
|
||||
cmdclass={"test": PyTest},
|
||||
# the following should be enabled for release
|
||||
setup_requires=['setuptools-scm'],
|
||||
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
|
||||
setup_requires=["setuptools-scm"],
|
||||
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
|
||||
install_requires=install_requires,
|
||||
extras_require=extras_require,
|
||||
packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.mark'],
|
||||
py_modules=['pytest'],
|
||||
packages=["_pytest", "_pytest.assertion", "_pytest._code", "_pytest.mark"],
|
||||
py_modules=["pytest"],
|
||||
zip_safe=False,
|
||||
)
|
||||
|
||||
|
@ -124,12 +122,13 @@ class PyTest(Command):
|
|||
|
||||
def run(self):
|
||||
import subprocess
|
||||
PPATH = [x for x in os.environ.get('PYTHONPATH', '').split(':') if x]
|
||||
|
||||
PPATH = [x for x in os.environ.get("PYTHONPATH", "").split(":") if x]
|
||||
PPATH.insert(0, os.getcwd())
|
||||
os.environ['PYTHONPATH'] = ':'.join(PPATH)
|
||||
errno = subprocess.call([sys.executable, 'pytest.py', '--ignore=doc'])
|
||||
os.environ["PYTHONPATH"] = ":".join(PPATH)
|
||||
errno = subprocess.call([sys.executable, "pytest.py", "--ignore=doc"])
|
||||
raise SystemExit(errno)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
@ -7,6 +7,4 @@ import invoke
|
|||
from . import generate


ns = invoke.Collection(
generate,
)
ns = invoke.Collection(generate)
|
|
|
@ -7,54 +7,66 @@ from subprocess import check_output, check_call
|
|||
import invoke
|
||||
|
||||
|
||||
@invoke.task(help={
|
||||
'version': 'version being released',
|
||||
})
|
||||
@invoke.task(help={"version": "version being released"})
|
||||
def announce(ctx, version):
|
||||
"""Generates a new release announcement entry in the docs."""
|
||||
# Get our list of authors
|
||||
stdout = check_output(["git", "describe", "--abbrev=0", '--tags'])
|
||||
stdout = stdout.decode('utf-8')
|
||||
stdout = check_output(["git", "describe", "--abbrev=0", "--tags"])
|
||||
stdout = stdout.decode("utf-8")
|
||||
last_version = stdout.strip()
|
||||
|
||||
stdout = check_output(["git", "log", "{}..HEAD".format(last_version), "--format=%aN"])
|
||||
stdout = stdout.decode('utf-8')
|
||||
stdout = check_output(
|
||||
["git", "log", "{}..HEAD".format(last_version), "--format=%aN"]
|
||||
)
|
||||
stdout = stdout.decode("utf-8")
|
||||
|
||||
contributors = set(stdout.splitlines())
|
||||
|
||||
template_name = 'release.minor.rst' if version.endswith('.0') else 'release.patch.rst'
|
||||
template_text = Path(__file__).parent.joinpath(template_name).read_text(encoding='UTF-8')
|
||||
template_name = "release.minor.rst" if version.endswith(
|
||||
".0"
|
||||
) else "release.patch.rst"
|
||||
template_text = Path(__file__).parent.joinpath(template_name).read_text(
|
||||
encoding="UTF-8"
|
||||
)
|
||||
|
||||
contributors_text = '\n'.join('* {}'.format(name) for name in sorted(contributors)) + '\n'
|
||||
contributors_text = "\n".join(
|
||||
"* {}".format(name) for name in sorted(contributors)
|
||||
) + "\n"
|
||||
text = template_text.format(version=version, contributors=contributors_text)
|
||||
|
||||
target = Path(__file__).parent.joinpath('../doc/en/announce/release-{}.rst'.format(version))
|
||||
target.write_text(text, encoding='UTF-8')
|
||||
target = Path(__file__).parent.joinpath(
|
||||
"../doc/en/announce/release-{}.rst".format(version)
|
||||
)
|
||||
target.write_text(text, encoding="UTF-8")
|
||||
print("[generate.announce] Generated {}".format(target.name))
|
||||
|
||||
# Update index with the new release entry
|
||||
index_path = Path(__file__).parent.joinpath('../doc/en/announce/index.rst')
|
||||
lines = index_path.read_text(encoding='UTF-8').splitlines()
|
||||
indent = ' '
|
||||
index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst")
|
||||
lines = index_path.read_text(encoding="UTF-8").splitlines()
|
||||
indent = " "
|
||||
for index, line in enumerate(lines):
|
||||
if line.startswith('{}release-'.format(indent)):
|
||||
if line.startswith("{}release-".format(indent)):
|
||||
new_line = indent + target.stem
|
||||
if line != new_line:
|
||||
lines.insert(index, new_line)
|
||||
index_path.write_text('\n'.join(lines) + '\n', encoding='UTF-8')
|
||||
index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8")
|
||||
print("[generate.announce] Updated {}".format(index_path.name))
|
||||
else:
|
||||
print("[generate.announce] Skip {} (already contains release)".format(index_path.name))
|
||||
print(
|
||||
"[generate.announce] Skip {} (already contains release)".format(
|
||||
index_path.name
|
||||
)
|
||||
)
|
||||
break
|
||||
|
||||
check_call(['git', 'add', str(target)])
|
||||
check_call(["git", "add", str(target)])
|
||||
|
||||
|
||||
@invoke.task()
|
||||
def regen(ctx):
|
||||
"""Call regendoc tool to update examples and pytest output in the docs."""
|
||||
print("[generate.regen] Updating docs")
|
||||
check_call(['tox', '-e', 'regen'])
|
||||
check_call(["tox", "-e", "regen"])
|
||||
|
||||
|
||||
@invoke.task()
|
||||
|
@ -62,9 +74,9 @@ def make_tag(ctx, version):
|
|||
"""Create a new, local tag for the release, only if the repository is clean."""
|
||||
from git import Repo
|
||||
|
||||
repo = Repo('.')
|
||||
repo = Repo(".")
|
||||
if repo.is_dirty():
|
||||
print('Current repository is dirty. Please commit any changes and try again.')
|
||||
print("Current repository is dirty. Please commit any changes and try again.")
|
||||
raise invoke.Exit(code=2)
|
||||
|
||||
tag_names = [x.name for x in repo.tags]
|
||||
|
@ -76,31 +88,31 @@ def make_tag(ctx, version):
|
|||
repo.create_tag(version)
|
||||
|
||||
|
||||
@invoke.task(help={
|
||||
'version': 'version being released',
|
||||
})
|
||||
@invoke.task(help={"version": "version being released"})
|
||||
def pre_release(ctx, version):
|
||||
"""Generates new docs, release announcements and creates a local tag."""
|
||||
announce(ctx, version)
|
||||
regen(ctx)
|
||||
changelog(ctx, version, write_out=True)
|
||||
|
||||
msg = 'Preparing release version {}'.format(version)
|
||||
check_call(['git', 'commit', '-a', '-m', msg])
|
||||
msg = "Preparing release version {}".format(version)
|
||||
check_call(["git", "commit", "-a", "-m", msg])
|
||||
|
||||
make_tag(ctx, version)
|
||||
|
||||
print()
|
||||
print('[generate.pre_release] Please push your branch and open a PR.')
|
||||
print("[generate.pre_release] Please push your branch and open a PR.")
|
||||
|
||||
|
||||
@invoke.task(help={
|
||||
'version': 'version being released',
|
||||
'write_out': 'write changes to the actual changelog'
|
||||
})
|
||||
@invoke.task(
|
||||
help={
|
||||
"version": "version being released",
|
||||
"write_out": "write changes to the actual changelog",
|
||||
}
|
||||
)
|
||||
def changelog(ctx, version, write_out=False):
|
||||
if write_out:
|
||||
addopts = []
|
||||
else:
|
||||
addopts = ['--draft']
|
||||
check_call(['towncrier', '--yes', '--version', version] + addopts)
|
||||
addopts = ["--draft"]
|
||||
check_call(["towncrier", "--yes", "--version", version] + addopts)
|
||||
|
|
File diff suppressed because it is too large
|
@ -9,15 +9,15 @@ from test_excinfo import TWMock
|
|||
|
||||
|
||||
def test_ne():
|
||||
code1 = _pytest._code.Code(compile('foo = "bar"', '', 'exec'))
|
||||
code1 = _pytest._code.Code(compile('foo = "bar"', "", "exec"))
|
||||
assert code1 == code1
|
||||
code2 = _pytest._code.Code(compile('foo = "baz"', '', 'exec'))
|
||||
code2 = _pytest._code.Code(compile('foo = "baz"', "", "exec"))
|
||||
assert code2 != code1
|
||||
|
||||
|
||||
def test_code_gives_back_name_for_not_existing_file():
|
||||
name = 'abc-123'
|
||||
co_code = compile("pass\n", name, 'exec')
|
||||
name = "abc-123"
|
||||
co_code = compile("pass\n", name, "exec")
|
||||
assert co_code.co_filename == name
|
||||
code = _pytest._code.Code(co_code)
|
||||
assert str(code.path) == name
|
||||
|
@ -25,12 +25,15 @@ def test_code_gives_back_name_for_not_existing_file():
|
|||
|
||||
|
||||
def test_code_with_class():
|
||||
|
||||
class A(object):
|
||||
pass
|
||||
|
||||
pytest.raises(TypeError, "_pytest._code.Code(A)")
|
||||
|
||||
|
||||
if True:
|
||||
|
||||
def x():
|
||||
pass
|
||||
|
||||
|
@ -38,7 +41,7 @@ if True:
|
|||
def test_code_fullsource():
|
||||
code = _pytest._code.Code(x)
|
||||
full = code.fullsource
|
||||
assert 'test_code_fullsource()' in str(full)
|
||||
assert "test_code_fullsource()" in str(full)
|
||||
|
||||
|
||||
def test_code_source():
|
||||
|
@ -50,8 +53,10 @@ def test_code_source():
|
|||
|
||||
|
||||
def test_frame_getsourcelineno_myself():
|
||||
|
||||
def func():
|
||||
return sys._getframe(0)
|
||||
|
||||
f = func()
|
||||
f = _pytest._code.Frame(f)
|
||||
source, lineno = f.code.fullsource, f.lineno
|
||||
|
@ -59,8 +64,10 @@ def test_frame_getsourcelineno_myself():
|
|||
|
||||
|
||||
def test_getstatement_empty_fullsource():
|
||||
|
||||
def func():
|
||||
return sys._getframe(0)
|
||||
|
||||
f = func()
|
||||
f = _pytest._code.Frame(f)
|
||||
prop = f.code.__class__.fullsource
|
||||
|
@ -78,7 +85,7 @@ def test_code_from_func():
|
|||
|
||||
|
||||
def test_unicode_handling():
|
||||
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
|
||||
value = py.builtin._totext("\xc4\x85\xc4\x87\n", "utf-8").encode("utf8")
|
||||
|
||||
def f():
|
||||
raise Exception(value)
|
||||
|
@ -89,12 +96,12 @@ def test_unicode_handling():
|
|||
unicode(excinfo)
|
||||
|
||||
|
||||
@pytest.mark.skipif(sys.version_info[0] >= 3, reason='python 2 only issue')
|
||||
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="python 2 only issue")
|
||||
def test_unicode_handling_syntax_error():
|
||||
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
|
||||
value = py.builtin._totext("\xc4\x85\xc4\x87\n", "utf-8").encode("utf8")
|
||||
|
||||
def f():
|
||||
raise SyntaxError('invalid syntax', (None, 1, 3, value))
|
||||
raise SyntaxError("invalid syntax", (None, 1, 3, value))
|
||||
|
||||
excinfo = pytest.raises(Exception, f)
|
||||
str(excinfo)
|
||||
|
@ -103,48 +110,57 @@ def test_unicode_handling_syntax_error():
|
|||
|
||||
|
||||
def test_code_getargs():
|
||||
|
||||
def f1(x):
|
||||
pass
|
||||
|
||||
c1 = _pytest._code.Code(f1)
|
||||
assert c1.getargs(var=True) == ('x',)
|
||||
assert c1.getargs(var=True) == ("x",)
|
||||
|
||||
def f2(x, *y):
|
||||
pass
|
||||
|
||||
c2 = _pytest._code.Code(f2)
|
||||
assert c2.getargs(var=True) == ('x', 'y')
|
||||
assert c2.getargs(var=True) == ("x", "y")
|
||||
|
||||
def f3(x, **z):
|
||||
pass
|
||||
|
||||
c3 = _pytest._code.Code(f3)
|
||||
assert c3.getargs(var=True) == ('x', 'z')
|
||||
assert c3.getargs(var=True) == ("x", "z")
|
||||
|
||||
def f4(x, *y, **z):
|
||||
pass
|
||||
|
||||
c4 = _pytest._code.Code(f4)
|
||||
assert c4.getargs(var=True) == ('x', 'y', 'z')
|
||||
assert c4.getargs(var=True) == ("x", "y", "z")
|
||||
|
||||
|
||||
def test_frame_getargs():
|
||||
|
||||
def f1(x):
|
||||
return sys._getframe(0)
|
||||
fr1 = _pytest._code.Frame(f1('a'))
|
||||
assert fr1.getargs(var=True) == [('x', 'a')]
|
||||
|
||||
fr1 = _pytest._code.Frame(f1("a"))
|
||||
assert fr1.getargs(var=True) == [("x", "a")]
|
||||
|
||||
def f2(x, *y):
|
||||
return sys._getframe(0)
|
||||
fr2 = _pytest._code.Frame(f2('a', 'b', 'c'))
|
||||
assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
|
||||
|
||||
fr2 = _pytest._code.Frame(f2("a", "b", "c"))
|
||||
assert fr2.getargs(var=True) == [("x", "a"), ("y", ("b", "c"))]
|
||||
|
||||
def f3(x, **z):
|
||||
return sys._getframe(0)
|
||||
fr3 = _pytest._code.Frame(f3('a', b='c'))
|
||||
assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
|
||||
|
||||
fr3 = _pytest._code.Frame(f3("a", b="c"))
|
||||
assert fr3.getargs(var=True) == [("x", "a"), ("z", {"b": "c"})]
|
||||
|
||||
def f4(x, *y, **z):
|
||||
return sys._getframe(0)
|
||||
fr4 = _pytest._code.Frame(f4('a', 'b', c='d'))
|
||||
assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
|
||||
('z', {'c': 'd'})]
|
||||
|
||||
fr4 = _pytest._code.Frame(f4("a", "b", c="d"))
|
||||
assert fr4.getargs(var=True) == [("x", "a"), ("y", ("b",)), ("z", {"c": "d"})]
|
||||
|
||||
|
||||
class TestExceptionInfo(object):
|
||||
|
@ -173,7 +189,7 @@ class TestTracebackEntry(object):
|
|||
entry = exci.traceback[0]
|
||||
source = entry.getsource()
|
||||
assert len(source) == 6
|
||||
assert 'assert False' in source[5]
|
||||
assert "assert False" in source[5]
|
||||
|
||||
|
||||
class TestReprFuncArgs(object):
|
||||
|
@ -183,14 +199,11 @@ class TestReprFuncArgs(object):
|
|||
|
||||
tw = TWMock()
|
||||
|
||||
args = [
|
||||
('unicode_string', u"São Paulo"),
|
||||
('utf8_string', 'S\xc3\xa3o Paulo'),
|
||||
]
|
||||
args = [("unicode_string", u"São Paulo"), ("utf8_string", "S\xc3\xa3o Paulo")]
|
||||
|
||||
r = ReprFuncArgs(args)
|
||||
r.toterminal(tw)
|
||||
if sys.version_info[0] >= 3:
|
||||
assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo'
|
||||
assert tw.lines[0] == "unicode_string = São Paulo, utf8_string = São Paulo"
|
||||
else:
|
||||
assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo'
|
||||
assert tw.lines[0] == "unicode_string = São Paulo, utf8_string = São Paulo"
|
||||
|
|
|
@ -23,14 +23,20 @@ def test_source_str_function():
|
|||
x = Source(" 3")
|
||||
assert str(x) == "3"
|
||||
|
||||
x = Source("""
|
||||
x = Source(
|
||||
"""
|
||||
3
|
||||
""", rstrip=False)
|
||||
""",
|
||||
rstrip=False,
|
||||
)
|
||||
assert str(x) == "\n3\n "
|
||||
|
||||
x = Source("""
|
||||
x = Source(
|
||||
"""
|
||||
3
|
||||
""", rstrip=True)
|
||||
""",
|
||||
rstrip=True,
|
||||
)
|
||||
assert str(x) == "\n3"
|
||||
|
||||
|
||||
|
@ -41,70 +47,81 @@ def test_unicode():
|
|||
return
|
||||
x = Source(unicode("4"))
|
||||
assert str(x) == "4"
|
||||
co = _pytest._code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
|
||||
co = _pytest._code.compile(unicode('u"\xc3\xa5"', "utf8"), mode="eval")
|
||||
val = eval(co)
|
||||
assert isinstance(val, unicode)
|
||||
|
||||
|
||||
def test_source_from_function():
|
||||
source = _pytest._code.Source(test_source_str_function)
|
||||
assert str(source).startswith('def test_source_str_function():')
|
||||
assert str(source).startswith("def test_source_str_function():")
|
||||
|
||||
|
||||
def test_source_from_method():
|
||||
|
||||
class TestClass(object):
|
||||
|
||||
def test_method(self):
|
||||
pass
|
||||
|
||||
source = _pytest._code.Source(TestClass().test_method)
|
||||
assert source.lines == ["def test_method(self):",
|
||||
" pass"]
|
||||
assert source.lines == ["def test_method(self):", " pass"]
|
||||
|
||||
|
||||
def test_source_from_lines():
|
||||
lines = ["a \n", "b\n", "c"]
|
||||
source = _pytest._code.Source(lines)
|
||||
assert source.lines == ['a ', 'b', 'c']
|
||||
assert source.lines == ["a ", "b", "c"]
|
||||
|
||||
|
||||
def test_source_from_inner_function():
|
||||
|
||||
def f():
|
||||
pass
|
||||
|
||||
source = _pytest._code.Source(f, deindent=False)
|
||||
assert str(source).startswith(' def f():')
|
||||
assert str(source).startswith(" def f():")
|
||||
source = _pytest._code.Source(f)
|
||||
assert str(source).startswith('def f():')
|
||||
assert str(source).startswith("def f():")
|
||||
|
||||
|
||||
def test_source_putaround_simple():
|
||||
source = Source("raise ValueError")
|
||||
source = source.putaround(
|
||||
"try:", """\
|
||||
"try:",
|
||||
"""\
|
||||
except ValueError:
|
||||
x = 42
|
||||
else:
|
||||
x = 23""")
|
||||
assert str(source) == """\
|
||||
x = 23""",
|
||||
)
|
||||
assert (
|
||||
str(source)
|
||||
== """\
|
||||
try:
|
||||
raise ValueError
|
||||
except ValueError:
|
||||
x = 42
|
||||
else:
|
||||
x = 23"""
|
||||
)
|
||||
|
||||
|
||||
def test_source_putaround():
|
||||
source = Source()
|
||||
source = source.putaround("""
|
||||
source = source.putaround(
|
||||
"""
|
||||
if 1:
|
||||
x=1
|
||||
""")
|
||||
"""
|
||||
)
|
||||
assert str(source).strip() == "if 1:\n x=1"
|
||||
|
||||
|
||||
def test_source_strips():
|
||||
source = Source("")
|
||||
assert source == Source()
|
||||
assert str(source) == ''
|
||||
assert str(source) == ""
|
||||
assert source.strip() == source
|
||||
|
||||
|
||||
|
@ -116,10 +133,10 @@ def test_source_strip_multiline():
|
|||
|
||||
|
||||
def test_syntaxerror_rerepresentation():
|
||||
ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz')
|
||||
ex = pytest.raises(SyntaxError, _pytest._code.compile, "xyz xyz")
|
||||
assert ex.value.lineno == 1
|
||||
assert ex.value.offset in (4, 7) # XXX pypy/jython versus cpython?
|
||||
assert ex.value.text.strip(), 'x x'
|
||||
assert ex.value.text.strip(), "x x"
|
||||
|
||||
|
||||
def test_isparseable():
|
||||
|
@ -132,12 +149,14 @@ def test_isparseable():
|
|||
|
||||
|
||||
class TestAccesses(object):
|
||||
source = Source("""\
|
||||
source = Source(
|
||||
"""\
|
||||
def f(x):
|
||||
pass
|
||||
def g(x):
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
def test_getrange(self):
|
||||
x = self.source[0:2]
|
||||
|
@ -158,18 +177,20 @@ class TestAccesses(object):
|
|||
|
||||
|
||||
class TestSourceParsingAndCompiling(object):
|
||||
source = Source("""\
|
||||
source = Source(
|
||||
"""\
|
||||
def f(x):
|
||||
assert (x ==
|
||||
3 +
|
||||
4)
|
||||
""").strip()
|
||||
"""
|
||||
).strip()
|
||||
|
||||
def test_compile(self):
|
||||
co = _pytest._code.compile("x=3")
|
||||
d = {}
|
||||
exec(co, d)
|
||||
assert d['x'] == 3
|
||||
assert d["x"] == 3
|
||||
|
||||
def test_compile_and_getsource_simple(self):
|
||||
co = _pytest._code.compile("x=3")
|
||||
|
@ -178,20 +199,26 @@ class TestSourceParsingAndCompiling(object):
|
|||
assert str(source) == "x=3"
|
||||
|
||||
def test_compile_and_getsource_through_same_function(self):
|
||||
|
||||
def gensource(source):
|
||||
return _pytest._code.compile(source)
|
||||
co1 = gensource("""
|
||||
|
||||
co1 = gensource(
|
||||
"""
|
||||
def f():
|
||||
raise KeyError()
|
||||
""")
|
||||
co2 = gensource("""
|
||||
"""
|
||||
)
|
||||
co2 = gensource(
|
||||
"""
|
||||
def f():
|
||||
raise ValueError()
|
||||
""")
|
||||
"""
|
||||
)
|
||||
source1 = inspect.getsource(co1)
|
||||
assert 'KeyError' in source1
|
||||
assert "KeyError" in source1
|
||||
source2 = inspect.getsource(co2)
|
||||
assert 'ValueError' in source2
|
||||
assert "ValueError" in source2
|
||||
|
||||
def test_getstatement(self):
|
||||
# print str(self.source)
|
||||
|
@ -199,13 +226,15 @@ class TestSourceParsingAndCompiling(object):
|
|||
for i in range(1, 4):
|
||||
# print "trying start in line %r" % self.source[i]
|
||||
s = self.source.getstatement(i)
|
||||
#x = s.deindent()
|
||||
# x = s.deindent()
|
||||
assert str(s) == ass
|
||||
|
||||
def test_getstatementrange_triple_quoted(self):
|
||||
# print str(self.source)
|
||||
source = Source("""hello('''
|
||||
''')""")
|
||||
source = Source(
|
||||
"""hello('''
|
||||
''')"""
|
||||
)
|
||||
s = source.getstatement(0)
|
||||
assert s == str(source)
|
||||
s = source.getstatement(1)
|
||||
|
@ -213,7 +242,8 @@ class TestSourceParsingAndCompiling(object):
|
|||
|
||||
@astonly
|
||||
def test_getstatementrange_within_constructs(self):
|
||||
source = Source("""\
|
||||
source = Source(
|
||||
"""\
|
||||
try:
|
||||
try:
|
||||
raise ValueError
|
||||
|
@ -221,7 +251,8 @@ class TestSourceParsingAndCompiling(object):
|
|||
pass
|
||||
finally:
|
||||
42
|
||||
""")
|
||||
"""
|
||||
)
|
||||
assert len(source) == 7
|
||||
# check all lineno's that could occur in a traceback
|
||||
# assert source.getstatementrange(0) == (0, 7)
|
||||
|
@ -233,19 +264,22 @@ class TestSourceParsingAndCompiling(object):
|
|||
assert source.getstatementrange(6) == (6, 7)
|
||||
|
||||
def test_getstatementrange_bug(self):
|
||||
source = Source("""\
|
||||
source = Source(
|
||||
"""\
|
||||
try:
|
||||
x = (
|
||||
y +
|
||||
z)
|
||||
except:
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
assert len(source) == 6
|
||||
assert source.getstatementrange(2) == (1, 4)
|
||||
|
||||
def test_getstatementrange_bug2(self):
|
||||
source = Source("""\
|
||||
source = Source(
|
||||
"""\
|
||||
assert (
|
||||
33
|
||||
==
|
||||
|
@ -255,19 +289,22 @@ class TestSourceParsingAndCompiling(object):
|
|||
),
|
||||
]
|
||||
)
|
||||
""")
|
||||
"""
|
||||
)
|
||||
assert len(source) == 9
|
||||
assert source.getstatementrange(5) == (0, 9)
|
||||
|
||||
def test_getstatementrange_ast_issue58(self):
|
||||
source = Source("""\
|
||||
source = Source(
|
||||
"""\
|
||||
|
||||
def test_some():
|
||||
for a in [a for a in
|
||||
CAUSE_ERROR]: pass
|
||||
|
||||
x = 3
|
||||
""")
|
||||
"""
|
||||
)
|
||||
assert getstatement(2, source).lines == source.lines[2:3]
|
||||
assert getstatement(3, source).lines == source.lines[3:4]
|
||||
|
||||
|
@ -282,6 +319,7 @@ class TestSourceParsingAndCompiling(object):
|
|||
|
||||
def test_compile_to_ast(self):
|
||||
import ast
|
||||
|
||||
source = Source("x = 4")
|
||||
mod = source.compile(flag=ast.PyCF_ONLY_AST)
|
||||
assert isinstance(mod, ast.Module)
|
||||
|
@ -295,10 +333,11 @@ class TestSourceParsingAndCompiling(object):
|
|||
frame = excinfo.traceback[-1].frame
|
||||
stmt = frame.code.fullsource.getstatement(frame.lineno)
|
||||
# print "block", str(block)
|
||||
assert str(stmt).strip().startswith('assert')
|
||||
assert str(stmt).strip().startswith("assert")
|
||||
|
||||
@pytest.mark.parametrize('name', ['', None, 'my'])
|
||||
@pytest.mark.parametrize("name", ["", None, "my"])
|
||||
def test_compilefuncs_and_path_sanity(self, name):
|
||||
|
||||
def check(comp, name):
|
||||
co = comp(self.source, name)
|
||||
if not name:
|
||||
|
@ -316,33 +355,41 @@ class TestSourceParsingAndCompiling(object):
|
|||
check(comp, name)
|
||||
|
||||
def test_offsetless_synerr(self):
|
||||
pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval')
|
||||
pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode="eval")
|
||||
|
||||
|
||||
def test_getstartingblock_singleline():
|
||||
|
||||
class A(object):
|
||||
|
||||
def __init__(self, *args):
|
||||
frame = sys._getframe(1)
|
||||
self.source = _pytest._code.Frame(frame).statement
|
||||
|
||||
x = A('x', 'y')
|
||||
x = A("x", "y")
|
||||
|
||||
values = [i for i in x.source.lines if i.strip()]
|
||||
assert len(values) == 1
|
||||
|
||||
|
||||
def test_getline_finally():
|
||||
def c(): pass
|
||||
excinfo = pytest.raises(TypeError, """
|
||||
|
||||
def c():
|
||||
pass
|
||||
|
||||
excinfo = pytest.raises(
|
||||
TypeError,
|
||||
"""
|
||||
teardown = None
|
||||
try:
|
||||
c(1)
|
||||
finally:
|
||||
if teardown:
|
||||
teardown()
|
||||
""")
|
||||
""",
|
||||
)
|
||||
source = excinfo.traceback[-1].statement
|
||||
assert str(source).strip() == 'c(1)'
|
||||
assert str(source).strip() == "c(1)"
|
||||
|
||||
|
||||
def test_getfuncsource_dynamic():
|
||||
|
@ -354,26 +401,33 @@ def test_getfuncsource_dynamic():
|
|||
"""
|
||||
co = _pytest._code.compile(source)
|
||||
py.builtin.exec_(co, globals())
|
||||
assert str(_pytest._code.Source(f)).strip() == 'def f():\n raise ValueError'
|
||||
assert str(_pytest._code.Source(g)).strip() == 'def g(): pass'
|
||||
assert str(_pytest._code.Source(f)).strip() == "def f():\n raise ValueError"
|
||||
assert str(_pytest._code.Source(g)).strip() == "def g(): pass"
|
||||
|
||||
|
||||
def test_getfuncsource_with_multine_string():
|
||||
|
||||
def f():
|
||||
c = '''while True:
|
||||
c = """while True:
|
||||
pass
|
||||
'''
|
||||
assert str(_pytest._code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
|
||||
"""
|
||||
|
||||
assert (
|
||||
str(_pytest._code.Source(f)).strip()
|
||||
== "def f():\n c = '''while True:\n pass\n'''"
|
||||
)
|
||||
|
||||
|
||||
def test_deindent():
|
||||
from _pytest._code.source import deindent as deindent
|
||||
assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
|
||||
|
||||
assert deindent(["\tfoo", "\tbar"]) == ["foo", "bar"]
|
||||
|
||||
def f():
|
||||
c = '''while True:
|
||||
c = """while True:
|
||||
pass
|
||||
'''
|
||||
"""
|
||||
|
||||
lines = deindent(inspect.getsource(f).splitlines())
|
||||
assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
|
||||
|
||||
|
@ -383,17 +437,19 @@ def test_deindent():
|
|||
pass
|
||||
"""
|
||||
lines = deindent(source.splitlines())
|
||||
assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
|
||||
assert lines == ["", "def f():", " def g():", " pass", " "]
|
||||
|
||||
|
||||
def test_source_of_class_at_eof_without_newline(tmpdir):
|
||||
# this test fails because the implicit inspect.getsource(A) below
|
||||
# does not return the "x = 1" last line.
|
||||
source = _pytest._code.Source('''
|
||||
source = _pytest._code.Source(
|
||||
"""
|
||||
class A(object):
|
||||
def method(self):
|
||||
x = 1
|
||||
''')
|
||||
"""
|
||||
)
|
||||
path = tmpdir.join("a.py")
|
||||
path.write(source)
|
||||
s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A)
|
||||
|
@ -401,12 +457,14 @@ def test_source_of_class_at_eof_without_newline(tmpdir):
|
|||
|
||||
|
||||
if True:
|
||||
|
||||
def x():
|
||||
pass
|
||||
|
||||
|
||||
def test_getsource_fallback():
|
||||
from _pytest._code.source import getsource
|
||||
|
||||
expected = """def x():
|
||||
pass"""
|
||||
src = getsource(x)
|
||||
|
@ -415,6 +473,7 @@ def test_getsource_fallback():
|
|||
|
||||
def test_idem_compile_and_getsource():
|
||||
from _pytest._code.source import getsource
|
||||
|
||||
expected = "def x(): pass"
|
||||
co = _pytest._code.compile(expected)
|
||||
src = getsource(co)
|
||||
|
@ -423,25 +482,29 @@ def test_idem_compile_and_getsource():
|
|||
|
||||
def test_findsource_fallback():
|
||||
from _pytest._code.source import findsource
|
||||
|
||||
src, lineno = findsource(x)
|
||||
assert 'test_findsource_simple' in str(src)
|
||||
assert src[lineno] == ' def x():'
|
||||
assert "test_findsource_simple" in str(src)
|
||||
assert src[lineno] == " def x():"
|
||||
|
||||
|
||||
def test_findsource():
|
||||
from _pytest._code.source import findsource
|
||||
co = _pytest._code.compile("""if 1:
|
||||
|
||||
co = _pytest._code.compile(
|
||||
"""if 1:
|
||||
def x():
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
src, lineno = findsource(co)
|
||||
assert 'if 1:' in str(src)
|
||||
assert "if 1:" in str(src)
|
||||
|
||||
d = {}
|
||||
eval(co, d)
|
||||
src, lineno = findsource(d['x'])
|
||||
assert 'if 1:' in str(src)
|
||||
src, lineno = findsource(d["x"])
|
||||
assert "if 1:" in str(src)
|
||||
assert src[lineno] == " def x():"
|
||||
|
||||
|
||||
|
@ -469,30 +532,37 @@ def test_getfslineno():
|
|||
|
||||
class B(object):
|
||||
pass
|
||||
|
||||
B.__name__ = "B2"
|
||||
assert getfslineno(B)[1] == -1
|
||||
|
||||
|
||||
def test_code_of_object_instance_with_call():
|
||||
|
||||
class A(object):
|
||||
pass
|
||||
|
||||
pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
|
||||
|
||||
class WithCall(object):
|
||||
|
||||
def __call__(self):
|
||||
pass
|
||||
|
||||
code = _pytest._code.Code(WithCall())
|
||||
assert 'pass' in str(code.source())
|
||||
assert "pass" in str(code.source())
|
||||
|
||||
class Hello(object):
|
||||
|
||||
def __call__(self):
|
||||
pass
|
||||
|
||||
pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))
|
||||
|
||||
|
||||
def getstatement(lineno, source):
|
||||
from _pytest._code.source import getstatementrange_ast
|
||||
|
||||
source = _pytest._code.Source(source, deindent=False)
|
||||
ast, start, end = getstatementrange_ast(lineno, source)
|
||||
return source[start:end]
|
||||
|
@ -505,9 +575,14 @@ def test_oneline():
|
|||
|
||||
def test_comment_and_no_newline_at_end():
|
||||
from _pytest._code.source import getstatementrange_ast
|
||||
source = Source(['def test_basic_complex():',
|
||||
' assert 1 == 2',
|
||||
'# vim: filetype=pyopencl:fdm=marker'])
|
||||
|
||||
source = Source(
|
||||
[
|
||||
"def test_basic_complex():",
|
||||
" assert 1 == 2",
|
||||
"# vim: filetype=pyopencl:fdm=marker",
|
||||
]
|
||||
)
|
||||
ast, start, end = getstatementrange_ast(1, source)
|
||||
assert end == 2
|
||||
|
||||
|
@ -517,8 +592,7 @@ def test_oneline_and_comment():
|
|||
assert str(source) == "raise ValueError"
|
||||
|
||||
|
||||
@pytest.mark.xfail(hasattr(sys, "pypy_version_info"),
|
||||
reason='does not work on pypy')
|
||||
@pytest.mark.xfail(hasattr(sys, "pypy_version_info"), reason="does not work on pypy")
|
||||
def test_comments():
|
||||
source = '''def test():
|
||||
"comment 1"
|
||||
|
@ -533,20 +607,22 @@ comment 4
|
|||
"""
|
||||
'''
|
||||
for line in range(2, 6):
|
||||
assert str(getstatement(line, source)) == ' x = 1'
|
||||
assert str(getstatement(line, source)) == " x = 1"
|
||||
for line in range(6, 10):
|
||||
assert str(getstatement(line, source)) == ' assert False'
|
||||
assert str(getstatement(line, source)) == " assert False"
|
||||
assert str(getstatement(10, source)) == '"""'
|
||||
|
||||
|
||||
def test_comment_in_statement():
|
||||
source = '''test(foo=1,
|
||||
source = """test(foo=1,
|
||||
# comment 1
|
||||
bar=2)
|
||||
'''
|
||||
"""
|
||||
for line in range(1, 3):
|
||||
assert str(getstatement(line, source)) == \
|
||||
'test(foo=1,\n # comment 1\n bar=2)'
|
||||
assert (
|
||||
str(getstatement(line, source))
|
||||
== "test(foo=1,\n # comment 1\n bar=2)"
|
||||
)
|
||||
|
||||
|
||||
def test_single_line_else():
|
||||
|
@ -560,19 +636,24 @@ def test_single_line_finally():
|
|||
|
||||
|
||||
def test_issue55():
|
||||
source = ('def round_trip(dinp):\n assert 1 == dinp\n'
|
||||
'def test_rt():\n round_trip("""\n""")\n')
|
||||
source = (
|
||||
"def round_trip(dinp):\n assert 1 == dinp\n"
|
||||
'def test_rt():\n round_trip("""\n""")\n'
|
||||
)
|
||||
s = getstatement(3, source)
|
||||
assert str(s) == ' round_trip("""\n""")'
|
||||
|
||||
|
||||
def XXXtest_multiline():
|
||||
source = getstatement(0, """\
|
||||
source = getstatement(
|
||||
0,
|
||||
"""\
|
||||
raise ValueError(
|
||||
23
|
||||
)
|
||||
x = 3
|
||||
""")
|
||||
""",
|
||||
)
|
||||
assert str(source) == "raise ValueError(\n 23\n)"
|
||||
|
||||
|
||||
|
|
|
@@ -12,15 +12,14 @@ def test_getstartingblock_multiline():
    see hhatto/autopep8#307). It was considered better to just move this single test to its own
    file and exclude it from autopep8 than try to complicate things.
    """

    class A(object):

        def __init__(self, *args):
            frame = sys._getframe(1)
            self.source = _pytest._code.Frame(frame).statement

    x = A('x',
          'y'
          ,
          'z')
    x = A("x", "y", "z")

    values = [i for i in x.source.lines if i.strip()]
    assert len(values) == 4

@ -3,7 +3,8 @@ import pytest
|
|||
|
||||
|
||||
def test_yield_tests_deprecation(testdir):
|
||||
testdir.makepyfile("""
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
def func1(arg, arg2):
|
||||
assert arg == arg2
|
||||
def test_gen():
|
||||
|
@ -12,101 +13,129 @@ def test_yield_tests_deprecation(testdir):
|
|||
def test_gen2():
|
||||
for k in range(10):
|
||||
yield func1, 1, 1
|
||||
""")
|
||||
result = testdir.runpytest('-ra')
|
||||
result.stdout.fnmatch_lines([
|
||||
'*yield tests are deprecated, and scheduled to be removed in pytest 4.0*',
|
||||
'*2 passed*',
|
||||
])
|
||||
assert result.stdout.str().count('yield tests are deprecated') == 2
|
||||
"""
|
||||
)
|
||||
result = testdir.runpytest("-ra")
|
||||
result.stdout.fnmatch_lines(
|
||||
[
|
||||
"*yield tests are deprecated, and scheduled to be removed in pytest 4.0*",
|
||||
"*2 passed*",
|
||||
]
|
||||
)
|
||||
assert result.stdout.str().count("yield tests are deprecated") == 2
|
||||
|
||||
|
||||
def test_funcarg_prefix_deprecation(testdir):
|
||||
testdir.makepyfile("""
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
def pytest_funcarg__value():
|
||||
return 10
|
||||
|
||||
def test_funcarg_prefix(value):
|
||||
assert value == 10
|
||||
""")
|
||||
result = testdir.runpytest('-ra')
|
||||
result.stdout.fnmatch_lines([
|
||||
('*pytest_funcarg__value: '
|
||||
'declaring fixtures using "pytest_funcarg__" prefix is deprecated '
|
||||
'and scheduled to be removed in pytest 4.0. '
|
||||
'Please remove the prefix and use the @pytest.fixture decorator instead.'),
|
||||
'*1 passed*',
|
||||
])
|
||||
"""
|
||||
)
|
||||
result = testdir.runpytest("-ra")
|
||||
result.stdout.fnmatch_lines(
|
||||
[
|
||||
(
|
||||
"*pytest_funcarg__value: "
|
||||
'declaring fixtures using "pytest_funcarg__" prefix is deprecated '
|
||||
"and scheduled to be removed in pytest 4.0. "
|
||||
"Please remove the prefix and use the @pytest.fixture decorator instead."
|
||||
),
|
||||
"*1 passed*",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def test_pytest_setup_cfg_deprecated(testdir):
|
||||
testdir.makefile('.cfg', setup='''
|
||||
testdir.makefile(
|
||||
".cfg",
|
||||
setup="""
|
||||
[pytest]
|
||||
addopts = --verbose
|
||||
''')
|
||||
""",
|
||||
)
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines(['*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*'])
|
||||
result.stdout.fnmatch_lines(
|
||||
["*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*"]
|
||||
)
|
||||
|
||||
|
||||
def test_pytest_custom_cfg_deprecated(testdir):
|
||||
testdir.makefile('.cfg', custom='''
|
||||
testdir.makefile(
|
||||
".cfg",
|
||||
custom="""
|
||||
[pytest]
|
||||
addopts = --verbose
|
||||
''')
|
||||
""",
|
||||
)
|
||||
result = testdir.runpytest("-c", "custom.cfg")
|
||||
result.stdout.fnmatch_lines(['*pytest*section in custom.cfg files is deprecated*use*tool:pytest*instead*'])
|
||||
result.stdout.fnmatch_lines(
|
||||
["*pytest*section in custom.cfg files is deprecated*use*tool:pytest*instead*"]
|
||||
)
|
||||
|
||||
|
||||
def test_str_args_deprecated(tmpdir, testdir):
|
||||
"""Deprecate passing strings to pytest.main(). Scheduled for removal in pytest-4.0."""
|
||||
from _pytest.main import EXIT_NOTESTSCOLLECTED
|
||||
|
||||
warnings = []
|
||||
|
||||
class Collect(object):
|
||||
|
||||
def pytest_logwarning(self, message):
|
||||
warnings.append(message)
|
||||
|
||||
ret = pytest.main("%s -x" % tmpdir, plugins=[Collect()])
|
||||
msg = ('passing a string to pytest.main() is deprecated, '
|
||||
'pass a list of arguments instead.')
|
||||
msg = (
|
||||
"passing a string to pytest.main() is deprecated, "
|
||||
"pass a list of arguments instead."
|
||||
)
|
||||
assert msg in warnings
|
||||
assert ret == EXIT_NOTESTSCOLLECTED
|
||||
|
||||
|
||||
def test_getfuncargvalue_is_deprecated(request):
|
||||
pytest.deprecated_call(request.getfuncargvalue, 'tmpdir')
|
||||
pytest.deprecated_call(request.getfuncargvalue, "tmpdir")
|
||||
|
||||
|
||||
def test_resultlog_is_deprecated(testdir):
|
||||
result = testdir.runpytest('--help')
|
||||
result.stdout.fnmatch_lines(['*DEPRECATED path for machine-readable result log*'])
|
||||
result = testdir.runpytest("--help")
|
||||
result.stdout.fnmatch_lines(["*DEPRECATED path for machine-readable result log*"])
|
||||
|
||||
testdir.makepyfile('''
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
def test():
|
||||
pass
|
||||
''')
|
||||
result = testdir.runpytest('--result-log=%s' % testdir.tmpdir.join('result.log'))
|
||||
result.stdout.fnmatch_lines([
|
||||
'*--result-log is deprecated and scheduled for removal in pytest 4.0*',
|
||||
'*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*',
|
||||
])
|
||||
"""
|
||||
)
|
||||
result = testdir.runpytest("--result-log=%s" % testdir.tmpdir.join("result.log"))
|
||||
result.stdout.fnmatch_lines(
|
||||
[
|
||||
"*--result-log is deprecated and scheduled for removal in pytest 4.0*",
|
||||
"*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.filterwarnings('always:Metafunc.addcall is deprecated')
|
||||
@pytest.mark.filterwarnings("always:Metafunc.addcall is deprecated")
|
||||
def test_metafunc_addcall_deprecated(testdir):
|
||||
testdir.makepyfile("""
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
def pytest_generate_tests(metafunc):
|
||||
metafunc.addcall({'i': 1})
|
||||
metafunc.addcall({'i': 2})
|
||||
def test_func(i):
|
||||
pass
|
||||
""")
|
||||
res = testdir.runpytest('-s')
|
||||
"""
|
||||
)
|
||||
res = testdir.runpytest("-s")
|
||||
assert res.ret == 0
|
||||
res.stdout.fnmatch_lines([
|
||||
"*Metafunc.addcall is deprecated*",
|
||||
"*2 passed, 2 warnings*",
|
||||
])
|
||||
res.stdout.fnmatch_lines(
|
||||
["*Metafunc.addcall is deprecated*", "*2 passed, 2 warnings*"]
|
||||
)
|
||||
|
||||
|
||||
def test_terminal_reporter_writer_attr(pytestconfig):
|
||||
|
@ -115,89 +144,122 @@ def test_terminal_reporter_writer_attr(pytestconfig):
|
|||
"""
|
||||
try:
|
||||
import xdist # noqa
|
||||
pytest.skip('xdist workers disable the terminal reporter plugin')
|
||||
|
||||
pytest.skip("xdist workers disable the terminal reporter plugin")
|
||||
except ImportError:
|
||||
pass
|
||||
terminal_reporter = pytestconfig.pluginmanager.get_plugin('terminalreporter')
|
||||
terminal_reporter = pytestconfig.pluginmanager.get_plugin("terminalreporter")
|
||||
assert terminal_reporter.writer is terminal_reporter._tw
|
||||
|
||||
|
||||
@pytest.mark.parametrize('plugin', ['catchlog', 'capturelog'])
|
||||
@pytest.mark.parametrize("plugin", ["catchlog", "capturelog"])
|
||||
def test_pytest_catchlog_deprecated(testdir, plugin):
|
||||
testdir.makepyfile("""
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
def test_func(pytestconfig):
|
||||
pytestconfig.pluginmanager.register(None, 'pytest_{}')
|
||||
""".format(plugin))
|
||||
""".format(
|
||||
plugin
|
||||
)
|
||||
)
|
||||
res = testdir.runpytest()
|
||||
assert res.ret == 0
|
||||
res.stdout.fnmatch_lines([
|
||||
"*pytest-*log plugin has been merged into the core*",
|
||||
"*1 passed, 1 warnings*",
|
||||
])
|
||||
res.stdout.fnmatch_lines(
|
||||
["*pytest-*log plugin has been merged into the core*", "*1 passed, 1 warnings*"]
|
||||
)
|
||||
|
||||
|
||||
def test_pytest_plugins_in_non_top_level_conftest_deprecated(testdir):
|
||||
from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
|
||||
|
||||
subdirectory = testdir.tmpdir.join("subdirectory")
|
||||
subdirectory.mkdir()
|
||||
# create the inner conftest with makeconftest and then move it to the subdirectory
|
||||
testdir.makeconftest("""
|
||||
testdir.makeconftest(
|
||||
"""
|
||||
pytest_plugins=['capture']
|
||||
""")
|
||||
"""
|
||||
)
|
||||
testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
|
||||
# make the top level conftest
|
||||
testdir.makeconftest("""
|
||||
testdir.makeconftest(
|
||||
"""
|
||||
import warnings
|
||||
warnings.filterwarnings('always', category=DeprecationWarning)
|
||||
""")
|
||||
testdir.makepyfile("""
|
||||
"""
|
||||
)
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
res = testdir.runpytest_subprocess()
|
||||
assert res.ret == 0
|
||||
res.stderr.fnmatch_lines('*' + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0])
|
||||
res.stderr.fnmatch_lines(
|
||||
"*" + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
|
||||
)
|
||||
|
||||
|
||||
def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_top_level_conftest(testdir):
|
||||
def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_top_level_conftest(
|
||||
testdir
|
||||
):
|
||||
from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
|
||||
subdirectory = testdir.tmpdir.join('subdirectory')
|
||||
|
||||
subdirectory = testdir.tmpdir.join("subdirectory")
|
||||
subdirectory.mkdir()
|
||||
testdir.makeconftest("""
|
||||
testdir.makeconftest(
|
||||
"""
|
||||
import warnings
|
||||
warnings.filterwarnings('always', category=DeprecationWarning)
|
||||
pytest_plugins=['capture']
|
||||
""")
|
||||
"""
|
||||
)
|
||||
testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
|
||||
|
||||
testdir.makepyfile("""
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
res = testdir.runpytest_subprocess()
|
||||
assert res.ret == 0
|
||||
res.stderr.fnmatch_lines('*' + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0])
|
||||
res.stderr.fnmatch_lines(
|
||||
"*" + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]
|
||||
)
|
||||
|
||||
|
||||
def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_false_positives(testdir):
|
||||
def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_false_positives(
|
||||
testdir
|
||||
):
|
||||
from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
|
||||
subdirectory = testdir.tmpdir.join('subdirectory')
|
||||
|
||||
subdirectory = testdir.tmpdir.join("subdirectory")
|
||||
subdirectory.mkdir()
|
||||
testdir.makeconftest("""
|
||||
testdir.makeconftest(
|
||||
"""
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
|
||||
|
||||
testdir.makeconftest("""
|
||||
testdir.makeconftest(
|
||||
"""
|
||||
import warnings
|
||||
warnings.filterwarnings('always', category=DeprecationWarning)
|
||||
pytest_plugins=['capture']
|
||||
""")
|
||||
testdir.makepyfile("""
|
||||
"""
|
||||
)
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
res = testdir.runpytest_subprocess()
|
||||
assert res.ret == 0
|
||||
assert str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0] not in res.stderr.str()
|
||||
assert str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[
|
||||
0
|
||||
] not in res.stderr.str()
|
||||
|
|
|
@@ -1,12 +1,12 @@
"""
Generates an executable with pytest runner embedded using PyInstaller.
"""
if __name__ == '__main__':
if __name__ == "__main__":
    import pytest
    import subprocess

    hidden = []
    for x in pytest.freeze_includes():
        hidden.extend(['--hidden-import', x])
    args = ['pyinstaller', '--noconfirm'] + hidden + ['runtests_script.py']
    subprocess.check_call(' '.join(args), shell=True)
        hidden.extend(["--hidden-import", x])
    args = ["pyinstaller", "--noconfirm"] + hidden + ["runtests_script.py"]
    subprocess.check_call(" ".join(args), shell=True)

@@ -3,7 +3,8 @@ This is the script that is actually frozen into an executable: simply executes
py.test main().
"""

if __name__ == '__main__':
if __name__ == "__main__":
    import sys
    import pytest

    sys.exit(pytest.main())

@@ -1,7 +1,6 @@
def test_upper():
    assert 'foo'.upper() == 'FOO'
    assert "foo".upper() == "FOO"


def test_lower():
    assert 'FOO'.lower() == 'foo'
    assert "FOO".lower() == "foo"

@@ -2,11 +2,11 @@
Called by tox.ini: uses the generated executable to run the tests in ./tests/
directory.
"""
if __name__ == '__main__':
if __name__ == "__main__":
    import os
    import sys

    executable = os.path.join(os.getcwd(), 'dist', 'runtests_script', 'runtests_script')
    if sys.platform.startswith('win'):
        executable += '.exe'
    sys.exit(os.system('%s tests' % executable))
    executable = os.path.join(os.getcwd(), "dist", "runtests_script", "runtests_script")
    if sys.platform.startswith("win"):
        executable += ".exe"
    sys.exit(os.system("%s tests" % executable))

@ -4,32 +4,33 @@ import logging
|
|||
import pytest
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
sublogger = logging.getLogger(__name__ + '.baz')
|
||||
sublogger = logging.getLogger(__name__ + ".baz")
|
||||
|
||||
|
||||
def test_fixture_help(testdir):
|
||||
result = testdir.runpytest('--fixtures')
|
||||
result.stdout.fnmatch_lines(['*caplog*'])
|
||||
result = testdir.runpytest("--fixtures")
|
||||
result.stdout.fnmatch_lines(["*caplog*"])
|
||||
|
||||
|
||||
def test_change_level(caplog):
|
||||
caplog.set_level(logging.INFO)
|
||||
logger.debug('handler DEBUG level')
|
||||
logger.info('handler INFO level')
|
||||
logger.debug("handler DEBUG level")
|
||||
logger.info("handler INFO level")
|
||||
|
||||
caplog.set_level(logging.CRITICAL, logger=sublogger.name)
|
||||
sublogger.warning('logger WARNING level')
|
||||
sublogger.critical('logger CRITICAL level')
|
||||
sublogger.warning("logger WARNING level")
|
||||
sublogger.critical("logger CRITICAL level")
|
||||
|
||||
assert 'DEBUG' not in caplog.text
|
||||
assert 'INFO' in caplog.text
|
||||
assert 'WARNING' not in caplog.text
|
||||
assert 'CRITICAL' in caplog.text
|
||||
assert "DEBUG" not in caplog.text
|
||||
assert "INFO" in caplog.text
|
||||
assert "WARNING" not in caplog.text
|
||||
assert "CRITICAL" in caplog.text
|
||||
|
||||
|
||||
def test_change_level_undo(testdir):
|
||||
"""Ensure that 'set_level' is undone after the end of the test"""
|
||||
testdir.makepyfile('''
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
import logging
|
||||
|
||||
def test1(caplog):
|
||||
|
@ -42,58 +43,54 @@ def test_change_level_undo(testdir):
|
|||
# using + operator here so fnmatch_lines doesn't match the code in the traceback
|
||||
logging.info('log from ' + 'test2')
|
||||
assert 0
|
||||
''')
|
||||
"""
|
||||
)
|
||||
result = testdir.runpytest_subprocess()
|
||||
result.stdout.fnmatch_lines([
|
||||
'*log from test1*',
|
||||
'*2 failed in *',
|
||||
])
|
||||
assert 'log from test2' not in result.stdout.str()
|
||||
result.stdout.fnmatch_lines(["*log from test1*", "*2 failed in *"])
|
||||
assert "log from test2" not in result.stdout.str()
|
||||
|
||||
|
||||
def test_with_statement(caplog):
|
||||
with caplog.at_level(logging.INFO):
|
||||
logger.debug('handler DEBUG level')
|
||||
logger.info('handler INFO level')
|
||||
logger.debug("handler DEBUG level")
|
||||
logger.info("handler INFO level")
|
||||
|
||||
with caplog.at_level(logging.CRITICAL, logger=sublogger.name):
|
||||
sublogger.warning('logger WARNING level')
|
||||
sublogger.critical('logger CRITICAL level')
|
||||
sublogger.warning("logger WARNING level")
|
||||
sublogger.critical("logger CRITICAL level")
|
||||
|
||||
assert 'DEBUG' not in caplog.text
|
||||
assert 'INFO' in caplog.text
|
||||
assert 'WARNING' not in caplog.text
|
||||
assert 'CRITICAL' in caplog.text
|
||||
assert "DEBUG" not in caplog.text
|
||||
assert "INFO" in caplog.text
|
||||
assert "WARNING" not in caplog.text
|
||||
assert "CRITICAL" in caplog.text
|
||||
|
||||
|
||||
def test_log_access(caplog):
|
||||
caplog.set_level(logging.INFO)
|
||||
logger.info('boo %s', 'arg')
|
||||
assert caplog.records[0].levelname == 'INFO'
|
||||
assert caplog.records[0].msg == 'boo %s'
|
||||
assert 'boo arg' in caplog.text
|
||||
logger.info("boo %s", "arg")
|
||||
assert caplog.records[0].levelname == "INFO"
|
||||
assert caplog.records[0].msg == "boo %s"
|
||||
assert "boo arg" in caplog.text
|
||||
|
||||
|
||||
def test_record_tuples(caplog):
|
||||
caplog.set_level(logging.INFO)
|
||||
logger.info('boo %s', 'arg')
|
||||
logger.info("boo %s", "arg")
|
||||
|
||||
assert caplog.record_tuples == [
|
||||
(__name__, logging.INFO, 'boo arg'),
|
||||
]
|
||||
assert caplog.record_tuples == [(__name__, logging.INFO, "boo arg")]
|
||||
|
||||
|
||||
def test_unicode(caplog):
|
||||
caplog.set_level(logging.INFO)
|
||||
logger.info(u'bū')
|
||||
assert caplog.records[0].levelname == 'INFO'
|
||||
assert caplog.records[0].msg == u'bū'
|
||||
assert u'bū' in caplog.text
|
||||
logger.info(u"bū")
|
||||
assert caplog.records[0].levelname == "INFO"
|
||||
assert caplog.records[0].msg == u"bū"
|
||||
assert u"bū" in caplog.text
|
||||
|
||||
|
||||
def test_clear(caplog):
|
||||
caplog.set_level(logging.INFO)
|
||||
logger.info(u'bū')
|
||||
logger.info(u"bū")
|
||||
assert len(caplog.records)
|
||||
assert caplog.text
|
||||
caplog.clear()
|
||||
|
@ -103,20 +100,20 @@ def test_clear(caplog):
|
|||
|
||||
@pytest.fixture
|
||||
def logging_during_setup_and_teardown(caplog):
|
||||
caplog.set_level('INFO')
|
||||
logger.info('a_setup_log')
|
||||
caplog.set_level("INFO")
|
||||
logger.info("a_setup_log")
|
||||
yield
|
||||
logger.info('a_teardown_log')
|
||||
assert [x.message for x in caplog.get_records('teardown')] == ['a_teardown_log']
|
||||
logger.info("a_teardown_log")
|
||||
assert [x.message for x in caplog.get_records("teardown")] == ["a_teardown_log"]
|
||||
|
||||
|
||||
def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown):
|
||||
assert not caplog.records
|
||||
assert not caplog.get_records('call')
|
||||
logger.info('a_call_log')
|
||||
assert [x.message for x in caplog.get_records('call')] == ['a_call_log']
|
||||
assert not caplog.get_records("call")
|
||||
logger.info("a_call_log")
|
||||
assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]
|
||||
|
||||
assert [x.message for x in caplog.get_records('setup')] == ['a_setup_log']
|
||||
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
|
||||
|
||||
# This reachers into private API, don't use this type of thing in real tests!
|
||||
assert set(caplog._item.catch_log_handlers.keys()) == {'setup', 'call'}
|
||||
assert set(caplog._item.catch_log_handlers.keys()) == {"setup", "call"}
|
||||
|
|
|
@ -5,13 +5,20 @@ from _pytest.logging import ColoredLevelFormatter
|
|||
|
||||
|
||||
def test_coloredlogformatter():
|
||||
logfmt = '%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s'
|
||||
logfmt = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
|
||||
|
||||
record = logging.LogRecord(
|
||||
name='dummy', level=logging.INFO, pathname='dummypath', lineno=10,
|
||||
msg='Test Message', args=(), exc_info=False)
|
||||
name="dummy",
|
||||
level=logging.INFO,
|
||||
pathname="dummypath",
|
||||
lineno=10,
|
||||
msg="Test Message",
|
||||
args=(),
|
||||
exc_info=False,
|
||||
)
|
||||
|
||||
class ColorConfig(object):
|
||||
|
||||
class option(object):
|
||||
pass
|
||||
|
||||
|
@ -19,11 +26,12 @@ def test_coloredlogformatter():
|
|||
tw.hasmarkup = True
|
||||
formatter = ColoredLevelFormatter(tw, logfmt)
|
||||
output = formatter.format(record)
|
||||
assert output == ('dummypath 10 '
|
||||
'\x1b[32mINFO \x1b[0m Test Message')
|
||||
assert (
|
||||
output
|
||||
== ("dummypath 10 " "\x1b[32mINFO \x1b[0m Test Message")
|
||||
)
|
||||
|
||||
tw.hasmarkup = False
|
||||
formatter = ColoredLevelFormatter(tw, logfmt)
|
||||
output = formatter.format(record)
|
||||
assert output == ('dummypath 10 '
|
||||
'INFO Test Message')
|
||||
assert output == ("dummypath 10 " "INFO Test Message")
|
||||
|
|
|
@ -8,7 +8,8 @@ from pytest import approx
|
|||
from operator import eq, ne
|
||||
from decimal import Decimal
|
||||
from fractions import Fraction
|
||||
inf, nan = float('inf'), float('nan')
|
||||
|
||||
inf, nan = float("inf"), float("nan")
|
||||
|
||||
|
||||
class MyDocTestRunner(doctest.DocTestRunner):
|
||||
|
@ -17,29 +18,47 @@ class MyDocTestRunner(doctest.DocTestRunner):
|
|||
doctest.DocTestRunner.__init__(self)
|
||||
|
||||
def report_failure(self, out, test, example, got):
|
||||
raise AssertionError("'{}' evaluates to '{}', not '{}'".format(
|
||||
example.source.strip(), got.strip(), example.want.strip()))
|
||||
raise AssertionError(
|
||||
"'{}' evaluates to '{}', not '{}'".format(
|
||||
example.source.strip(), got.strip(), example.want.strip()
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class TestApprox(object):
|
||||
|
||||
def test_repr_string(self):
|
||||
plus_minus = u'\u00b1' if sys.version_info[0] > 2 else u'+-'
|
||||
tol1, tol2, infr = '1.0e-06', '2.0e-06', 'inf'
|
||||
assert repr(approx(1.0)) == '1.0 {pm} {tol1}'.format(pm=plus_minus, tol1=tol1)
|
||||
assert repr(approx([1.0, 2.0])) == 'approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])'.format(
|
||||
pm=plus_minus, tol1=tol1, tol2=tol2)
|
||||
assert repr(approx((1.0, 2.0))) == 'approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))'.format(
|
||||
pm=plus_minus, tol1=tol1, tol2=tol2)
|
||||
assert repr(approx(inf)) == 'inf'
|
||||
assert repr(approx(1.0, rel=nan)) == '1.0 {pm} ???'.format(pm=plus_minus)
|
||||
assert repr(approx(1.0, rel=inf)) == '1.0 {pm} {infr}'.format(pm=plus_minus, infr=infr)
|
||||
assert repr(approx(1.0j, rel=inf)) == '1j'
|
||||
plus_minus = u"\u00b1" if sys.version_info[0] > 2 else u"+-"
|
||||
tol1, tol2, infr = "1.0e-06", "2.0e-06", "inf"
|
||||
assert repr(approx(1.0)) == "1.0 {pm} {tol1}".format(pm=plus_minus, tol1=tol1)
|
||||
assert (
|
||||
repr(approx([1.0, 2.0]))
|
||||
== "approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])".format(
|
||||
pm=plus_minus, tol1=tol1, tol2=tol2
|
||||
)
|
||||
)
|
||||
assert (
|
||||
repr(approx((1.0, 2.0)))
|
||||
== "approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))".format(
|
||||
pm=plus_minus, tol1=tol1, tol2=tol2
|
||||
)
|
||||
)
|
||||
assert repr(approx(inf)) == "inf"
|
||||
assert repr(approx(1.0, rel=nan)) == "1.0 {pm} ???".format(pm=plus_minus)
|
||||
assert (
|
||||
repr(approx(1.0, rel=inf))
|
||||
== "1.0 {pm} {infr}".format(pm=plus_minus, infr=infr)
|
||||
)
|
||||
assert repr(approx(1.0j, rel=inf)) == "1j"
|
||||
|
||||
# Dictionaries aren't ordered, so we need to check both orders.
|
||||
assert repr(approx({'a': 1.0, 'b': 2.0})) in (
|
||||
"approx({{'a': 1.0 {pm} {tol1}, 'b': 2.0 {pm} {tol2}}})".format(pm=plus_minus, tol1=tol1, tol2=tol2),
|
||||
"approx({{'b': 2.0 {pm} {tol2}, 'a': 1.0 {pm} {tol1}}})".format(pm=plus_minus, tol1=tol1, tol2=tol2),
|
||||
assert repr(approx({"a": 1.0, "b": 2.0})) in (
|
||||
"approx({{'a': 1.0 {pm} {tol1}, 'b': 2.0 {pm} {tol2}}})".format(
|
||||
pm=plus_minus, tol1=tol1, tol2=tol2
|
||||
),
|
||||
"approx({{'b': 2.0 {pm} {tol2}, 'a': 1.0 {pm} {tol1}}})".format(
|
||||
pm=plus_minus, tol1=tol1, tol2=tol2
|
||||
),
|
||||
)
|
||||
|
||||
def test_operator_overloading(self):
|
||||
|
@ -56,25 +75,19 @@ class TestApprox(object):
|
|||
(12345, 12345.0),
|
||||
(0.0, -0.0),
|
||||
(345678, 345678),
|
||||
(Decimal('1.0001'), Decimal('1.0001')),
|
||||
(Decimal("1.0001"), Decimal("1.0001")),
|
||||
(Fraction(1, 3), Fraction(-1, -3)),
|
||||
]
|
||||
for a, x in examples:
|
||||
assert a == approx(x)
|
||||
|
||||
def test_opposite_sign(self):
|
||||
examples = [
|
||||
(eq, 1e-100, -1e-100),
|
||||
(ne, 1e100, -1e100),
|
||||
]
|
||||
examples = [(eq, 1e-100, -1e-100), (ne, 1e100, -1e100)]
|
||||
for op, a, x in examples:
|
||||
assert op(a, approx(x))
|
||||
|
||||
def test_zero_tolerance(self):
|
||||
within_1e10 = [
|
||||
(1.1e-100, 1e-100),
|
||||
(-1.1e-100, -1e-100),
|
||||
]
|
||||
within_1e10 = [(1.1e-100, 1e-100), (-1.1e-100, -1e-100)]
|
||||
for a, x in within_1e10:
|
||||
assert x == approx(x, rel=0.0, abs=0.0)
|
||||
assert a != approx(x, rel=0.0, abs=0.0)
|
||||
|
@ -98,12 +111,7 @@ class TestApprox(object):
|
|||
|
||||
def test_inf_tolerance(self):
|
||||
# Everything should be equal if the tolerance is infinite.
|
||||
large_diffs = [
|
||||
(1, 1000),
|
||||
(1e-50, 1e50),
|
||||
(-1.0, -1e300),
|
||||
(0.0, 10),
|
||||
]
|
||||
large_diffs = [(1, 1000), (1e-50, 1e50), (-1.0, -1e300), (0.0, 10)]
|
||||
for a, x in large_diffs:
|
||||
assert a != approx(x, rel=0.0, abs=0.0)
|
||||
assert a == approx(x, rel=inf, abs=0.0)
|
||||
|
@ -113,20 +121,13 @@ class TestApprox(object):
|
|||
def test_inf_tolerance_expecting_zero(self):
|
||||
# If the relative tolerance is zero but the expected value is infinite,
|
||||
# the actual tolerance is a NaN, which should be an error.
|
||||
illegal_kwargs = [
|
||||
dict(rel=inf, abs=0.0),
|
||||
dict(rel=inf, abs=inf),
|
||||
]
|
||||
illegal_kwargs = [dict(rel=inf, abs=0.0), dict(rel=inf, abs=inf)]
|
||||
for kwargs in illegal_kwargs:
|
||||
with pytest.raises(ValueError):
|
||||
1 == approx(0, **kwargs)
|
||||
|
||||
def test_nan_tolerance(self):
|
||||
illegal_kwargs = [
|
||||
dict(rel=nan),
|
||||
dict(abs=nan),
|
||||
dict(rel=nan, abs=nan),
|
||||
]
|
||||
illegal_kwargs = [dict(rel=nan), dict(abs=nan), dict(rel=nan, abs=nan)]
|
||||
for kwargs in illegal_kwargs:
|
||||
with pytest.raises(ValueError):
|
||||
1.1 == approx(1, **kwargs)
|
||||
|
@ -148,8 +149,8 @@ class TestApprox(object):
|
|||
(eq, 1e0 + 1e-6, 1e0),
|
||||
(ne, 1e0 + 2e-6, 1e0),
|
||||
# Absolute tolerance used.
|
||||
(eq, 1e-100, + 1e-106),
|
||||
(eq, 1e-100, + 2e-106),
|
||||
(eq, 1e-100, +1e-106),
|
||||
(eq, 1e-100, +2e-106),
|
||||
(eq, 1e-100, 0),
|
||||
]
|
||||
for op, a, x in examples:
|
||||
|
@ -172,21 +173,13 @@ class TestApprox(object):
|
|||
assert 1e-8 + 1e-16 != approx(1e-8, rel=5e-9, abs=5e-17)
|
||||
|
||||
def test_relative_tolerance(self):
|
||||
within_1e8_rel = [
|
||||
(1e8 + 1e0, 1e8),
|
||||
(1e0 + 1e-8, 1e0),
|
||||
(1e-8 + 1e-16, 1e-8),
|
||||
]
|
||||
within_1e8_rel = [(1e8 + 1e0, 1e8), (1e0 + 1e-8, 1e0), (1e-8 + 1e-16, 1e-8)]
|
||||
for a, x in within_1e8_rel:
|
||||
assert a == approx(x, rel=5e-8, abs=0.0)
|
||||
assert a != approx(x, rel=5e-9, abs=0.0)
|
||||
|
||||
def test_absolute_tolerance(self):
|
||||
within_1e8_abs = [
|
||||
(1e8 + 9e-9, 1e8),
|
||||
(1e0 + 9e-9, 1e0),
|
||||
(1e-8 + 9e-9, 1e-8),
|
||||
]
|
||||
within_1e8_abs = [(1e8 + 9e-9, 1e8), (1e0 + 9e-9, 1e0), (1e-8 + 9e-9, 1e-8)]
|
||||
for a, x in within_1e8_abs:
|
||||
assert a == approx(x, rel=0, abs=5e-8)
|
||||
assert a != approx(x, rel=0, abs=5e-9)
|
||||
|
@ -233,10 +226,7 @@ class TestApprox(object):
|
|||
assert op(a, approx(x, nan_ok=True))
|
||||
|
||||
def test_int(self):
|
||||
within_1e6 = [
|
||||
(1000001, 1000000),
|
||||
(-1000001, -1000000),
|
||||
]
|
||||
within_1e6 = [(1000001, 1000000), (-1000001, -1000000)]
|
||||
for a, x in within_1e6:
|
||||
assert a == approx(x, rel=5e-6, abs=0)
|
||||
assert a != approx(x, rel=5e-7, abs=0)
|
||||
|
@ -245,15 +235,15 @@ class TestApprox(object):
|
|||
|
||||
def test_decimal(self):
|
||||
within_1e6 = [
|
||||
(Decimal('1.000001'), Decimal('1.0')),
|
||||
(Decimal('-1.000001'), Decimal('-1.0')),
|
||||
(Decimal("1.000001"), Decimal("1.0")),
|
||||
(Decimal("-1.000001"), Decimal("-1.0")),
|
||||
]
|
||||
for a, x in within_1e6:
|
||||
assert a == approx(x)
|
||||
assert a == approx(x, rel=Decimal('5e-6'), abs=0)
|
||||
assert a != approx(x, rel=Decimal('5e-7'), abs=0)
|
||||
assert approx(x, rel=Decimal('5e-6'), abs=0) == a
|
||||
assert approx(x, rel=Decimal('5e-7'), abs=0) != a
|
||||
assert a == approx(x, rel=Decimal("5e-6"), abs=0)
|
||||
assert a != approx(x, rel=Decimal("5e-7"), abs=0)
|
||||
assert approx(x, rel=Decimal("5e-6"), abs=0) == a
|
||||
assert approx(x, rel=Decimal("5e-7"), abs=0) != a
|
||||
|
||||
def test_fraction(self):
|
||||
within_1e6 = [
|
||||
|
@ -308,10 +298,10 @@ class TestApprox(object):
|
|||
assert (1, 2) != approx((1, 2, 3))
|
||||
|
||||
def test_dict(self):
|
||||
actual = {'a': 1 + 1e-7, 'b': 2 + 1e-8}
|
||||
actual = {"a": 1 + 1e-7, "b": 2 + 1e-8}
|
||||
# Dictionaries became ordered in python3.6, so switch up the order here
|
||||
# to make sure it doesn't matter.
|
||||
expected = {'b': 2, 'a': 1}
|
||||
expected = {"b": 2, "a": 1}
|
||||
|
||||
# Return false if any element is outside the tolerance.
|
||||
assert actual == approx(expected, rel=5e-7, abs=0)
|
||||
|
@ -320,12 +310,12 @@ class TestApprox(object):
|
|||
assert approx(expected, rel=5e-8, abs=0) != actual
|
||||
|
||||
def test_dict_wrong_len(self):
|
||||
assert {'a': 1, 'b': 2} != approx({'a': 1})
|
||||
assert {'a': 1, 'b': 2} != approx({'a': 1, 'c': 2})
|
||||
assert {'a': 1, 'b': 2} != approx({'a': 1, 'b': 2, 'c': 3})
|
||||
assert {"a": 1, "b": 2} != approx({"a": 1})
|
||||
assert {"a": 1, "b": 2} != approx({"a": 1, "c": 2})
|
||||
assert {"a": 1, "b": 2} != approx({"a": 1, "b": 2, "c": 3})
|
||||
|
||||
def test_numpy_array(self):
|
||||
np = pytest.importorskip('numpy')
|
||||
np = pytest.importorskip("numpy")
|
||||
|
||||
actual = np.array([1 + 1e-7, 2 + 1e-8])
|
||||
expected = np.array([1, 2])
|
||||
|
@ -343,7 +333,7 @@ class TestApprox(object):
|
|||
assert actual != approx(list(expected), rel=5e-8, abs=0)
|
||||
|
||||
def test_numpy_array_wrong_shape(self):
|
||||
np = pytest.importorskip('numpy')
|
||||
np = pytest.importorskip("numpy")
|
||||
|
||||
a12 = np.array([[1, 2]])
|
||||
a21 = np.array([[1], [2]])
|
||||
|
@ -354,10 +344,7 @@ class TestApprox(object):
|
|||
def test_doctests(self):
|
||||
parser = doctest.DocTestParser()
|
||||
test = parser.get_doctest(
|
||||
approx.__doc__,
|
||||
{'approx': approx},
|
||||
approx.__name__,
|
||||
None, None,
|
||||
approx.__doc__, {"approx": approx}, approx.__name__, None, None
|
||||
)
|
||||
runner = MyDocTestRunner()
|
||||
runner.run(test)
|
||||
|
@ -367,24 +354,28 @@ class TestApprox(object):
|
|||
Comparing approx instances inside lists should not produce an error in the detailed diff.
|
||||
Integration test for issue #2111.
|
||||
"""
|
||||
testdir.makepyfile("""
|
||||
testdir.makepyfile(
|
||||
"""
|
||||
import pytest
|
||||
def test_foo():
|
||||
assert [3] == [pytest.approx(4)]
|
||||
""")
|
||||
expected = '4.0e-06'
|
||||
"""
|
||||
)
|
||||
expected = "4.0e-06"
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines([
|
||||
'*At index 0 diff: 3 != 4 * {}'.format(expected),
|
||||
'=* 1 failed in *=',
|
||||
])
|
||||
result.stdout.fnmatch_lines(
|
||||
["*At index 0 diff: 3 != 4 * {}".format(expected), "=* 1 failed in *="]
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize('op', [
|
||||
pytest.param(operator.le, id='<='),
|
||||
pytest.param(operator.lt, id='<'),
|
||||
pytest.param(operator.ge, id='>='),
|
||||
pytest.param(operator.gt, id='>'),
|
||||
])
|
||||
@pytest.mark.parametrize(
|
||||
"op",
|
||||
[
|
||||
pytest.param(operator.le, id="<="),
|
||||
pytest.param(operator.lt, id="<"),
|
||||
pytest.param(operator.ge, id=">="),
|
||||
pytest.param(operator.gt, id=">"),
|
||||
],
|
||||
)
|
||||
def test_comparison_operator_type_error(self, op):
|
||||
"""
|
||||
pytest.approx should raise TypeError for operators other than == and != (#2003).
|
||||
|
@ -393,7 +384,7 @@ class TestApprox(object):
|
|||
op(1, approx(1, rel=1e-6, abs=1e-12))
|
||||
|
||||
def test_numpy_array_with_scalar(self):
|
||||
np = pytest.importorskip('numpy')
|
||||
np = pytest.importorskip("numpy")
|
||||
|
||||
actual = np.array([1 + 1e-7, 1 - 1e-8])
|
||||
expected = 1.0
|
||||
|
@ -404,7 +395,7 @@ class TestApprox(object):
|
|||
assert approx(expected, rel=5e-8, abs=0) != actual
|
||||
|
||||
def test_numpy_scalar_with_array(self):
|
||||
np = pytest.importorskip('numpy')
|
||||
np = pytest.importorskip("numpy")
|
||||
|
||||
actual = 1.0
|
||||
expected = np.array([1 + 1e-7, 1 - 1e-8])
|
||||
|
|
|
@ -4,8 +4,10 @@ from _pytest import runner
|
|||
|
||||
|
||||
class TestOEJSKITSpecials(object):
|
||||
|
||||
def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage
|
||||
testdir.makeconftest("""
|
||||
testdir.makeconftest(
|
||||
"""
|
||||
import pytest
|
||||
def pytest_pycollect_makeitem(collector, name, obj):
|
||||
if name == "MyClass":
|
||||
|
@ -13,25 +15,29 @@ class TestOEJSKITSpecials(object):
|
|||
class MyCollector(pytest.Collector):
|
||||
def reportinfo(self):
|
||||
return self.fspath, 3, "xyz"
|
||||
""")
|
||||
modcol = testdir.getmodulecol("""
|
||||
"""
|
||||
)
|
||||
modcol = testdir.getmodulecol(
|
||||
"""
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
return 42
|
||||
class MyClass(object):
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
# this hook finds funcarg factories
|
||||
rep = runner.collect_one_node(collector=modcol)
|
||||
clscol = rep.result[0]
|
||||
clscol.obj = lambda arg1: None
|
||||
clscol.funcargs = {}
|
||||
pytest._fillfuncargs(clscol)
|
||||
assert clscol.funcargs['arg1'] == 42
|
||||
assert clscol.funcargs["arg1"] == 42
|
||||
|
||||
def test_autouse_fixture(self, testdir): # rough jstests usage
|
||||
testdir.makeconftest("""
|
||||
testdir.makeconftest(
|
||||
"""
|
||||
import pytest
|
||||
def pytest_pycollect_makeitem(collector, name, obj):
|
||||
if name == "MyClass":
|
||||
|
@ -39,8 +45,10 @@ class TestOEJSKITSpecials(object):
|
|||
class MyCollector(pytest.Collector):
|
||||
def reportinfo(self):
|
||||
return self.fspath, 3, "xyz"
|
||||
""")
|
||||
modcol = testdir.getmodulecol("""
|
||||
"""
|
||||
)
|
||||
modcol = testdir.getmodulecol(
|
||||
"""
|
||||
import pytest
|
||||
@pytest.fixture(autouse=True)
|
||||
def hello():
|
||||
|
@ -50,7 +58,8 @@ class TestOEJSKITSpecials(object):
|
|||
return 42
|
||||
class MyClass(object):
|
||||
pass
|
||||
""")
|
||||
"""
|
||||
)
|
||||
# this hook finds funcarg factories
|
||||
rep = runner.collect_one_node(modcol)
|
||||
clscol = rep.result[0]
|
||||
|
@ -61,6 +70,7 @@ class TestOEJSKITSpecials(object):
|
|||
|
||||
|
||||
def test_wrapped_getfslineno():
|
||||
|
||||
def func():
|
||||
pass
|
||||
|
||||
|
@ -72,12 +82,14 @@ def test_wrapped_getfslineno():
@wrap
def wrapped_func(x, y, z):
pass

fs, lineno = python.getfslineno(wrapped_func)
fs2, lineno2 = python.getfslineno(wrap)
assert lineno > lineno2, "getfslineno does not unwrap correctly"


class TestMockDecoration(object):

def test_wrapped_getfuncargnames(self):
from _pytest.compat import getfuncargnames


@ -100,8 +112,10 @@ class TestMockDecoration(object):
from _pytest.compat import getfuncargnames

def wrap(f):

def func():
pass

func.__wrapped__ = f
func.patchings = ["qwe"]
return func

@ -115,7 +129,8 @@ class TestMockDecoration(object):

def test_unittest_mock(self, testdir):
pytest.importorskip("unittest.mock")
testdir.makepyfile("""
testdir.makepyfile(
"""
import unittest.mock
class T(unittest.TestCase):
@unittest.mock.patch("os.path.abspath")

@ -123,13 +138,15 @@ class TestMockDecoration(object):
import os
os.path.abspath("hello")
abspath.assert_any_call("hello")
""")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)

def test_unittest_mock_and_fixture(self, testdir):
pytest.importorskip("unittest.mock")
testdir.makepyfile("""
testdir.makepyfile(
"""
import os.path
import unittest.mock
import pytest

@ -143,14 +160,16 @@ class TestMockDecoration(object):
def test_hello(inject_me):
import os
os.path.abspath("hello")
""")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)

def test_unittest_mock_and_pypi_mock(self, testdir):
pytest.importorskip("unittest.mock")
pytest.importorskip("mock", "1.0.1")
testdir.makepyfile("""
testdir.makepyfile(
"""
import mock
import unittest.mock
class TestBoth(object):

@ -165,13 +184,15 @@ class TestMockDecoration(object):
import os
os.path.abspath("hello")
abspath.assert_any_call("hello")
""")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)

def test_mock(self, testdir):
pytest.importorskip("mock", "1.0.1")
testdir.makepyfile("""
testdir.makepyfile(
"""
import os
import unittest
import mock

@ -191,17 +212,20 @@ class TestMockDecoration(object):
os.path.normpath(os.path.abspath("hello"))
normpath.assert_any_call("this")
assert os.path.basename("123") == "mock_basename"
""")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
calls = reprec.getcalls("pytest_runtest_logreport")
funcnames = [call.report.location[2] for call in calls
if call.report.when == "call"]
funcnames = [
call.report.location[2] for call in calls if call.report.when == "call"
]
assert funcnames == ["T.test_hello", "test_someting"]

def test_mock_sorting(self, testdir):
pytest.importorskip("mock", "1.0.1")
testdir.makepyfile("""
testdir.makepyfile(
"""
import os
import mock


@ -214,7 +238,8 @@ class TestMockDecoration(object):
@mock.patch("os.path.abspath")
def test_three(abspath):
pass
""")
"""
)
reprec = testdir.inline_run()
calls = reprec.getreports("pytest_runtest_logreport")
calls = [x for x in calls if x.when == "call"]

@ -223,7 +248,8 @@ class TestMockDecoration(object):

def test_mock_double_patch_issue473(self, testdir):
pytest.importorskip("mock", "1.0.1")
testdir.makepyfile("""
testdir.makepyfile(
"""
from mock import patch
from pytest import mark


@ -233,20 +259,25 @@ class TestMockDecoration(object):
class TestSimple(object):
def test_simple_thing(self, mock_path, mock_getcwd):
pass
""")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)


class TestReRunTests(object):

def test_rerun(self, testdir):
testdir.makeconftest("""
testdir.makeconftest(
"""
from _pytest.runner import runtestprotocol
def pytest_runtest_protocol(item, nextitem):
runtestprotocol(item, log=False, nextitem=nextitem)
runtestprotocol(item, log=True, nextitem=nextitem)
""")
testdir.makepyfile("""
"""
)
testdir.makepyfile(
"""
import pytest
count = 0
req = None

@ -259,36 +290,46 @@ class TestReRunTests(object):
count += 1
def test_fix(fix):
pass
""")
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines("""
result.stdout.fnmatch_lines(
"""
*fix count 0*
*fix count 1*
""")
result.stdout.fnmatch_lines("""
"""
)
result.stdout.fnmatch_lines(
"""
*2 passed*
""")
"""
)


def test_pytestconfig_is_session_scoped():
from _pytest.fixtures import pytestconfig

assert pytestconfig._pytestfixturefunction.scope == "session"


class TestNoselikeTestAttribute(object):

def test_module_with_global_test(self, testdir):
testdir.makepyfile("""
testdir.makepyfile(
"""
__test__ = False
def test_hello():
pass
""")
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls

def test_class_and_method(self, testdir):
testdir.makepyfile("""
testdir.makepyfile(
"""
__test__ = True
def test_func():
pass

@ -298,14 +339,16 @@ class TestNoselikeTestAttribute(object):
__test__ = False
def test_method(self):
pass
""")
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls

def test_unittest_class(self, testdir):
testdir.makepyfile("""
testdir.makepyfile(
"""
import unittest
class TC(unittest.TestCase):
def test_1(self):

@ -314,7 +357,8 @@ class TestNoselikeTestAttribute(object):
__test__ = False
def test_2(self):
pass
""")
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
call = reprec.getcalls("pytest_collection_modifyitems")[0]

@ -328,7 +372,8 @@ class TestNoselikeTestAttribute(object):
RPC wrapper), we shouldn't assume this meant "__test__ = True".
"""
# https://github.com/pytest-dev/pytest/issues/1204
testdir.makepyfile("""
testdir.makepyfile(
"""
class MetaModel(type):

def __getattr__(cls, key):

@ -344,7 +389,8 @@ class TestNoselikeTestAttribute(object):

def test_blah(self):
pass
""")
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
call = reprec.getcalls("pytest_collection_modifyitems")[0]

@ -355,7 +401,8 @@ class TestNoselikeTestAttribute(object):
class TestParameterize(object):

def test_idfn_marker(self, testdir):
testdir.makepyfile("""
testdir.makepyfile(
"""
import pytest

def idfn(param):

@ -369,15 +416,14 @@ class TestParameterize(object):
@pytest.mark.parametrize('a,b', [(0, 2), (1, 2)], ids=idfn)
def test_params(a, b):
pass
""")
res = testdir.runpytest('--collect-only')
res.stdout.fnmatch_lines([
"*spam-2*",
"*ham-2*",
])
"""
)
res = testdir.runpytest("--collect-only")
res.stdout.fnmatch_lines(["*spam-2*", "*ham-2*"])

def test_idfn_fixture(self, testdir):
testdir.makepyfile("""
testdir.makepyfile(
"""
import pytest

def idfn(param):

@ -398,9 +444,7 @@ class TestParameterize(object):

def test_params(a, b):
pass
""")
res = testdir.runpytest('--collect-only')
res.stdout.fnmatch_lines([
"*spam-2*",
"*ham-2*",
])
"""
)
res = testdir.runpytest("--collect-only")
res.stdout.fnmatch_lines(["*spam-2*", "*ham-2*"])
File diff suppressed because it is too large
@ -4,6 +4,7 @@ import sys


class TestRaises(object):

def test_raises(self):
source = "int('qwe')"
excinfo = pytest.raises(ValueError, source)

@ -18,19 +19,23 @@ class TestRaises(object):
pytest.raises(SyntaxError, "qwe qwe qwe")

def test_raises_function(self):
pytest.raises(ValueError, int, 'hello')
pytest.raises(ValueError, int, "hello")

def test_raises_callable_no_exception(self):

class A(object):

def __call__(self):
pass

try:
pytest.raises(ValueError, A())
except pytest.raises.Exception:
pass

def test_raises_as_contextmanager(self, testdir):
testdir.makepyfile("""
testdir.makepyfile(
"""
from __future__ import with_statement
import py, pytest
import _pytest._code

@ -52,28 +57,27 @@ class TestRaises(object):
with pytest.raises(ZeroDivisionError):
with pytest.raises(ValueError):
1/0
""")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'*3 passed*',
])
result.stdout.fnmatch_lines(["*3 passed*"])

def test_noclass(self):
with pytest.raises(TypeError):
pytest.raises('wrong', lambda: None)
pytest.raises("wrong", lambda: None)

def test_invalid_arguments_to_raises(self):
with pytest.raises(TypeError, match='unknown'):
with pytest.raises(TypeError, unknown='bogus'):
with pytest.raises(TypeError, match="unknown"):
with pytest.raises(TypeError, unknown="bogus"):
raise ValueError()

def test_tuple(self):
with pytest.raises((KeyError, ValueError)):
raise KeyError('oops')
raise KeyError("oops")

def test_no_raise_message(self):
try:
pytest.raises(ValueError, int, '0')
pytest.raises(ValueError, int, "0")
except pytest.raises.Exception as e:
assert e.msg == "DID NOT RAISE {}".format(repr(ValueError))
else:

@ -97,7 +101,7 @@ class TestRaises(object):
else:
assert False, "Expected pytest.raises.Exception"

@pytest.mark.parametrize('method', ['function', 'with'])
@pytest.mark.parametrize("method", ["function", "with"])
def test_raises_cyclic_reference(self, method):
"""
Ensure pytest.raises does not leave a reference cycle (#1965).

@ -105,11 +109,12 @@ class TestRaises(object):
import gc

class T(object):

def __call__(self):
raise ValueError

t = T()
if method == 'function':
if method == "function":
pytest.raises(ValueError, t)
else:
with pytest.raises(ValueError):

@ -127,17 +132,19 @@ class TestRaises(object):
def test_raises_match(self):
msg = r"with base \d+"
with pytest.raises(ValueError, match=msg):
int('asdf')
int("asdf")

msg = "with base 10"
with pytest.raises(ValueError, match=msg):
int('asdf')
int("asdf")

msg = "with base 16"
expr = r"Pattern '{}' not found in 'invalid literal for int\(\) with base 10: 'asdf''".format(msg)
expr = r"Pattern '{}' not found in 'invalid literal for int\(\) with base 10: 'asdf''".format(
msg
)
with pytest.raises(AssertionError, match=expr):
with pytest.raises(ValueError, match=msg):
int('asdf', base=10)
int("asdf", base=10)

def test_raises_match_wrong_type(self):
"""Raising an exception with the wrong type and match= given.

@ -146,15 +153,16 @@ class TestRaises(object):
really relevant if we got a different exception.
"""
with pytest.raises(ValueError):
with pytest.raises(IndexError, match='nomatch'):
int('asdf')
with pytest.raises(IndexError, match="nomatch"):
int("asdf")

def test_raises_exception_looks_iterable(self):
from six import add_metaclass

class Meta(type(object)):

def __getitem__(self, item):
return 1/0
return 1 / 0

def __len__(self):
return 1

@ -163,5 +171,7 @@ class TestRaises(object):
class ClassLooksIterableException(Exception):
pass

with pytest.raises(Failed, match="DID NOT RAISE <class 'raises.ClassLooksIterableException'>"):
with pytest.raises(
Failed, match="DID NOT RAISE <class 'raises.ClassLooksIterableException'>"
):
pytest.raises(ClassLooksIterableException, lambda: None)
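
A minimal sketch of the match= behaviour exercised by the raises hunks above (illustrative only; it assumes a pytest version where pytest.raises accepts a match regex, as these tests do):

    import pytest

    def test_raises_match_sketch():
        # match= is searched against str() of the raised exception; a
        # non-matching pattern makes the block fail even though the
        # expected exception type was raised
        with pytest.raises(ValueError, match=r"with base \d+"):
            int("asdf")
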
@ -1,14 +1,14 @@
import pytest


@pytest.fixture(params=['--setup-only', '--setup-plan', '--setup-show'],
scope='module')
@pytest.fixture(params=["--setup-only", "--setup-plan", "--setup-show"], scope="module")
def mode(request):
return request.param


def test_show_only_active_fixtures(testdir, mode):
p = testdir.makepyfile('''
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture
def _arg0():

@ -18,21 +18,21 @@ def test_show_only_active_fixtures(testdir, mode):
"""arg1 docstring"""
def test_arg1(arg1):
pass
''')
'''
)

result = testdir.runpytest(mode, p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*SETUP F arg1*',
'*test_arg1 (fixtures used: arg1)*',
'*TEARDOWN F arg1*',
])
result.stdout.fnmatch_lines(
["*SETUP F arg1*", "*test_arg1 (fixtures used: arg1)*", "*TEARDOWN F arg1*"]
)
assert "_arg0" not in result.stdout.str()


def test_show_different_scopes(testdir, mode):
p = testdir.makepyfile('''
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture
def arg_function():

@ -42,50 +42,60 @@ def test_show_different_scopes(testdir, mode):
"""session scoped fixture"""
def test_arg1(arg_session, arg_function):
pass
''')
'''
)

result = testdir.runpytest(mode, p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'SETUP S arg_session*',
'*SETUP F arg_function*',
'*test_arg1 (fixtures used: arg_function, arg_session)*',
'*TEARDOWN F arg_function*',
'TEARDOWN S arg_session*',
])
result.stdout.fnmatch_lines(
[
"SETUP S arg_session*",
"*SETUP F arg_function*",
"*test_arg1 (fixtures used: arg_function, arg_session)*",
"*TEARDOWN F arg_function*",
"TEARDOWN S arg_session*",
]
)


def test_show_nested_fixtures(testdir, mode):
testdir.makeconftest('''
testdir.makeconftest(
'''
import pytest
@pytest.fixture(scope='session')
def arg_same():
"""session scoped fixture"""
''')
p = testdir.makepyfile('''
'''
)
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture(scope='function')
def arg_same(arg_same):
"""function scoped fixture"""
def test_arg1(arg_same):
pass
''')
'''
)

result = testdir.runpytest(mode, p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'SETUP S arg_same*',
'*SETUP F arg_same (fixtures used: arg_same)*',
'*test_arg1 (fixtures used: arg_same)*',
'*TEARDOWN F arg_same*',
'TEARDOWN S arg_same*',
])
result.stdout.fnmatch_lines(
[
"SETUP S arg_same*",
"*SETUP F arg_same (fixtures used: arg_same)*",
"*test_arg1 (fixtures used: arg_same)*",
"*TEARDOWN F arg_same*",
"TEARDOWN S arg_same*",
]
)


def test_show_fixtures_with_autouse(testdir, mode):
p = testdir.makepyfile('''
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture
def arg_function():

@ -95,92 +105,104 @@ def test_show_fixtures_with_autouse(testdir, mode):
"""session scoped fixture"""
def test_arg1(arg_function):
pass
''')
'''
)

result = testdir.runpytest(mode, p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'SETUP S arg_session*',
'*SETUP F arg_function*',
'*test_arg1 (fixtures used: arg_function, arg_session)*',
])
result.stdout.fnmatch_lines(
[
"SETUP S arg_session*",
"*SETUP F arg_function*",
"*test_arg1 (fixtures used: arg_function, arg_session)*",
]
)


def test_show_fixtures_with_parameters(testdir, mode):
testdir.makeconftest('''
testdir.makeconftest(
'''
import pytest
@pytest.fixture(scope='session', params=['foo', 'bar'])
def arg_same():
"""session scoped fixture"""
''')
p = testdir.makepyfile('''
'''
)
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture(scope='function')
def arg_other(arg_same):
"""function scoped fixture"""
def test_arg1(arg_other):
pass
''')
'''
)

result = testdir.runpytest(mode, p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'SETUP S arg_same?foo?',
'TEARDOWN S arg_same?foo?',
'SETUP S arg_same?bar?',
'TEARDOWN S arg_same?bar?',
])
result.stdout.fnmatch_lines(
[
"SETUP S arg_same?foo?",
"TEARDOWN S arg_same?foo?",
"SETUP S arg_same?bar?",
"TEARDOWN S arg_same?bar?",
]
)


def test_show_fixtures_with_parameter_ids(testdir, mode):
testdir.makeconftest('''
testdir.makeconftest(
'''
import pytest
@pytest.fixture(
scope='session', params=['foo', 'bar'], ids=['spam', 'ham'])
def arg_same():
"""session scoped fixture"""
''')
p = testdir.makepyfile('''
'''
)
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture(scope='function')
def arg_other(arg_same):
"""function scoped fixture"""
def test_arg1(arg_other):
pass
''')
'''
)

result = testdir.runpytest(mode, p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'SETUP S arg_same?spam?',
'SETUP S arg_same?ham?',
])
result.stdout.fnmatch_lines(
["SETUP S arg_same?spam?", "SETUP S arg_same?ham?"]
)


def test_show_fixtures_with_parameter_ids_function(testdir, mode):
p = testdir.makepyfile('''
p = testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=['foo', 'bar'], ids=lambda p: p.upper())
def foobar():
pass
def test_foobar(foobar):
pass
''')
"""
)

result = testdir.runpytest(mode, p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*SETUP F foobar?FOO?',
'*SETUP F foobar?BAR?',
])
result.stdout.fnmatch_lines(["*SETUP F foobar?FOO?", "*SETUP F foobar?BAR?"])


def test_dynamic_fixture_request(testdir):
p = testdir.makepyfile('''
p = testdir.makepyfile(
"""
import pytest
@pytest.fixture()
def dynamically_requested_fixture():

@ -190,19 +212,23 @@ def test_dynamic_fixture_request(testdir):
request.getfixturevalue('dynamically_requested_fixture')
def test_dyn(dependent_fixture):
pass
''')
"""
)

result = testdir.runpytest('--setup-only', p)
result = testdir.runpytest("--setup-only", p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*SETUP F dynamically_requested_fixture',
'*TEARDOWN F dynamically_requested_fixture'
])
result.stdout.fnmatch_lines(
[
"*SETUP F dynamically_requested_fixture",
"*TEARDOWN F dynamically_requested_fixture",
]
)


def test_capturing(testdir):
p = testdir.makepyfile('''
p = testdir.makepyfile(
"""
import pytest, sys
@pytest.fixture()
def one():

@ -213,31 +239,31 @@ def test_capturing(testdir):
assert 0
def test_capturing(two):
pass
''')
"""
)

result = testdir.runpytest('--setup-only', p)
result.stdout.fnmatch_lines([
'this should be captured',
'this should also be captured'
])
result = testdir.runpytest("--setup-only", p)
result.stdout.fnmatch_lines(
["this should be captured", "this should also be captured"]
)


def test_show_fixtures_and_execute_test(testdir):
""" Verifies that setups are shown and tests are executed. """
p = testdir.makepyfile('''
p = testdir.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
assert True
def test_arg(arg):
assert False
''')
"""
)

result = testdir.runpytest("--setup-show", p)
assert result.ret == 1

result.stdout.fnmatch_lines([
'*SETUP F arg*',
'*test_arg (fixtures used: arg)F*',
'*TEARDOWN F arg*',
])
result.stdout.fnmatch_lines(
["*SETUP F arg*", "*test_arg (fixtures used: arg)F*", "*TEARDOWN F arg*"]
)
@ -1,19 +1,19 @@
def test_show_fixtures_and_test(testdir):
""" Verifies that fixtures are not executed. """
p = testdir.makepyfile('''
p = testdir.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
assert False
def test_arg(arg):
assert False
''')
"""
)

result = testdir.runpytest("--setup-plan", p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*SETUP F arg*',
'*test_arg (fixtures used: arg)',
'*TEARDOWN F arg*',
])
result.stdout.fnmatch_lines(
["*SETUP F arg*", "*test_arg (fixtures used: arg)", "*TEARDOWN F arg*"]
)
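
The pattern black applies throughout these hunks is the same: string quotes are normalized to double quotes, and a call whose arguments exceed black's default line length is split across lines (or collapsed back onto one line when it fits). A representative before/after, taken from the hunks above and shown together here purely for illustration:

    # before
    result.stdout.fnmatch_lines([
        '*SETUP F arg*',
        '*test_arg (fixtures used: arg)',
        '*TEARDOWN F arg*',
    ])

    # after: double quotes; the list fits on one line inside the wrapped call
    result.stdout.fnmatch_lines(
        ["*SETUP F arg*", "*test_arg (fixtures used: arg)", "*TEARDOWN F arg*"]
    )
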
@ -2,13 +2,14 @@


def test_no_items_should_not_show_output(testdir):
result = testdir.runpytest('--fixtures-per-test')
assert 'fixtures used by' not in result.stdout.str()
result = testdir.runpytest("--fixtures-per-test")
assert "fixtures used by" not in result.stdout.str()
assert result.ret == 0


def test_fixtures_in_module(testdir):
p = testdir.makepyfile('''
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture
def _arg0():

@ -18,22 +19,26 @@ def test_fixtures_in_module(testdir):
"""arg1 docstring"""
def test_arg1(arg1):
pass
''')
'''
)

result = testdir.runpytest("--fixtures-per-test", p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*fixtures used by test_arg1*',
'*(test_fixtures_in_module.py:9)*',
'arg1',
' arg1 docstring',
])
result.stdout.fnmatch_lines(
[
"*fixtures used by test_arg1*",
"*(test_fixtures_in_module.py:9)*",
"arg1",
" arg1 docstring",
]
)
assert "_arg0" not in result.stdout.str()


def test_fixtures_in_conftest(testdir):
testdir.makeconftest('''
testdir.makeconftest(
'''
import pytest
@pytest.fixture
def arg1():

@ -46,35 +51,41 @@ def test_fixtures_in_conftest(testdir):
"""arg3
docstring
"""
''')
p = testdir.makepyfile('''
'''
)
p = testdir.makepyfile(
"""
def test_arg2(arg2):
pass
def test_arg3(arg3):
pass
''')
"""
)
result = testdir.runpytest("--fixtures-per-test", p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*fixtures used by test_arg2*',
'*(test_fixtures_in_conftest.py:2)*',
'arg2',
' arg2 docstring',
'*fixtures used by test_arg3*',
'*(test_fixtures_in_conftest.py:4)*',
'arg1',
' arg1 docstring',
'arg2',
' arg2 docstring',
'arg3',
' arg3',
' docstring',
])
result.stdout.fnmatch_lines(
[
"*fixtures used by test_arg2*",
"*(test_fixtures_in_conftest.py:2)*",
"arg2",
" arg2 docstring",
"*fixtures used by test_arg3*",
"*(test_fixtures_in_conftest.py:4)*",
"arg1",
" arg1 docstring",
"arg2",
" arg2 docstring",
"arg3",
" arg3",
" docstring",
]
)


def test_should_show_fixtures_used_by_test(testdir):
testdir.makeconftest('''
testdir.makeconftest(
'''
import pytest
@pytest.fixture
def arg1():

@ -82,30 +93,36 @@ def test_should_show_fixtures_used_by_test(testdir):
@pytest.fixture
def arg2():
"""arg2 from conftest"""
''')
p = testdir.makepyfile('''
'''
)
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture
def arg1():
"""arg1 from testmodule"""
def test_args(arg1, arg2):
pass
''')
'''
)
result = testdir.runpytest("--fixtures-per-test", p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*fixtures used by test_args*',
'*(test_should_show_fixtures_used_by_test.py:6)*',
'arg1',
' arg1 from testmodule',
'arg2',
' arg2 from conftest',
])
result.stdout.fnmatch_lines(
[
"*fixtures used by test_args*",
"*(test_should_show_fixtures_used_by_test.py:6)*",
"arg1",
" arg1 from testmodule",
"arg2",
" arg2 from conftest",
]
)


def test_verbose_include_private_fixtures_and_loc(testdir):
testdir.makeconftest('''
testdir.makeconftest(
'''
import pytest
@pytest.fixture
def _arg1():

@ -113,46 +130,54 @@ def test_verbose_include_private_fixtures_and_loc(testdir):
@pytest.fixture
def arg2(_arg1):
"""arg2 from conftest"""
''')
p = testdir.makepyfile('''
'''
)
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture
def arg3():
"""arg3 from testmodule"""
def test_args(arg2, arg3):
pass
''')
'''
)
result = testdir.runpytest("--fixtures-per-test", "-v", p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*fixtures used by test_args*',
'*(test_verbose_include_private_fixtures_and_loc.py:6)*',
'_arg1 -- conftest.py:3',
' _arg1 from conftest',
'arg2 -- conftest.py:6',
' arg2 from conftest',
'arg3 -- test_verbose_include_private_fixtures_and_loc.py:3',
' arg3 from testmodule',
])
result.stdout.fnmatch_lines(
[
"*fixtures used by test_args*",
"*(test_verbose_include_private_fixtures_and_loc.py:6)*",
"_arg1 -- conftest.py:3",
" _arg1 from conftest",
"arg2 -- conftest.py:6",
" arg2 from conftest",
"arg3 -- test_verbose_include_private_fixtures_and_loc.py:3",
" arg3 from testmodule",
]
)


def test_doctest_items(testdir):
testdir.makepyfile('''
testdir.makepyfile(
'''
def foo():
"""
>>> 1 + 1
2
"""
''')
testdir.maketxtfile('''
'''
)
testdir.maketxtfile(
"""
>>> 1 + 1
2
''')
result = testdir.runpytest("--fixtures-per-test", "--doctest-modules",
"--doctest-glob=*.txt", "-v")
"""
)
result = testdir.runpytest(
"--fixtures-per-test", "--doctest-modules", "--doctest-glob=*.txt", "-v"
)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*collected 2 items*',
])
result.stdout.fnmatch_lines(["*collected 2 items*"])
@ -18,5 +18,5 @@ def test_pycollector_makeitem_is_deprecated():

collector = PyCollectorMock()
with pytest.deprecated_call():
collector.makeitem('foo', 'bar')
collector.makeitem("foo", "bar")
assert collector.called
@ -11,12 +11,13 @@ def equal_with_bash(prefix, ffc, fc, out=None):
res_bash = set(fc(prefix))
retval = set(res) == res_bash
if out:
out.write('equal_with_bash %s %s\n' % (retval, res))
out.write("equal_with_bash %s %s\n" % (retval, res))
if not retval:
out.write(' python - bash: %s\n' % (set(res) - res_bash))
out.write(' bash - python: %s\n' % (res_bash - set(res)))
out.write(" python - bash: %s\n" % (set(res) - res_bash))
out.write(" bash - python: %s\n" % (res_bash - set(res)))
return retval


# copied from argcomplete.completers as import from there
# also pulls in argcomplete.__init__ which opens filedescriptor 9
# this gives an IOError at the end of testrun

@ -26,10 +27,9 @@ def _wrapcall(*args, **kargs):
try:
if sys.version_info > (2, 7):
return subprocess.check_output(*args, **kargs).decode().splitlines()
if 'stdout' in kargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(
stdout=subprocess.PIPE, *args, **kargs)
if "stdout" in kargs:
raise ValueError("stdout argument not allowed, it will be overridden.")
process = subprocess.Popen(stdout=subprocess.PIPE, *args, **kargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:

@ -43,47 +43,57 @@ def _wrapcall(*args, **kargs):


class FilesCompleter(object):
'File completer class, optionally takes a list of allowed extensions'
"File completer class, optionally takes a list of allowed extensions"

def __init__(self, allowednames=(), directories=True):
# Fix if someone passes in a string instead of a list
if type(allowednames) is str:
allowednames = [allowednames]

self.allowednames = [x.lstrip('*').lstrip('.') for x in allowednames]
self.allowednames = [x.lstrip("*").lstrip(".") for x in allowednames]
self.directories = directories

def __call__(self, prefix, **kwargs):
completion = []
if self.allowednames:
if self.directories:
files = _wrapcall(['bash', '-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion += [f + '/' for f in files]
files = _wrapcall(
["bash", "-c", "compgen -A directory -- '{p}'".format(p=prefix)]
)
completion += [f + "/" for f in files]
for x in self.allowednames:
completion += _wrapcall(['bash', '-c',
"compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix)])
completion += _wrapcall(
[
"bash",
"-c",
"compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix),
]
)
else:
completion += _wrapcall(['bash', '-c',
"compgen -A file -- '{p}'".format(p=prefix)])
completion += _wrapcall(
["bash", "-c", "compgen -A file -- '{p}'".format(p=prefix)]
)

anticomp = _wrapcall(['bash', '-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
anticomp = _wrapcall(
["bash", "-c", "compgen -A directory -- '{p}'".format(p=prefix)]
)

completion = list(set(completion) - set(anticomp))

if self.directories:
completion += [f + '/' for f in anticomp]
completion += [f + "/" for f in anticomp]
return completion


class TestArgComplete(object):

@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_compare_with_compgen(self):
from _pytest._argcomplete import FastFilesCompleter

ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in ['/', '/d', '/data', 'qqq', '']:
for x in ["/", "/d", "/data", "qqq", ""]:
assert equal_with_bash(x, ffc, fc, out=sys.stdout)

@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")

@ -92,7 +102,8 @@ class TestArgComplete(object):
ls /usr/<TAB>
"""
from _pytest._argcomplete import FastFilesCompleter

ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/usr/'.split():
for x in "/usr/".split():
assert not equal_with_bash(x, ffc, fc, out=sys.stdout)
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff