- make importorskip static at py.test.importorskip because it's
  used for conditional plugin loading
- fix case where xfail is defined at module/class level
- fixes and improvements to docs, correct links to plugins
- use new skip facilities here and there

--HG--
branch : trunk
holger krekel 2009-10-15 20:10:06 +02:00
parent 3ca770b420
commit d8b9b5f1c8
17 changed files with 148 additions and 148 deletions
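
For orientation: with importorskip exposed statically, a plugin module can guard its optional dependency at import time instead of failing with an ImportError. A minimal sketch of that pattern, modelled on the figleaf plugin changed below; the dependency name 'somelib' and the config attribute are placeholders, not part of this commit::

    # hypothetical pytest_somelib.py plugin module
    import py

    # skips this plugin's tests/hooks if the optional dependency is absent
    # or older than the required version
    somelib = py.test.importorskip("somelib", minversion="0.3")

    def pytest_configure(config):
        # safe to use the dependency here: the import above either
        # succeeded or already triggered a skip
        config._somelib_version = somelib.__version__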

View File

@@ -94,6 +94,25 @@ def raises(ExpectedException, *args, **kwargs):
             raise ExceptionFailure(msg="DID NOT RAISE",
                                    expr=args, expected=ExpectedException)

+def importorskip(modname, minversion=None):
+    """ return imported module or perform a dynamic skip() """
+    compile(modname, '', 'eval') # to catch syntaxerrors
+    try:
+        mod = __import__(modname, None, None, ['__doc__'])
+    except ImportError:
+        py.test.skip("could not import %r" %(modname,))
+    if minversion is None:
+        return mod
+    verattr = getattr(mod, '__version__', None)
+    if isinstance(minversion, str):
+        minver = minversion.split(".")
+    else:
+        minver = list(minversion)
+    if verattr is None or verattr.split(".") < minver:
+        py.test.skip("module %r has __version__ %r, required is: %r" %(
+                     modname, verattr, minversion))
+    return mod
+
 # exitcodes for the command line
 EXIT_OK = 0
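
A short usage sketch of the helper added above (docutils stands in for any optional dependency; the minimum version is arbitrary)::

    import py

    def setup_module(mod):
        # skips all tests in this module if docutils is missing,
        # or present but with a __version__ lower than "0.3"
        mod.docutils = py.test.importorskip("docutils", minversion="0.3")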

View File

@@ -4,7 +4,8 @@ write and report coverage data with 'figleaf'.
 """
 import py

-figleaf = py.test.importorskip("figleaf.annotate_html")
+py.test.importorskip("figleaf.annotate_html")
+import figleaf

 def pytest_addoption(parser):
     group = parser.addgroup('figleaf options')
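
Note that importorskip("pkg.sub") returns the 'pkg.sub' submodule (it imports with a non-empty fromlist), which is presumably why the plugin above now discards the return value and does a plain 'import figleaf' afterwards. The os.path example below illustrates the same behaviour and mirrors the new test added at the end of this commit::

    import py

    sub = py.test.importorskip("os.path")   # returns the os.path submodule
    import os                               # top-level package still needs a normal import
    assert sub is os.path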

View File

@@ -1,13 +1,12 @@
 """
-mark python test functions, classes or modules for conditional
-skipping (skipif) or as expected-to-fail (xfail). Both declarations
-lead to special reporting and both can be systematically associated
-with functions, whole classes or modules. The difference between
-the two is that 'xfail' will still execute test functions
-but it will revert the outcome. A passing test is now
-a failure and failing test is expected. All skip conditions
-are reported at the end of test run through the terminal
-reporter.
+advanced conditional skipping for python test functions, classes or modules.
+
+You can mark functions, classes or modules for conditional
+skipping (skipif) or as expected-to-fail (xfail). The difference
+between the two is that 'xfail' will still execute test functions
+but it will invert the outcome: a passing test becomes a failure and
+a failing test is a semi-passing one. All skip conditions are
+reported at the end of test run through the terminal reporter.

 .. _skipif:
@@ -20,15 +19,18 @@ Here is an example for skipping a test function on Python3::
     def test_function():
         ...

-Conditions are specified as python expressions
-and can access the ``sys`` module. They can also
-access the config object and thus depend on command
-line or conftest options::
+The 'skipif' marker accepts an **arbitrary python expression**
+as a condition. When setting up the test function the condition
+is evaluated by calling ``eval(expr, namespace)``. The namespace
+contains the ``sys`` and ``os`` modules as well as the
+test ``config`` object. The latter allows you to skip based
+on a test configuration value, e.g. like this::

-    @py.test.mark.skipif("config.getvalue('db') is None")
+    @py.test.mark.skipif("not config.getvalue('db')")
     def test_function(...):
         ...

 conditionally mark a function as "expected to fail"
 -------------------------------------------------------
@@ -53,7 +55,7 @@ skip/xfail a whole test class or module
 -------------------------------------------

 Instead of marking single functions you can skip
-a whole class of tests when runnign on a specific
+a whole class of tests when running on a specific
 platform::

     class TestSomething:
@ -75,13 +77,12 @@ You can use a helper to skip on a failing import::
You can use this helper at module level or within You can use this helper at module level or within
a test or setup function. a test or setup function.
You can aslo skip if a library does not have the right version:: You can also skip if a library does not come with a high enough version::
docutils = py.test.importorskip("docutils", minversion="0.3") docutils = py.test.importorskip("docutils", minversion="0.3")
The version will be read from the specified module's ``__version__`` attribute. The version will be read from the specified module's ``__version__`` attribute.
dynamically skip from within a test or setup dynamically skip from within a test or setup
------------------------------------------------- -------------------------------------------------
@@ -96,16 +97,11 @@ If you want to skip the execution of a test you can call

 .. _`funcarg factory`: ../funcargs.html#factory
 """

-# XXX not all skip-related code is contained in
-# this plugin yet, some remains in outcome.py and
-# the Skipped Exception is imported here and there.
+# XXX py.test.skip, .importorskip and the Skipped class
+# should also be defined in this plugin, requires thought/changes

 import py

-def pytest_namespace():
-    return {'importorskip': importorskip}
-
 def pytest_runtest_setup(item):
     expr, result = evalexpression(item, 'skipif')
     if result:
@@ -117,14 +113,15 @@ def pytest_runtest_makereport(__multicall__, item, call):
     if hasattr(item, 'obj'):
         expr, result = evalexpression(item, 'xfail')
         if result:
-            res = __multicall__.execute()
+            rep = __multicall__.execute()
             if call.excinfo:
-                res.skipped = True
-                res.failed = res.passed = False
+                rep.skipped = True
+                rep.failed = rep.passed = False
             else:
-                res.skipped = res.passed = False
-                res.failed = True
-            return res
+                rep.skipped = rep.passed = False
+                rep.failed = True
+            rep.keywords['xfail'] = True # expr
+            return rep

 def pytest_report_teststatus(report):
     if 'xfail' in report.keywords:
@@ -157,24 +154,6 @@ def pytest_terminal_summary(terminalreporter):
             pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
             tr._tw.line(pos)

-def importorskip(modname, minversion=None):
-    """ return imported module or perform a dynamic skip() """
-    compile(modname, '', 'eval') # to catch syntaxerrors
-    try:
-        mod = __import__(modname)
-    except ImportError:
-        py.test.skip("could not import %r" %(modname,))
-    if minversion is None:
-        return mod
-    verattr = getattr(mod, '__version__', None)
-    if isinstance(minversion, str):
-        minver = minversion.split(".")
-    else:
-        minver = list(minversion)
-    if verattr is None or verattr.split(".") < minver:
-        py.test.skip("module %r has __version__ %r, required is: %r" %(
-                     modname, verattr, minversion))
-    return mod
-
 def getexpression(item, keyword):
     if isinstance(item, py.test.collect.Function):
@@ -193,7 +172,7 @@ def evalexpression(item, keyword):
     result = None
     if expr:
         if isinstance(expr, str):
-            d = {'sys': py.std.sys, 'config': item.config}
+            d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config}
             result = eval(expr, d)
         else:
             result = expr
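
To make the namespace change above concrete, this is roughly what evaluating a string condition amounts to; the expressions in the comments are only examples::

    import os, sys

    def evaluate_condition(expr, config):
        # mirrors evalexpression(): string conditions are eval()ed against
        # a namespace containing os, sys and the test config object
        namespace = {'os': os, 'sys': sys, 'config': config}
        return eval(expr, namespace)

    # evaluate_condition("sys.platform == 'win32'", config)
    # evaluate_condition("not config.getvalue('db')", config)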

View File

@@ -21,11 +21,13 @@ plugins = [
 ]

 externals = {
-    'oejskit': 'run javascript tests in real life browsers',
-    'django': 'support for testing django applications',
+    'oejskit': "run javascript tests in real life browsers",
+    'django': "support for testing django applications",
+    # 'coverage': "support for using Ned's coverage module",
+    # 'xmlresult': "support for generating xml reports "
+    #              "and CruiseControl integration",
 }

 def warn(*args):
     msg = " ".join(map(str, args))
     print >>sys.stderr, "WARN:", msg
@@ -123,7 +125,7 @@ class RestWriter:
         self.out.close()
         print "wrote", self.target
         del self.out

 class PluginOverview(RestWriter):
     def makerest(self, config):
         plugindir = py.path.local(py.__file__).dirpath("test", "plugin")
@@ -145,7 +147,6 @@ class PluginOverview(RestWriter):
             self.Print()

 class HookSpec(RestWriter):
     def makerest(self, config):
         module = config.pluginmanager.hook._hookspecs
         source = py.code.Source(module)
@@ -212,7 +213,7 @@ class PluginDoc(RestWriter):
         #    "py/test/plugin/%s" %(hg_changeset, basename)))
         self.links.append((basename,
             "http://bitbucket.org/hpk42/py-trunk/raw/%s/"
-            "py/test/plugin/%s" %(pyversion, basename)))
+            "_py/test/plugin/%s" %(pyversion, basename)))
         self.links.append(('customize', '../customize.html'))
         self.links.append(('plugins', 'index.html'))
         self.links.append(('get in contact', '../../contact.html'))

View File

@@ -123,14 +123,14 @@ command line. Using the ``--pdb`` option you can automatically activate
 a PDB `Python debugger`_ when a test fails.

 advanced skipping of tests
--------------------------------
+======================================

-py.test has builtin support for skipping tests or expecting
+py.test has `advanced support for skipping tests`_ or expecting
 failures on tests on certain platforms. Apart from the
 minimal py.test style also unittest- and nose-style tests
 can make use of this feature.

+.. _`advanced support for skipping tests`: plugin/skipping.html
 .. _`funcargs mechanism`: funcargs.html
 .. _`unittest.py`: http://docs.python.org/library/unittest.html
 .. _`doctest.py`: http://docs.python.org/library/doctest.html

View File

@@ -2,7 +2,7 @@
 plugins for Python test functions
 =================================

-skipping_ mark python test functions, classes or modules for conditional
+skipping_ advanced conditional skipping for python test functions, classes or modules.
 figleaf_ write and report coverage data with 'figleaf'.

View File

@@ -1,38 +1,38 @@
 .. _`helpconfig`: helpconfig.html
 .. _`terminal`: terminal.html
-.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_recwarn.py
+.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_recwarn.py
 .. _`unittest`: unittest.html
-.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_monkeypatch.py
-.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_keyword.py
+.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_monkeypatch.py
+.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_keyword.py
 .. _`pastebin`: pastebin.html
 .. _`skipping`: skipping.html
 .. _`plugins`: index.html
-.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_doctest.py
+.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_doctest.py
 .. _`capture`: capture.html
-.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_nose.py
-.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_restdoc.py
+.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_nose.py
+.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_restdoc.py
 .. _`restdoc`: restdoc.html
-.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_pastebin.py
-.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_figleaf.py
-.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_hooklog.py
-.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_skipping.py
+.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pastebin.py
+.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_figleaf.py
+.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_hooklog.py
+.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_skipping.py
 .. _`checkout the py.test development version`: ../../download.html#checkout
-.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_helpconfig.py
+.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_helpconfig.py
 .. _`oejskit`: oejskit.html
 .. _`doctest`: doctest.html
 .. _`get in contact`: ../../contact.html
-.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_capture.py
+.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_capture.py
 .. _`figleaf`: figleaf.html
 .. _`customize`: ../customize.html
 .. _`hooklog`: hooklog.html
-.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_terminal.py
+.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_terminal.py
 .. _`recwarn`: recwarn.html
-.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_pdb.py
+.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pdb.py
 .. _`monkeypatch`: monkeypatch.html
 .. _`resultlog`: resultlog.html
 .. _`keyword`: keyword.html
 .. _`django`: django.html
-.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_unittest.py
+.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_unittest.py
 .. _`nose`: nose.html
-.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_resultlog.py
+.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_resultlog.py
 .. _`pdb`: pdb.html

View File

@@ -2,19 +2,17 @@
 pytest_skipping plugin
 ======================

-mark python test functions, classes or modules for conditional
+advanced conditional skipping for python test functions, classes or modules.

 .. contents::
   :local:

-skipping (skipif) or as expected-to-fail (xfail). Both declarations
-lead to special reporting and both can be systematically associated
-with functions, whole classes or modules. The difference between
-the two is that 'xfail' will still execute test functions
-but it will revert the outcome. A passing test is now
-a failure and failing test is expected. All skip conditions
-are reported at the end of test run through the terminal
-reporter.
+You can mark functions, classes or modules for conditional
+skipping (skipif) or as expected-to-fail (xfail). The difference
+between the two is that 'xfail' will still execute test functions
+but it will invert the outcome: a passing test becomes a failure and
+a failing test is a semi-passing one. All skip conditions are
+reported at the end of test run through the terminal reporter.

 .. _skipif:
@@ -27,15 +25,18 @@ Here is an example for skipping a test function on Python3::
     def test_function():
         ...

-Conditions are specified as python expressions
-and can access the ``sys`` module. They can also
-access the config object and thus depend on command
-line or conftest options::
+The 'skipif' marker accepts an **arbitrary python expression**
+as a condition. When setting up the test function the condition
+is evaluated by calling ``eval(expr, namespace)``. The namespace
+contains the ``sys`` and ``os`` modules as well as the
+test ``config`` object. The latter allows you to skip based
+on a test configuration value, e.g. like this::

-    @py.test.mark.skipif("config.getvalue('db') is None")
+    @py.test.mark.skipif("not config.getvalue('db')")
     def test_function(...):
         ...

 conditionally mark a function as "expected to fail"
 -------------------------------------------------------
@@ -60,7 +61,7 @@ skip/xfail a whole test class or module
 -------------------------------------------

 Instead of marking single functions you can skip
-a whole class of tests when runnign on a specific
+a whole class of tests when running on a specific
 platform::

     class TestSomething:
@@ -82,13 +83,12 @@ You can use a helper to skip on a failing import::
 You can use this helper at module level or within
 a test or setup function.

-You can aslo skip if a library does not have the right version::
+You can also skip if a library does not come with a high enough version::

     docutils = py.test.importorskip("docutils", minversion="0.3")

 The version will be read from the specified module's ``__version__`` attribute.

 dynamically skip from within a test or setup
 -------------------------------------------------
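
The class/module level usage referred to above is the same mechanism the test suite now relies on in this commit: a skipif (or xfail) string attribute set on the module or class. A hypothetical example file, not taken from the commit itself::

    # content of test_posix_only.py (illustrative only)
    import os

    # skip every test in this module on platforms without os.fork
    skipif = "not hasattr(os, 'fork')"

    class TestForked:
        # or limit the condition to a single class
        skipif = "not hasattr(os, 'fork')"

        def test_fork_returns_child_pid(self):
            pid = os.fork()
            if pid == 0:
                os._exit(0)
            assert pid > 0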

View File

@@ -53,6 +53,7 @@ _py.apipkg.initpkg(__name__, dict(
         '_PluginManager'   : '_py.test.pluginmanager:PluginManager',
         'raises'           : '_py.test.outcome:raises',
         'skip'             : '_py.test.outcome:skip',
+        'importorskip'     : '_py.test.outcome:importorskip',
         'fail'             : '_py.test.outcome:fail',
         'exit'             : '_py.test.outcome:exit',
         # configuration/initialization related test api

View File

@@ -135,11 +135,7 @@ def test_assert_with_brokenrepr_arg():

 class TestView:
     def setup_class(cls):
-        try:
-            from _py.code._assertionold import View
-        except ImportError:
-            py.test.skip("requires the compile package")
-        cls.View = View
+        cls.View = py.test.importorskip("_py.code._assertionold").View

     def test_class_dispatch(self):
         ### Use a custom class hierarchy with existing instances

View File

@@ -191,9 +191,8 @@ class TestSourceParsingAndCompiling:
         assert len(source) == 9
         assert source.getstatementrange(5) == (0, 9)

+    @py.test.mark.skipif("sys.version_info < (2,6)")
     def test_compile_to_ast(self):
-        if sys.version_info < (2, 6):
-            py.test.skip("requires Python 2.6")
         import ast
         source = Source("x = 4")
         mod = source.compile(flag=ast.PyCF_ONLY_AST)
@@ -257,7 +256,6 @@ def test_getstartingblock_multiline():
     assert len(l) == 4

 def test_getline_finally():
-    #py.test.skip("inner statements cannot be located yet.")
     def c(): pass
     excinfo = py.test.raises(TypeError, """
            teardown = None

View File

@@ -2,13 +2,6 @@ import py
 import os, sys
 from _py.io import terminalwriter

-def skip_win32():
-    if sys.platform == 'win32':
-        py.test.skip('Not relevant on win32')
-
-import os
-import py
-
 def test_terminal_width_COLUMNS(monkeypatch):
     """ Dummy test for get_terminal_width
     """
@@ -82,14 +75,14 @@ class BaseTests:
         assert len(l) == 1
         assert l[0] == "-" * 26 + " hello " + "-" * 27 + "\n"

+    @py.test.mark.skipif("sys.platform == 'win32'")
     def test__escaped(self):
-        skip_win32()
         tw = self.getwriter()
         text2 = tw._escaped("hello", (31))
         assert text2.find("hello") != -1

+    @py.test.mark.skipif("sys.platform == 'win32'")
     def test_markup(self):
-        skip_win32()
         tw = self.getwriter()
         for bold in (True, False):
             for color in ("red", "green"):
@@ -104,9 +97,9 @@ class BaseTests:
         tw.line("x", bold=True)
         tw.write("x\n", red=True)
         l = self.getlines()
-        skip_win32()
-        assert len(l[0]) > 2, l
-        assert len(l[1]) > 2, l
+        if sys.platform != "win32":
+            assert len(l[0]) > 2, l
+            assert len(l[1]) > 2, l

     def test_attr_fullwidth(self):
         tw = self.getwriter()

View File

@@ -1,9 +1,6 @@
 import py, sys, os

-def setup_module(mod):
-    if not hasattr(os, 'fork'):
-        py.test.skip("forkedfunc requires os.fork")
-    mod.tmpdir = py.test.ensuretemp(mod.__file__)
+skipif = "not hasattr(os, 'fork')"

 def test_waitfinish_removes_tempdir():
     ff = py.process.ForkedFunc(boxf1)
@@ -56,7 +53,7 @@ def test_forkedfunc_on_fds():

 def test_forkedfunc_signal():
     result = py.process.ForkedFunc(boxseg).waitfinish()
     assert result.retval is None
-    if py.std.sys.version_info < (2,4):
+    if sys.version_info < (2,4):
         py.test.skip("signal detection does not work with python prior 2.4")
     assert result.signal == 11

View File

@@ -13,5 +13,4 @@ def test_kill():
     if sys.platform == "win32" and ret == 0:
         py.test.skip("XXX on win32, subprocess.Popen().wait() on a killed "
                      "process does not yield return value != 0")
     assert ret != 0

View File

@@ -218,9 +218,8 @@ class TestExecutionNonForked(BaseFunctionalTests):
             py.test.fail("did not raise")

 class TestExecutionForked(BaseFunctionalTests):
+    skipif = "not hasattr(os, 'fork')"
     def getrunner(self):
-        if not hasattr(py.std.os, 'fork'):
-            py.test.skip("no os.fork available")
         return runner.forked_run_report

     def test_suicide(self, testdir):
@@ -262,10 +261,8 @@ class TestCollectionReports:
         assert not rep.passed
         assert rep.skipped

+@py.test.mark.skipif("not hasattr(os, 'fork')")
 def test_functional_boxed(testdir):
-    if not hasattr(py.std.os, 'fork'):
-        py.test.skip("needs os.fork")
     p1 = testdir.makepyfile("""
         import os
         def test_function():

View File

@@ -21,6 +21,21 @@ def test_xfail_decorator(testdir):
     ])
     assert result.ret == 1

+def test_xfail_at_module(testdir):
+    p = testdir.makepyfile("""
+        xfail = 'True'
+
+        def test_intentional_xfail():
+            assert 0
+    """)
+    result = testdir.runpytest(p)
+    extra = result.stdout.fnmatch_lines([
+        "*expected failures*",
+        "*test_intentional_xfail*:4*",
+        "*1 xfailed*"
+    ])
+    assert result.ret == 0
+
 def test_skipif_decorator(testdir):
     p = testdir.makepyfile("""
         import py
@@ -84,26 +99,3 @@ def test_evalexpression_cls_config_example(testdir):
     x, y = evalexpression(item, 'skipif')
     assert x == 'config._hackxyz'
     assert y == 3
-
-def test_importorskip():
-    from _py.test.outcome import Skipped
-    from _py.test.plugin.pytest_skipping import importorskip
-    assert importorskip == py.test.importorskip
-    try:
-        sys = importorskip("sys")
-        assert sys == py.std.sys
-        #path = py.test.importorskip("os.path")
-        #assert path == py.std.os.path
-        py.test.raises(Skipped, "py.test.importorskip('alskdj')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
-        path = importorskip("py", minversion=".".join(py.__version__))
-        mod = py.std.types.ModuleType("hello123")
-        mod.__version__ = "1.3"
-        py.test.raises(Skipped, """
-            py.test.importorskip("hello123", minversion="5.0")
-        """)
-    except Skipped:
-        print(py.code.ExceptionInfo())
-        py.test.fail("spurious skip")
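
The new test_xfail_at_module above exercises the module-level xfail fix from this commit; in a user's test file the declaration would look roughly like this (the failing assertion is only a placeholder)::

    # content of test_not_ready_yet.py (illustrative only)
    # mark every test in this module as expected-to-fail
    xfail = "True"

    def test_unfinished_feature():
        assert 1 + 1 == 3   # fails for now, reported as 'xfailed' instead of a failure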

View File

@@ -29,3 +29,30 @@ def test_exception_printing_skip():
     excinfo = py.code.ExceptionInfo()
     s = excinfo.exconly(tryshort=True)
     assert s.startswith("Skipped")
+
+def test_importorskip():
+    from _py.test.outcome import Skipped, importorskip
+    assert importorskip == py.test.importorskip
+    try:
+        sys = importorskip("sys")
+        assert sys == py.std.sys
+        #path = py.test.importorskip("os.path")
+        #assert path == py.std.os.path
+        py.test.raises(Skipped, "py.test.importorskip('alskdj')")
+        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
+        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
+        path = importorskip("py", minversion=".".join(py.__version__))
+        mod = py.std.types.ModuleType("hello123")
+        mod.__version__ = "1.3"
+        py.test.raises(Skipped, """
+            py.test.importorskip("hello123", minversion="5.0")
+        """)
+    except Skipped:
+        print(py.code.ExceptionInfo())
+        py.test.fail("spurious skip")
+
+def test_importorskip_imports_last_module_part():
+    import os
+    ospath = py.test.importorskip("os.path")
+    assert os.path == ospath