From e20216a1a86cf0490d4776d1c9f7043eea089f10 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Sat, 11 Jul 2015 10:13:27 +0200 Subject: [PATCH 01/13] merge the pytest-cache plugin into core --- .gitignore | 1 + CHANGELOG | 1 + _pytest/cacheprovider.py | 214 +++++++++++++++++++++++++++++++ _pytest/config.py | 2 +- doc/en/apiref.rst | 27 ---- doc/en/cache.txt | 249 +++++++++++++++++++++++++++++++++++++ testing/test_cache.py | 96 ++++++++++++++ testing/test_lastfailed.py | 235 ++++++++++++++++++++++++++++++++++ 8 files changed, 797 insertions(+), 28 deletions(-) create mode 100755 _pytest/cacheprovider.py delete mode 100644 doc/en/apiref.rst create mode 100644 doc/en/cache.txt create mode 100755 testing/test_cache.py create mode 100755 testing/test_lastfailed.py diff --git a/.gitignore b/.gitignore index cd6a7fc9e..ebeb552dd 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ dist/ *.egg-info issue/ env/ +.env/ 3rdparty/ .tox .cache diff --git a/CHANGELOG b/CHANGELOG index fc2eb23bd..a95386a92 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -42,6 +42,7 @@ - fix issue82: avoid loading conftest files from setup.cfg/pytest.ini/tox.ini files and upwards by default (--confcutdir can still be set to override this). Thanks Bruno Oliveira for the PR. +- merge the pytest-cache extension into core - fix issue768: docstrings found in python modules were not setting up session fixtures. Thanks Jason R. Coombs for reporting and Bruno Oliveira for the PR. diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py new file mode 100755 index 000000000..11023dc7c --- /dev/null +++ b/_pytest/cacheprovider.py @@ -0,0 +1,214 @@ +""" +merged implementation of the cache provider + +the name cache was not choosen to ensure pluggy automatically +ignores the external pytest-cache +""" + +import py +import pytest +import json +from os.path import sep as _sep, altsep as _altsep + + +class Cache: + def __init__(self, config): + self.config = config + self._cachedir = config.rootdir.join(".cache") + self.trace = config.trace.root.get("cache") + if config.getvalue("clearcache"): + self.trace("clearing cachedir") + if self._cachedir.check(): + self._cachedir.remove() + self._cachedir.mkdir() + + def makedir(self, name): + """ return a directory path object with the given name. If the + directory does not yet exist, it will be created. You can use it + to manage files likes e. g. store/retrieve database + dumps across test sessions. + + :param name: must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + if _sep in name or _altsep is not None and _altsep in name: + raise ValueError("name is not allowed to contain path separators") + return self._cachedir.ensure_dir("d", name) + + def _getvaluepath(self, key): + return self._cachedir.join('v', *key.split('/')) + + def get(self, key, default): + """ return cached value for the given key. If no value + was yet cached or the value cannot be read, the specified + default is returned. + + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: must be provided in case of a cache-miss or + invalid cache values. + + """ + path = self._getvaluepath(key) + if path.check(): + try: + with path.open("r") as f: + return json.load(f) + except ValueError: + self.trace("cache-invalid at %s" % (path,)) + return default + + def set(self, key, value): + """ save value for the given key. 
+ + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: must be of any combination of basic + python types, including nested types + like e. g. lists of dictionaries. + """ + path = self._getvaluepath(key) + path.dirpath().ensure_dir() + with path.open("w") as f: + self.trace("cache-write %s: %r" % (key, value,)) + json.dump(value, f, indent=2, sort_keys=True) + + +class LFPlugin: + """ Plugin which implements the --lf (run last-failing) option """ + def __init__(self, config): + self.config = config + active_keys = 'lf', 'failedfirst' + self.active = any(config.getvalue(key) for key in active_keys) + if self.active: + self.lastfailed = config.cache.get("cache/lastfailed", {}) + else: + self.lastfailed = {} + + def pytest_report_header(self): + if self.active: + if not self.lastfailed: + mode = "run all (no recorded failures)" + else: + mode = "rerun last %d failures%s" % ( + len(self.lastfailed), + " first" if self.config.getvalue("failedfirst") else "") + return "run-last-failure: %s" % mode + + def pytest_runtest_logreport(self, report): + if report.failed and "xfail" not in report.keywords: + self.lastfailed[report.nodeid] = True + elif not report.failed: + if report.when == "call": + self.lastfailed.pop(report.nodeid, None) + + def pytest_collectreport(self, report): + passed = report.outcome in ('passed', 'skipped') + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update( + (item.nodeid, True) + for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + def pytest_collection_modifyitems(self, session, config, items): + if self.active and self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + if not previously_failed and previously_passed: + # running a subset of all tests with recorded failures outside + # of the set of tests currently executing + pass + elif self.config.getvalue("failedfirst"): + items[:] = previously_failed + previously_passed + else: + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + + def pytest_sessionfinish(self, session): + config = self.config + if config.getvalue("showcache") or hasattr(config, "slaveinput"): + return + config.cache.set("cache/lastfailed", self.lastfailed) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + '--lf', action='store_true', dest="lf", + help="rerun only the tests that failed " + "at the last run (or all if none failed)") + group.addoption( + '--ff', action='store_true', dest="failedfirst", + help="run all tests but run the last failures first. 
" + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown") + group.addoption( + '--cache', action='store_true', dest="showcache", + help="show cache contents, don't perform collection or tests") + group.addoption( + '--clearcache', action='store_true', dest="clearcache", + help="remove all cache contents at start of test run.") + + +def pytest_cmdline_main(config): + if config.option.showcache: + from _pytest.main import wrap_session + return wrap_session(config, showcache) + + +@pytest.hookimpl(tryfirst=True) +def pytest_configure(config): + config.cache = Cache(config) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + + +def pytest_report_header(config): + if config.option.verbose: + relpath = py.path.local().bestrelpath(config.cache._cachedir) + return "cachedir: %s" % relpath + + +def showcache(config, session): + from pprint import pprint + tw = py.io.TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.check(): + tw.line("cache is empty") + return 0 + dummy = object() + basedir = config.cache._cachedir + vdir = basedir.join("v") + tw.sep("-", "cache values") + for valpath in vdir.visit(lambda x: x.isfile()): + key = valpath.relto(vdir).replace(valpath.sep, "/") + val = config.cache.get(key, dummy) + if val is dummy: + tw.line("%s contains unreadable content, " + "will be ignored" % key) + else: + tw.line("%s contains:" % key) + stream = py.io.TextIO() + pprint(val, stream=stream) + for line in stream.getvalue().splitlines(): + tw.line(" " + line) + + ddir = basedir.join("d") + if ddir.isdir() and ddir.listdir(): + tw.sep("-", "cache directories") + for p in basedir.join("d").visit(): + #if p.check(dir=1): + # print("%s/" % p.relto(basedir)) + if p.isfile(): + key = p.relto(basedir) + tw.line("%s is a file of length %d" % ( + key, p.size())) + return 0 diff --git a/_pytest/config.py b/_pytest/config.py index 2a3c71201..e9888bdf6 100644 --- a/_pytest/config.py +++ b/_pytest/config.py @@ -64,7 +64,7 @@ _preinit = [] default_plugins = ( "mark main terminal runner python pdb unittest capture skipping " "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " - "junitxml resultlog doctest").split() + "junitxml resultlog doctest cacheprovider").split() builtin_plugins = set(default_plugins) builtin_plugins.add("pytester") diff --git a/doc/en/apiref.rst b/doc/en/apiref.rst deleted file mode 100644 index 6b9a6a5e3..000000000 --- a/doc/en/apiref.rst +++ /dev/null @@ -1,27 +0,0 @@ - -.. _apiref: - -pytest reference documentation -================================================ - -.. toctree:: - :maxdepth: 2 - - builtin - customize - assert - fixture - yieldfixture - parametrize - xunit_setup - capture - monkeypatch - xdist - tmpdir - mark - skipping - recwarn - unittest - nose - doctest - diff --git a/doc/en/cache.txt b/doc/en/cache.txt new file mode 100644 index 000000000..51437eb8e --- /dev/null +++ b/doc/en/cache.txt @@ -0,0 +1,249 @@ +cache: working with cross-testrun state +======================================= + +Usage +--------- + +plugins can access the `config.cache`_ object +which helps sharing values between ``py.test`` invocations. + +The plugin provides two options to rerun failures, namely: + +* ``--lf`` (last failures) - to only re-run the failures. +* ``--ff`` (failures first) - to run the failures first and then the rest of + the tests. + +For cleanup (usually not needed), a ``--clearcache`` option allows to remove +all cross-session cache contents ahead of a test run. 
+ + +Rerunning only failures or failures first +----------------------------------------------- + +First, let's create 50 test invocation of which only 2 fail:: + + # content of test_50.py + import pytest + + @pytest.mark.parametrize("i", range(50)) + def test_num(i): + if i in (17, 25): + pytest.fail("bad luck") + +If you run this for the first time you will see two failures:: + + $ py.test -q + .................F.......F........................ + =================================== FAILURES =================================== + _________________________________ test_num[17] _________________________________ + + i = 17 + + @pytest.mark.parametrize("i", range(50)) + def test_num(i): + if i in (17,25): + > pytest.fail("bad luck") + E Failed: bad luck + + test_50.py:6: Failed + _________________________________ test_num[25] _________________________________ + + i = 25 + + @pytest.mark.parametrize("i", range(50)) + def test_num(i): + if i in (17,25): + > pytest.fail("bad luck") + E Failed: bad luck + + test_50.py:6: Failed + +If you then run it with ``--lf`` you will run only the two failing test +from the last run:: + + $ py.test --lf + ============================= test session starts ============================== + platform linux2 -- Python 2.7.3 -- pytest-2.3.5 + run-last-failure: rerun last 2 failures + plugins: cache + collected 50 items + + test_50.py FF + + =================================== FAILURES =================================== + _________________________________ test_num[17] _________________________________ + + i = 17 + + @pytest.mark.parametrize("i", range(50)) + def test_num(i): + if i in (17,25): + > pytest.fail("bad luck") + E Failed: bad luck + + test_50.py:6: Failed + _________________________________ test_num[25] _________________________________ + + i = 25 + + @pytest.mark.parametrize("i", range(50)) + def test_num(i): + if i in (17,25): + > pytest.fail("bad luck") + E Failed: bad luck + + test_50.py:6: Failed + =================== 2 failed, 48 deselected in 0.02 seconds ==================== + +The last line indicates that 48 tests have not been run. + +If you run with the ``--ff`` option, all tests will be run but the first +failures will be executed first (as can be seen from the series of ``FF`` and +dots):: + + $ py.test --ff + ============================= test session starts ============================== + platform linux2 -- Python 2.7.3 -- pytest-2.3.5 + run-last-failure: rerun last 2 failures first + plugins: cache + collected 50 items + + test_50.py FF................................................ + + =================================== FAILURES =================================== + _________________________________ test_num[17] _________________________________ + + i = 17 + + @pytest.mark.parametrize("i", range(50)) + def test_num(i): + if i in (17,25): + > pytest.fail("bad luck") + E Failed: bad luck + + test_50.py:6: Failed + _________________________________ test_num[25] _________________________________ + + i = 25 + + @pytest.mark.parametrize("i", range(50)) + def test_num(i): + if i in (17,25): + > pytest.fail("bad luck") + E Failed: bad luck + + test_50.py:6: Failed + ===================== 2 failed, 48 passed in 0.07 seconds ====================== + +.. _`config.cache`: + +The new config.cache object +-------------------------------- + +.. regendoc:wipe + +Plugins or conftest.py support code can get a cached value +using the pytest ``config`` object. 
Here is a basic example +plugin which implements a `funcarg `_ +which re-uses previously created state across py.test invocations:: + + # content of test_caching.py + import time + + def pytest_funcarg__mydata(request): + val = request.config.cache.get("example/value", None) + if val is None: + time.sleep(9*0.6) # expensive computation :) + val = 42 + request.config.cache.set("example/value", val) + return val + + def test_function(mydata): + assert mydata == 23 + +If you run this command once, it will take a while because +of the sleep:: + + $ py.test -q + F + =================================== FAILURES =================================== + ________________________________ test_function _________________________________ + + mydata = 42 + + def test_function(mydata): + > assert mydata == 23 + E assert 42 == 23 + + test_caching.py:12: AssertionError + +If you run it a second time the value will be retrieved from +the cache and this will be quick:: + + $ py.test -q + F + =================================== FAILURES =================================== + ________________________________ test_function _________________________________ + + mydata = 42 + + def test_function(mydata): + > assert mydata == 23 + E assert 42 == 23 + + test_caching.py:12: AssertionError + +Consult the `pytest-cache API `_ +for more details. + + +Inspecting Cache content +------------------------------- + +You can always peek at the content of the cache using the +``--cache`` command line option:: + + $ py.test --cache + ============================= test session starts ============================== + platform linux2 -- Python 2.7.3 -- pytest-2.3.5 + plugins: cache + cachedir: /tmp/doc-exec-6/.cache + --------------------------------- cache values --------------------------------- + example/value contains: + 42 + cache/lastfailed contains: + set(['test_caching.py::test_function']) + + =============================== in 0.01 seconds =============================== + +Clearing Cache content +------------------------------- + +You can instruct pytest to clear all cache files and values +by adding the ``--clearcache`` option like this:: + + py.test --clearcache + +This is recommended for invocations from Continous Integration +servers where isolation and correctness is more important +than speed. + + +config.cache API +======================================== + +The `config.cache`` object allows other plugins, +including ``conftest.py`` files, +to safely and flexibly store and retrieve values across +test runs because the ``config`` object is available +in many places. + +Under the hood, the cache plugin uses the simple +dumps/loads API of the json stdlib module + +.. currentmodule:: pytest_cache + +.. automethod:: Cache.get +.. automethod:: Cache.set +.. 
automethod:: Cache.makedir + diff --git a/testing/test_cache.py b/testing/test_cache.py new file mode 100755 index 000000000..142d676a8 --- /dev/null +++ b/testing/test_cache.py @@ -0,0 +1,96 @@ +import os +import pytest +import shutil +import py + +pytest_plugins = "pytester", + +class TestNewAPI: + def test_config_cache_makedir(self, testdir): + testdir.makeini("[pytest]") + config = testdir.parseconfigure() + with pytest.raises(ValueError): + config.cache.makedir("key/name") + + p = config.cache.makedir("name") + assert p.check() + + def test_config_cache_dataerror(self, testdir): + testdir.makeini("[pytest]") + config = testdir.parseconfigure() + cache = config.cache + pytest.raises(TypeError, lambda: cache.set("key/name", cache)) + config.cache.set("key/name", 0) + config.cache._getvaluepath("key/name").write("123invalid") + val = config.cache.get("key/name", -2) + assert val == -2 + + def test_config_cache(self, testdir): + testdir.makeconftest(""" + def pytest_configure(config): + # see that we get cache information early on + assert hasattr(config, "cache") + """) + testdir.makepyfile(""" + def test_session(pytestconfig): + assert hasattr(pytestconfig, "cache") + """) + result = testdir.runpytest() + assert result.ret == 0 + result.stdout.fnmatch_lines(["*1 passed*"]) + + def XXX_test_cachefuncarg(self, testdir): + testdir.makepyfile(""" + import pytest + def test_cachefuncarg(cache): + val = cache.get("some/thing", None) + assert val is None + cache.set("some/thing", [1]) + pytest.raises(TypeError, lambda: cache.get("some/thing")) + val = cache.get("some/thing", []) + assert val == [1] + """) + result = testdir.runpytest() + assert result.ret == 0 + result.stdout.fnmatch_lines(["*1 passed*"]) + + + +def test_cache_reportheader(testdir): + p = testdir.makepyfile(""" + def test_hello(): + pass + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "cachedir: .cache" + ]) + +def test_cache_show(testdir): + result = testdir.runpytest("--cache") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*cache is empty*" + ]) + p = testdir.makeconftest(""" + def pytest_configure(config): + config.cache.set("my/name", [1,2,3]) + config.cache.set("other/some", {1:2}) + dp = config.cache.makedir("mydb") + dp.ensure("hello") + dp.ensure("world") + """) + result = testdir.runpytest() + assert result.ret == 0 + result = testdir.runpytest("--cache") + result.stdout.fnmatch_lines_random([ + "*cachedir:*", + "-*cache values*-", + "*my/name contains:", + " [1, 2, 3]", + "*other/some contains*", + " {*1*: 2}", + "-*cache directories*-", + "*mydb/hello*length 0*", + "*mydb/world*length 0*", + ]) diff --git a/testing/test_lastfailed.py b/testing/test_lastfailed.py new file mode 100755 index 000000000..35ecd9a13 --- /dev/null +++ b/testing/test_lastfailed.py @@ -0,0 +1,235 @@ +import os +import pytest +import shutil +import py + +pytest_plugins = "pytester", + + +class TestLastFailed: + @pytest.mark.skipif("sys.version_info < (2,6)") + def test_lastfailed_usecase(self, testdir, monkeypatch): + monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) + p = testdir.makepyfile(""" + def test_1(): + assert 0 + def test_2(): + assert 0 + def test_3(): + assert 1 + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + p.write(py.code.Source(""" + def test_1(): + assert 1 + + def test_2(): + assert 1 + + def test_3(): + assert 0 + """)) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines([ + "*2 passed*1 desel*", + ]) + result = 
testdir.runpytest("--lf") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + result = testdir.runpytest("--lf", "--clearcache") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + + # Run this again to make sure clearcache is robust + if os.path.isdir('.cache'): + shutil.rmtree('.cache') + result = testdir.runpytest("--lf", "--clearcache") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + + def test_failedfirst_order(self, testdir): + always_pass = testdir.tmpdir.join('test_a.py').write(py.code.Source(""" + def test_always_passes(): + assert 1 + """)) + always_fail = testdir.tmpdir.join('test_b.py').write(py.code.Source(""" + def test_always_fails(): + assert 0 + """)) + result = testdir.runpytest() + # Test order will be collection order; alphabetical + result.stdout.fnmatch_lines([ + "test_a.py*", + "test_b.py*", + ]) + result = testdir.runpytest("--lf", "--ff") + # Test order will be failing tests firs + result.stdout.fnmatch_lines([ + "test_b.py*", + "test_a.py*", + ]) + + @pytest.mark.skipif("sys.version_info < (2,6)") + def test_lastfailed_difference_invocations(self, testdir, monkeypatch): + monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) + testdir.makepyfile(test_a=""" + def test_a1(): + assert 0 + def test_a2(): + assert 1 + """, test_b=""" + def test_b1(): + assert 0 + """) + p = testdir.tmpdir.join("test_a.py") + p2 = testdir.tmpdir.join("test_b.py") + + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + result = testdir.runpytest("--lf", p2) + result.stdout.fnmatch_lines([ + "*1 failed*", + ]) + p2.write(py.code.Source(""" + def test_b1(): + assert 1 + """)) + result = testdir.runpytest("--lf", p2) + result.stdout.fnmatch_lines([ + "*1 passed*", + ]) + result = testdir.runpytest("--lf", p) + result.stdout.fnmatch_lines([ + "*1 failed*1 desel*", + ]) + + @pytest.mark.skipif("sys.version_info < (2,6)") + def test_lastfailed_usecase_splice(self, testdir, monkeypatch): + monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) + p1 = testdir.makepyfile(""" + def test_1(): + assert 0 + """) + p2 = testdir.tmpdir.join("test_something.py") + p2.write(py.code.Source(""" + def test_2(): + assert 0 + """)) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + result = testdir.runpytest("--lf", p2) + result.stdout.fnmatch_lines([ + "*1 failed*", + ]) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + + def test_lastfailed_xpass(self, testdir): + rep = testdir.inline_runsource(""" + import pytest + @pytest.mark.xfail + def test_hello(): + assert 1 + """) + config = testdir.parseconfigure() + lastfailed = config.cache.get("cache/lastfailed", -1) + assert not lastfailed + + def test_lastfailed_collectfailure(self, testdir, monkeypatch): + + testdir.makepyfile(test_maybe=""" + import py + env = py.std.os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + """) + + def rlf(fail_import, fail_run): + monkeypatch.setenv('FAILIMPORT', fail_import) + monkeypatch.setenv('FAILTEST', fail_run) + + testdir.runpytest('-q') + config = testdir.parseconfigure() + lastfailed = config.cache.get("cache/lastfailed", -1) + return lastfailed + + lastfailed = rlf(fail_import=0, fail_run=0) + assert not lastfailed + + lastfailed = rlf(fail_import=1, fail_run=0) + assert list(lastfailed) == ['test_maybe.py'] + + lastfailed = rlf(fail_import=0, fail_run=1) + assert list(lastfailed) == 
['test_maybe.py::test_hello'] + + + def test_lastfailed_failure_subset(self, testdir, monkeypatch): + + testdir.makepyfile(test_maybe=""" + import py + env = py.std.os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + """) + + testdir.makepyfile(test_maybe2=""" + import py + env = py.std.os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + + def test_pass(): + pass + """) + + def rlf(fail_import, fail_run, args=()): + monkeypatch.setenv('FAILIMPORT', fail_import) + monkeypatch.setenv('FAILTEST', fail_run) + + result = testdir.runpytest('-q', '--lf', *args) + config = testdir.parseconfigure() + lastfailed = config.cache.get("cache/lastfailed", -1) + return result, lastfailed + + result, lastfailed = rlf(fail_import=0, fail_run=0) + assert not lastfailed + result.stdout.fnmatch_lines([ + '*3 passed*', + ]) + + result, lastfailed = rlf(fail_import=1, fail_run=0) + assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py'] + + + result, lastfailed = rlf(fail_import=0, fail_run=0, + args=('test_maybe2.py',)) + assert list(lastfailed) == ['test_maybe.py'] + + + # edge case of test selection - even if we remember failures + # from other tests we still need to run all tests if no test + # matches the failures + result, lastfailed = rlf(fail_import=0, fail_run=0, + args=('test_maybe2.py',)) + assert list(lastfailed) == ['test_maybe.py'] + result.stdout.fnmatch_lines([ + '*2 passed*', + ]) From e035f57535327093bc1d693eb885784eda5c564a Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Fri, 24 Jul 2015 09:59:59 +0200 Subject: [PATCH 02/13] s/--cache/--show-cache/ --- _pytest/cacheprovider.py | 4 ++-- testing/test_cache.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py index 11023dc7c..5f7b476db 100755 --- a/_pytest/cacheprovider.py +++ b/_pytest/cacheprovider.py @@ -11,7 +11,7 @@ import json from os.path import sep as _sep, altsep as _altsep -class Cache: +class Cache(object): def __init__(self, config): self.config = config self._cachedir = config.rootdir.join(".cache") @@ -152,7 +152,7 @@ def pytest_addoption(parser): "This may re-order tests and thus lead to " "repeated fixture setup/teardown") group.addoption( - '--cache', action='store_true', dest="showcache", + '--show-cache', action='store_true', dest="showcache", help="show cache contents, don't perform collection or tests") group.addoption( '--clearcache', action='store_true', dest="clearcache", diff --git a/testing/test_cache.py b/testing/test_cache.py index 142d676a8..c4c2c3e78 100755 --- a/testing/test_cache.py +++ b/testing/test_cache.py @@ -67,7 +67,7 @@ def test_cache_reportheader(testdir): ]) def test_cache_show(testdir): - result = testdir.runpytest("--cache") + result = testdir.runpytest("--show-cache") assert result.ret == 0 result.stdout.fnmatch_lines([ "*cache is empty*" @@ -82,7 +82,7 @@ def test_cache_show(testdir): """) result = testdir.runpytest() assert result.ret == 0 - result = testdir.runpytest("--cache") + result = testdir.runpytest("--show-cache") result.stdout.fnmatch_lines_random([ "*cachedir:*", "-*cache values*-", From 1de38a25fc9c8a6a1cad876b8a7e2cffb503ced2 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Fri, 24 Jul 2015 10:05:54 +0200 Subject: [PATCH 03/13] use flake8 in the flakes testenv and extend the ignored errors list so pytest is clean we def have to trim down that 
one --- pytest.py | 9 ++++++++- testing/test_cache.py | 7 ++----- testing/test_lastfailed.py | 8 ++++---- tox.ini | 9 ++++++--- 4 files changed, 20 insertions(+), 13 deletions(-) diff --git a/pytest.py b/pytest.py index 161c44822..e1d12d163 100644 --- a/pytest.py +++ b/pytest.py @@ -2,7 +2,14 @@ """ pytest: unit and functional testing with Python. """ -__all__ = ['main'] +__all__ = [ + 'main', + 'UsageError', + 'cmdline', + 'hookspec', + 'hookimpl', + '__version__', +] if __name__ == '__main__': # if run as a script or by 'python -m pytest' # we trigger the below "else" condition by the following import diff --git a/testing/test_cache.py b/testing/test_cache.py index c4c2c3e78..9ec8966d7 100755 --- a/testing/test_cache.py +++ b/testing/test_cache.py @@ -1,7 +1,4 @@ -import os import pytest -import shutil -import py pytest_plugins = "pytester", @@ -57,7 +54,7 @@ class TestNewAPI: def test_cache_reportheader(testdir): - p = testdir.makepyfile(""" + testdir.makepyfile(""" def test_hello(): pass """) @@ -72,7 +69,7 @@ def test_cache_show(testdir): result.stdout.fnmatch_lines([ "*cache is empty*" ]) - p = testdir.makeconftest(""" + testdir.makeconftest(""" def pytest_configure(config): config.cache.set("my/name", [1,2,3]) config.cache.set("other/some", {1:2}) diff --git a/testing/test_lastfailed.py b/testing/test_lastfailed.py index 35ecd9a13..445d81be0 100755 --- a/testing/test_lastfailed.py +++ b/testing/test_lastfailed.py @@ -54,11 +54,11 @@ class TestLastFailed: ]) def test_failedfirst_order(self, testdir): - always_pass = testdir.tmpdir.join('test_a.py').write(py.code.Source(""" + testdir.tmpdir.join('test_a.py').write(py.code.Source(""" def test_always_passes(): assert 1 """)) - always_fail = testdir.tmpdir.join('test_b.py').write(py.code.Source(""" + testdir.tmpdir.join('test_b.py').write(py.code.Source(""" def test_always_fails(): assert 0 """)) @@ -114,7 +114,7 @@ class TestLastFailed: @pytest.mark.skipif("sys.version_info < (2,6)") def test_lastfailed_usecase_splice(self, testdir, monkeypatch): monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - p1 = testdir.makepyfile(""" + testdir.makepyfile(""" def test_1(): assert 0 """) @@ -137,7 +137,7 @@ class TestLastFailed: ]) def test_lastfailed_xpass(self, testdir): - rep = testdir.inline_runsource(""" + testdir.inline_runsource(""" import pytest @pytest.mark.xfail def test_hello(): diff --git a/tox.ini b/tox.ini index 73ecdfb45..be04e0a0a 100644 --- a/tox.ini +++ b/tox.ini @@ -33,8 +33,8 @@ commands= py.test --genscript=pytest1 [testenv:flakes] basepython = python2.7 -deps = pytest-flakes>=0.2 -commands = py.test --flakes -m flakes _pytest testing +deps = flake8 +commands = flake8 pytest.py _pytest testing [testenv:py27-xdist] deps=pytest-xdist>=1.13 @@ -148,5 +148,8 @@ rsyncdirs=tox.ini pytest.py _pytest testing python_files=test_*.py *_test.py testing/*/*.py python_classes=Test Acceptance python_functions=test -pep8ignore = E401 E225 E261 E128 E124 E302 norecursedirs = .tox ja .hg cx_freeze_source + + +[flake8] +ignore =E401,E225,E261,E128,E124,E301,E302,E121,E303,W391,E501,E231,E126,E701,E265,E241,E251,E226,E101,W191,E131,E203,E122,E123,E271,E712,E222,E127,E125,E221,W292,E111,E113,E293,E262,W293,E129,E702,E201,E272,E202 From 2e87cf4a624ffd8f48e86a9bda204309ed010ba1 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Fri, 24 Jul 2015 20:32:50 +0200 Subject: [PATCH 04/13] create the previously missing cache fixture there was a disabled test --- _pytest/cacheprovider.py | 4 ++++ testing/test_cache.py | 2 +- 2 files changed, 5 
insertions(+), 1 deletion(-) diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py index 5f7b476db..cff951fec 100755 --- a/_pytest/cacheprovider.py +++ b/_pytest/cacheprovider.py @@ -171,6 +171,10 @@ def pytest_configure(config): config.pluginmanager.register(LFPlugin(config), "lfplugin") +@pytest.fixture +def cache(request): + return request.config.cache + def pytest_report_header(config): if config.option.verbose: relpath = py.path.local().bestrelpath(config.cache._cachedir) diff --git a/testing/test_cache.py b/testing/test_cache.py index 9ec8966d7..9eedb7dc7 100755 --- a/testing/test_cache.py +++ b/testing/test_cache.py @@ -36,7 +36,7 @@ class TestNewAPI: assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) - def XXX_test_cachefuncarg(self, testdir): + def test_cachefuncarg(self, testdir): testdir.makepyfile(""" import pytest def test_cachefuncarg(cache): From 3d843edc694a554b0069a9691b4d89f8338f23bf Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Mon, 17 Aug 2015 19:17:39 +0200 Subject: [PATCH 05/13] minor doc reference fixes --- doc/en/cache.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/en/cache.txt b/doc/en/cache.txt index 51437eb8e..5affda10f 100644 --- a/doc/en/cache.txt +++ b/doc/en/cache.txt @@ -1,6 +1,8 @@ cache: working with cross-testrun state ======================================= +.. versionadded:: 2.8 + Usage --------- @@ -241,9 +243,8 @@ in many places. Under the hood, the cache plugin uses the simple dumps/loads API of the json stdlib module -.. currentmodule:: pytest_cache +.. currentmodule:: _pytest.cacheprovider .. automethod:: Cache.get .. automethod:: Cache.set .. automethod:: Cache.makedir - From 1e107e6bd12c2a7f14718fbeffe161714d560fe2 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Thu, 27 Aug 2015 19:22:22 +0200 Subject: [PATCH 06/13] restrucure pytest.main.wrap_session to allow for non-testrun wraps --- _pytest/cacheprovider.py | 1 + _pytest/main.py | 13 +++++++------ testing/test_cache.py | 2 +- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py index cff951fec..5c6bf2214 100755 --- a/_pytest/cacheprovider.py +++ b/_pytest/cacheprovider.py @@ -165,6 +165,7 @@ def pytest_cmdline_main(config): return wrap_session(config, showcache) + @pytest.hookimpl(tryfirst=True) def pytest_configure(config): config.cache = Cache(config) diff --git a/_pytest/main.py b/_pytest/main.py index 6b2ccb6b7..e9ac57d8f 100644 --- a/_pytest/main.py +++ b/_pytest/main.py @@ -87,7 +87,7 @@ def wrap_session(config, doit): initstate = 1 config.hook.pytest_sessionstart(session=session) initstate = 2 - doit(config, session) + session.exitstatus = doit(config, session) or 0 except pytest.UsageError: raise except KeyboardInterrupt: @@ -100,11 +100,7 @@ def wrap_session(config, doit): session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") - else: - if session.testsfailed: - session.exitstatus = EXIT_TESTSFAILED - elif session.testscollected == 0: - session.exitstatus = EXIT_NOTESTSCOLLECTED + finally: excinfo = None # Explicitly break reference cycle. 
session.startdir.chdir() @@ -124,6 +120,11 @@ def _main(config, session): config.hook.pytest_collection(session=session) config.hook.pytest_runtestloop(session=session) + if session.testsfailed: + return EXIT_TESTSFAILED + elif session.testscollected == 0: + return EXIT_NOTESTSCOLLECTED + def pytest_collection(session): return session.perform_collect() diff --git a/testing/test_cache.py b/testing/test_cache.py index 9eedb7dc7..5a09bedad 100755 --- a/testing/test_cache.py +++ b/testing/test_cache.py @@ -78,7 +78,7 @@ def test_cache_show(testdir): dp.ensure("world") """) result = testdir.runpytest() - assert result.ret == 0 + assert result.ret == 5 # no tests executed result = testdir.runpytest("--show-cache") result.stdout.fnmatch_lines_random([ "*cachedir:*", From c06ff2a99240a4defe948607dcf9edfc098eb073 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Thu, 27 Aug 2015 20:31:56 +0200 Subject: [PATCH 07/13] update docs on the json usage of builtin cache --- CHANGELOG | 6 +++++- doc/en/cache.txt | 8 +++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/CHANGELOG b/CHANGELOG index a95386a92..dd908d95a 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -42,7 +42,10 @@ - fix issue82: avoid loading conftest files from setup.cfg/pytest.ini/tox.ini files and upwards by default (--confcutdir can still be set to override this). Thanks Bruno Oliveira for the PR. -- merge the pytest-cache extension into core + +- merge a refined variant of the pytest-cache extension into core + it uses json instead of execnet for the serializer + and deactivates the external cache plugin - fix issue768: docstrings found in python modules were not setting up session fixtures. Thanks Jason R. Coombs for reporting and Bruno Oliveira for the PR. @@ -67,6 +70,7 @@ - Summary bar now is colored yellow for warning situations such as: all tests either were skipped or xpass/xfailed, or no tests were run at all (this is a partial fix for issue500). + - fix issue812: pytest now exits with status code 5 in situations where no tests were run at all, such as the directory given in the command line does not contain any tests or as result of a command line option filters diff --git a/doc/en/cache.txt b/doc/en/cache.txt index 5affda10f..2d2965048 100644 --- a/doc/en/cache.txt +++ b/doc/en/cache.txt @@ -3,11 +3,17 @@ cache: working with cross-testrun state .. versionadded:: 2.8 +.. warning:: + + the external pytest-cache plugin used execnet_ as serializer, + which supported a wider range of builtin objects. + the buitin one however uses json. + Usage --------- plugins can access the `config.cache`_ object -which helps sharing values between ``py.test`` invocations. +which helps sharing **json encodable** values between ``py.test`` invocations. 
The plugin provides two options to rerun failures, namely: From cd475c7b27147a9e0599d6994ba1f29da25ef726 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Thu, 27 Aug 2015 20:35:46 +0200 Subject: [PATCH 08/13] minor flake8 fixes --- testing/python/collect.py | 2 +- testing/test_terminal.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/testing/python/collect.py b/testing/python/collect.py index 6a302f291..7a53cb764 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -908,7 +908,7 @@ def test_unorderable_types(testdir): """) result = testdir.runpytest() assert "TypeError" not in result.stdout.str() - assert result.ret == EXIT_NOTESTSCOLLECTED + assert result.ret == EXIT_NOTESTSCOLLECTED def test_collect_functools_partial(testdir): diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 81fdfd60b..3bb526649 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -53,7 +53,6 @@ def pytest_generate_tests(metafunc): DistInfo(project_name='test', version=1) ], ['test-1']), ], ids=['normal', 'prefix-strip', 'deduplicate']) - def test_plugin_nameversion(input, expected): pluginlist = [(None, x) for x in input] result = _plugin_nameversions(pluginlist) From e0645564fe3b36aa3a700305c030232223a10b4a Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Sun, 13 Sep 2015 21:49:01 +0200 Subject: [PATCH 09/13] more pep8 fixes --- _pytest/monkeypatch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py index 07998650a..c9a2132b9 100644 --- a/_pytest/monkeypatch.py +++ b/_pytest/monkeypatch.py @@ -193,12 +193,12 @@ class monkeypatch: undo stack. Calling it a second time has no effect unless you do more monkeypatching after the undo call. - There is generally no need to call `undo()`, since it is + There is generally no need to call `undo()`, since it is called automatically during tear-down. Note that the same `monkeypatch` fixture is used across a single test function invocation. If `monkeypatch` is used both by - the test function itself and one of the test fixtures, + the test function itself and one of the test fixtures, calling `undo()` will undo all of the changes made in both functions. """ From 9a90aaca96ad4d5548e006594cb764759735238c Mon Sep 17 00:00:00 2001 From: holger krekel Date: Wed, 16 Sep 2015 17:15:31 +0200 Subject: [PATCH 10/13] improve and integrate docs --- CHANGELOG | 11 +++++++---- doc/en/{cache.txt => cache.rst} | 12 +++++++----- doc/en/contents.rst | 1 + doc/en/index.rst | 1 + 4 files changed, 16 insertions(+), 9 deletions(-) rename doc/en/{cache.txt => cache.rst} (95%) diff --git a/CHANGELOG b/CHANGELOG index dd908d95a..8a09ca076 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,13 @@ 2.8.0.dev (compared to 2.7.X) ----------------------------- +- new ``--lf`` and ``-ff`` options to run only the last failing tests or + "failing tests first" from the last run. This functionality is provided + through porting the formerly external pytest-cache plugin into pytest core. + BACKWARD INCOMPAT: if you used pytest-cache's functionality to persist + data between test runs be aware that we don't serialize sets anymore. + Thanks Ronny Pfannschmidt for most of the merging work. + - "-r" option now accepts "a" to include all possible reports, similar to passing "fEsxXw" explicitly (isse960). Thanks Abhijeet Kasurde for the PR. @@ -43,10 +50,6 @@ files and upwards by default (--confcutdir can still be set to override this). 
Thanks Bruno Oliveira for the PR. -- merge a refined variant of the pytest-cache extension into core - it uses json instead of execnet for the serializer - and deactivates the external cache plugin - - fix issue768: docstrings found in python modules were not setting up session fixtures. Thanks Jason R. Coombs for reporting and Bruno Oliveira for the PR. diff --git a/doc/en/cache.txt b/doc/en/cache.rst similarity index 95% rename from doc/en/cache.txt rename to doc/en/cache.rst index 2d2965048..78afd6bf9 100644 --- a/doc/en/cache.txt +++ b/doc/en/cache.rst @@ -5,9 +5,10 @@ cache: working with cross-testrun state .. warning:: - the external pytest-cache plugin used execnet_ as serializer, - which supported a wider range of builtin objects. - the buitin one however uses json. + The functionality of this core plugin was previosuly distributed + as a third party plugin named ``pytest-cache``. The core plugin + is compatible regarding command line options and API usage except that you + can only store/receive data between test runs that is json-serializable. Usage --------- @@ -201,8 +202,7 @@ the cache and this will be quick:: test_caching.py:12: AssertionError -Consult the `pytest-cache API `_ -for more details. +See the `cache-api`_ for more details. Inspecting Cache content @@ -237,6 +237,8 @@ servers where isolation and correctness is more important than speed. +.. _`cache-api`: + config.cache API ======================================== diff --git a/doc/en/contents.rst b/doc/en/contents.rst index dabfcbecb..7666b5e59 100644 --- a/doc/en/contents.rst +++ b/doc/en/contents.rst @@ -12,6 +12,7 @@ Full pytest documentation overview apiref + cache plugins plugins_index/index example/index diff --git a/doc/en/index.rst b/doc/en/index.rst index 37842f4b5..24beb5b7c 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -32,6 +32,7 @@ pytest: helps you write better programs - :ref:`skipping` (improved in 2.4) - :ref:`distribute tests to multiple CPUs ` through :ref:`xdist plugin ` - :ref:`continuously re-run failing tests ` + - :doc:`cache` - flexible :ref:`Python test discovery` **integrates with other testing methods and tools**: From 45065e4e2eb2bcddf06f713e49fb2042a6b9d665 Mon Sep 17 00:00:00 2001 From: holger krekel Date: Wed, 16 Sep 2015 20:41:22 +0200 Subject: [PATCH 11/13] refine command line option naming and docs --- _pytest/cacheprovider.py | 14 +-- doc/en/cache.rst | 161 +++++++++++++------------ testing/test_cache.py | 237 ++++++++++++++++++++++++++++++++++++- testing/test_lastfailed.py | 235 ------------------------------------ 4 files changed, 328 insertions(+), 319 deletions(-) delete mode 100755 testing/test_lastfailed.py diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py index 5c6bf2214..977647fee 100755 --- a/_pytest/cacheprovider.py +++ b/_pytest/cacheprovider.py @@ -16,7 +16,7 @@ class Cache(object): self.config = config self._cachedir = config.rootdir.join(".cache") self.trace = config.trace.root.get("cache") - if config.getvalue("clearcache"): + if config.getvalue("cacheclear"): self.trace("clearing cachedir") if self._cachedir.check(): self._cachedir.remove() @@ -135,7 +135,7 @@ class LFPlugin: def pytest_sessionfinish(self, session): config = self.config - if config.getvalue("showcache") or hasattr(config, "slaveinput"): + if config.getvalue("cacheshow") or hasattr(config, "slaveinput"): return config.cache.set("cache/lastfailed", self.lastfailed) @@ -152,17 +152,17 @@ def pytest_addoption(parser): "This may re-order tests and thus lead to " "repeated 
fixture setup/teardown") group.addoption( - '--show-cache', action='store_true', dest="showcache", + '--cache-show', action='store_true', dest="cacheshow", help="show cache contents, don't perform collection or tests") group.addoption( - '--clearcache', action='store_true', dest="clearcache", + '--cache-clear', action='store_true', dest="cacheclear", help="remove all cache contents at start of test run.") def pytest_cmdline_main(config): - if config.option.showcache: + if config.option.cacheshow: from _pytest.main import wrap_session - return wrap_session(config, showcache) + return wrap_session(config, cacheshow) @@ -182,7 +182,7 @@ def pytest_report_header(config): return "cachedir: %s" % relpath -def showcache(config, session): +def cacheshow(config, session): from pprint import pprint tw = py.io.TerminalWriter() tw.line("cachedir: " + str(config.cache._cachedir)) diff --git a/doc/en/cache.rst b/doc/en/cache.rst index 78afd6bf9..b50265725 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -13,18 +13,19 @@ cache: working with cross-testrun state Usage --------- -plugins can access the `config.cache`_ object -which helps sharing **json encodable** values between ``py.test`` invocations. - -The plugin provides two options to rerun failures, namely: +The plugin provides two command line options to rerun failures from the +last ``py.test`` invocation: * ``--lf`` (last failures) - to only re-run the failures. * ``--ff`` (failures first) - to run the failures first and then the rest of the tests. -For cleanup (usually not needed), a ``--clearcache`` option allows to remove +For cleanup (usually not needed), a ``--cache-clear`` option allows to remove all cross-session cache contents ahead of a test run. +Other plugins may access the `config.cache`_ object to set/get +**json encodable** values between ``py.test`` invocations. + Rerunning only failures or failures first ----------------------------------------------- @@ -43,66 +44,67 @@ If you run this for the first time you will see two failures:: $ py.test -q .................F.......F........................ 
- =================================== FAILURES =================================== - _________________________________ test_num[17] _________________________________ - + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): - if i in (17,25): + if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed - _________________________________ test_num[25] _________________________________ - + _______________________________ test_num[25] _______________________________ + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): - if i in (17,25): + if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed + 2 failed, 48 passed in 0.04 seconds If you then run it with ``--lf`` you will run only the two failing test from the last run:: $ py.test --lf - ============================= test session starts ============================== - platform linux2 -- Python 2.7.3 -- pytest-2.3.5 + =========================== test session starts ============================ + platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0 run-last-failure: rerun last 2 failures - plugins: cache + rootdir: /tmp/doc-exec-9, inifile: collected 50 items - + test_50.py FF - - =================================== FAILURES =================================== - _________________________________ test_num[17] _________________________________ - + + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): - if i in (17,25): + if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed - _________________________________ test_num[25] _________________________________ - + _______________________________ test_num[25] _______________________________ + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): - if i in (17,25): + if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed - =================== 2 failed, 48 deselected in 0.02 seconds ==================== + ================= 2 failed, 48 deselected in 0.02 seconds ================== The last line indicates that 48 tests have not been run. @@ -111,38 +113,38 @@ failures will be executed first (as can be seen from the series of ``FF`` and dots):: $ py.test --ff - ============================= test session starts ============================== - platform linux2 -- Python 2.7.3 -- pytest-2.3.5 + =========================== test session starts ============================ + platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0 run-last-failure: rerun last 2 failures first - plugins: cache + rootdir: /tmp/doc-exec-9, inifile: collected 50 items - + test_50.py FF................................................ 
- - =================================== FAILURES =================================== - _________________________________ test_num[17] _________________________________ - + + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): - if i in (17,25): + if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed - _________________________________ test_num[25] _________________________________ - + _______________________________ test_num[25] _______________________________ + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): - if i in (17,25): + if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed - ===================== 2 failed, 48 passed in 0.07 seconds ====================== + =================== 2 failed, 48 passed in 0.04 seconds ==================== .. _`config.cache`: @@ -175,32 +177,34 @@ of the sleep:: $ py.test -q F - =================================== FAILURES =================================== - ________________________________ test_function _________________________________ - + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:12: AssertionError + 1 failed in 5.41 seconds If you run it a second time the value will be retrieved from the cache and this will be quick:: $ py.test -q F - =================================== FAILURES =================================== - ________________________________ test_function _________________________________ - + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:12: AssertionError + 1 failed in 0.01 seconds See the `cache-api`_ for more details. 
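
The series also adds a plain ``cache`` fixture, so a test function can
request the cache directly instead of going through ``request.config``.
A minimal sketch, with the expensive step stubbed out (the helper name
is hypothetical)::

    # content of test_cache_sketch.py (illustrative example)
    def expensive_computation():
        return 42  # stand-in for the real work

    def test_uses_cache(cache):
        val = cache.get("example/value", None)
        if val is None:
            val = expensive_computation()
            cache.set("example/value", val)
        assert val == 42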
@@ -209,28 +213,35 @@ Inspecting Cache content ------------------------------- You can always peek at the content of the cache using the -``--cache`` command line option:: +``--cache-clear`` command line option:: - $ py.test --cache - ============================= test session starts ============================== - platform linux2 -- Python 2.7.3 -- pytest-2.3.5 - plugins: cache - cachedir: /tmp/doc-exec-6/.cache - --------------------------------- cache values --------------------------------- - example/value contains: - 42 - cache/lastfailed contains: - set(['test_caching.py::test_function']) - - =============================== in 0.01 seconds =============================== + $ py.test --cache-clear + =========================== test session starts ============================ + platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0 + rootdir: /tmp/doc-exec-9, inifile: + collected 1 items + + test_caching.py F + + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ + + mydata = 42 + + def test_function(mydata): + > assert mydata == 23 + E assert 42 == 23 + + test_caching.py:12: AssertionError + ========================= 1 failed in 5.41 seconds ========================= Clearing Cache content ------------------------------- You can instruct pytest to clear all cache files and values -by adding the ``--clearcache`` option like this:: +by adding the ``--cache-clear`` option like this:: - py.test --clearcache + py.test --cache-clear This is recommended for invocations from Continous Integration servers where isolation and correctness is more important diff --git a/testing/test_cache.py b/testing/test_cache.py index 5a09bedad..8eac4e8e0 100755 --- a/testing/test_cache.py +++ b/testing/test_cache.py @@ -1,4 +1,7 @@ import pytest +import os +import shutil +import py pytest_plugins = "pytester", @@ -63,8 +66,9 @@ def test_cache_reportheader(testdir): "cachedir: .cache" ]) + def test_cache_show(testdir): - result = testdir.runpytest("--show-cache") + result = testdir.runpytest("--cache-show") assert result.ret == 0 result.stdout.fnmatch_lines([ "*cache is empty*" @@ -79,7 +83,7 @@ def test_cache_show(testdir): """) result = testdir.runpytest() assert result.ret == 5 # no tests executed - result = testdir.runpytest("--show-cache") + result = testdir.runpytest("--cache-show") result.stdout.fnmatch_lines_random([ "*cachedir:*", "-*cache values*-", @@ -91,3 +95,232 @@ def test_cache_show(testdir): "*mydb/hello*length 0*", "*mydb/world*length 0*", ]) + + +class TestLastFailed: + @pytest.mark.skipif("sys.version_info < (2,6)") + def test_lastfailed_usecase(self, testdir, monkeypatch): + monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) + p = testdir.makepyfile(""" + def test_1(): + assert 0 + def test_2(): + assert 0 + def test_3(): + assert 1 + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + p.write(py.code.Source(""" + def test_1(): + assert 1 + + def test_2(): + assert 1 + + def test_3(): + assert 0 + """)) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines([ + "*2 passed*1 desel*", + ]) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + result = testdir.runpytest("--lf", "--cache-clear") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + + # Run this again to make sure clear-cache is robust + if os.path.isdir('.cache'): + 
shutil.rmtree('.cache') + result = testdir.runpytest("--lf", "--cache-clear") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + + def test_failedfirst_order(self, testdir): + testdir.tmpdir.join('test_a.py').write(py.code.Source(""" + def test_always_passes(): + assert 1 + """)) + testdir.tmpdir.join('test_b.py').write(py.code.Source(""" + def test_always_fails(): + assert 0 + """)) + result = testdir.runpytest() + # Test order will be collection order; alphabetical + result.stdout.fnmatch_lines([ + "test_a.py*", + "test_b.py*", + ]) + result = testdir.runpytest("--lf", "--ff") + # Test order will be failing tests firs + result.stdout.fnmatch_lines([ + "test_b.py*", + "test_a.py*", + ]) + + @pytest.mark.skipif("sys.version_info < (2,6)") + def test_lastfailed_difference_invocations(self, testdir, monkeypatch): + monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) + testdir.makepyfile(test_a=""" + def test_a1(): + assert 0 + def test_a2(): + assert 1 + """, test_b=""" + def test_b1(): + assert 0 + """) + p = testdir.tmpdir.join("test_a.py") + p2 = testdir.tmpdir.join("test_b.py") + + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + result = testdir.runpytest("--lf", p2) + result.stdout.fnmatch_lines([ + "*1 failed*", + ]) + p2.write(py.code.Source(""" + def test_b1(): + assert 1 + """)) + result = testdir.runpytest("--lf", p2) + result.stdout.fnmatch_lines([ + "*1 passed*", + ]) + result = testdir.runpytest("--lf", p) + result.stdout.fnmatch_lines([ + "*1 failed*1 desel*", + ]) + + @pytest.mark.skipif("sys.version_info < (2,6)") + def test_lastfailed_usecase_splice(self, testdir, monkeypatch): + monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) + testdir.makepyfile(""" + def test_1(): + assert 0 + """) + p2 = testdir.tmpdir.join("test_something.py") + p2.write(py.code.Source(""" + def test_2(): + assert 0 + """)) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + result = testdir.runpytest("--lf", p2) + result.stdout.fnmatch_lines([ + "*1 failed*", + ]) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + + def test_lastfailed_xpass(self, testdir): + testdir.inline_runsource(""" + import pytest + @pytest.mark.xfail + def test_hello(): + assert 1 + """) + config = testdir.parseconfigure() + lastfailed = config.cache.get("cache/lastfailed", -1) + assert not lastfailed + + def test_lastfailed_collectfailure(self, testdir, monkeypatch): + + testdir.makepyfile(test_maybe=""" + import py + env = py.std.os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + """) + + def rlf(fail_import, fail_run): + monkeypatch.setenv('FAILIMPORT', fail_import) + monkeypatch.setenv('FAILTEST', fail_run) + + testdir.runpytest('-q') + config = testdir.parseconfigure() + lastfailed = config.cache.get("cache/lastfailed", -1) + return lastfailed + + lastfailed = rlf(fail_import=0, fail_run=0) + assert not lastfailed + + lastfailed = rlf(fail_import=1, fail_run=0) + assert list(lastfailed) == ['test_maybe.py'] + + lastfailed = rlf(fail_import=0, fail_run=1) + assert list(lastfailed) == ['test_maybe.py::test_hello'] + + + def test_lastfailed_failure_subset(self, testdir, monkeypatch): + + testdir.makepyfile(test_maybe=""" + import py + env = py.std.os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + """) + + testdir.makepyfile(test_maybe2=""" + import py + env = 
py.std.os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + + def test_pass(): + pass + """) + + def rlf(fail_import, fail_run, args=()): + monkeypatch.setenv('FAILIMPORT', fail_import) + monkeypatch.setenv('FAILTEST', fail_run) + + result = testdir.runpytest('-q', '--lf', *args) + config = testdir.parseconfigure() + lastfailed = config.cache.get("cache/lastfailed", -1) + return result, lastfailed + + result, lastfailed = rlf(fail_import=0, fail_run=0) + assert not lastfailed + result.stdout.fnmatch_lines([ + '*3 passed*', + ]) + + result, lastfailed = rlf(fail_import=1, fail_run=0) + assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py'] + + + result, lastfailed = rlf(fail_import=0, fail_run=0, + args=('test_maybe2.py',)) + assert list(lastfailed) == ['test_maybe.py'] + + + # edge case of test selection - even if we remember failures + # from other tests we still need to run all tests if no test + # matches the failures + result, lastfailed = rlf(fail_import=0, fail_run=0, + args=('test_maybe2.py',)) + assert list(lastfailed) == ['test_maybe.py'] + result.stdout.fnmatch_lines([ + '*2 passed*', + ]) diff --git a/testing/test_lastfailed.py b/testing/test_lastfailed.py deleted file mode 100755 index 445d81be0..000000000 --- a/testing/test_lastfailed.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -import pytest -import shutil -import py - -pytest_plugins = "pytester", - - -class TestLastFailed: - @pytest.mark.skipif("sys.version_info < (2,6)") - def test_lastfailed_usecase(self, testdir, monkeypatch): - monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - p = testdir.makepyfile(""" - def test_1(): - assert 0 - def test_2(): - assert 0 - def test_3(): - assert 1 - """) - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) - p.write(py.code.Source(""" - def test_1(): - assert 1 - - def test_2(): - assert 1 - - def test_3(): - assert 0 - """)) - result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*2 passed*1 desel*", - ]) - result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) - result = testdir.runpytest("--lf", "--clearcache") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) - - # Run this again to make sure clearcache is robust - if os.path.isdir('.cache'): - shutil.rmtree('.cache') - result = testdir.runpytest("--lf", "--clearcache") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) - - def test_failedfirst_order(self, testdir): - testdir.tmpdir.join('test_a.py').write(py.code.Source(""" - def test_always_passes(): - assert 1 - """)) - testdir.tmpdir.join('test_b.py').write(py.code.Source(""" - def test_always_fails(): - assert 0 - """)) - result = testdir.runpytest() - # Test order will be collection order; alphabetical - result.stdout.fnmatch_lines([ - "test_a.py*", - "test_b.py*", - ]) - result = testdir.runpytest("--lf", "--ff") - # Test order will be failing tests firs - result.stdout.fnmatch_lines([ - "test_b.py*", - "test_a.py*", - ]) - - @pytest.mark.skipif("sys.version_info < (2,6)") - def test_lastfailed_difference_invocations(self, testdir, monkeypatch): - monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - testdir.makepyfile(test_a=""" - def test_a1(): - assert 0 - def test_a2(): - assert 1 - """, test_b=""" - def test_b1(): - assert 0 - """) - p = testdir.tmpdir.join("test_a.py") - p2 = testdir.tmpdir.join("test_b.py") - - result = testdir.runpytest() - result.stdout.fnmatch_lines([ 
- "*2 failed*", - ]) - result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 failed*", - ]) - p2.write(py.code.Source(""" - def test_b1(): - assert 1 - """)) - result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 passed*", - ]) - result = testdir.runpytest("--lf", p) - result.stdout.fnmatch_lines([ - "*1 failed*1 desel*", - ]) - - @pytest.mark.skipif("sys.version_info < (2,6)") - def test_lastfailed_usecase_splice(self, testdir, monkeypatch): - monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - testdir.makepyfile(""" - def test_1(): - assert 0 - """) - p2 = testdir.tmpdir.join("test_something.py") - p2.write(py.code.Source(""" - def test_2(): - assert 0 - """)) - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) - result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 failed*", - ]) - result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) - - def test_lastfailed_xpass(self, testdir): - testdir.inline_runsource(""" - import pytest - @pytest.mark.xfail - def test_hello(): - assert 1 - """) - config = testdir.parseconfigure() - lastfailed = config.cache.get("cache/lastfailed", -1) - assert not lastfailed - - def test_lastfailed_collectfailure(self, testdir, monkeypatch): - - testdir.makepyfile(test_maybe=""" - import py - env = py.std.os.environ - if '1' == env['FAILIMPORT']: - raise ImportError('fail') - def test_hello(): - assert '0' == env['FAILTEST'] - """) - - def rlf(fail_import, fail_run): - monkeypatch.setenv('FAILIMPORT', fail_import) - monkeypatch.setenv('FAILTEST', fail_run) - - testdir.runpytest('-q') - config = testdir.parseconfigure() - lastfailed = config.cache.get("cache/lastfailed", -1) - return lastfailed - - lastfailed = rlf(fail_import=0, fail_run=0) - assert not lastfailed - - lastfailed = rlf(fail_import=1, fail_run=0) - assert list(lastfailed) == ['test_maybe.py'] - - lastfailed = rlf(fail_import=0, fail_run=1) - assert list(lastfailed) == ['test_maybe.py::test_hello'] - - - def test_lastfailed_failure_subset(self, testdir, monkeypatch): - - testdir.makepyfile(test_maybe=""" - import py - env = py.std.os.environ - if '1' == env['FAILIMPORT']: - raise ImportError('fail') - def test_hello(): - assert '0' == env['FAILTEST'] - """) - - testdir.makepyfile(test_maybe2=""" - import py - env = py.std.os.environ - if '1' == env['FAILIMPORT']: - raise ImportError('fail') - def test_hello(): - assert '0' == env['FAILTEST'] - - def test_pass(): - pass - """) - - def rlf(fail_import, fail_run, args=()): - monkeypatch.setenv('FAILIMPORT', fail_import) - monkeypatch.setenv('FAILTEST', fail_run) - - result = testdir.runpytest('-q', '--lf', *args) - config = testdir.parseconfigure() - lastfailed = config.cache.get("cache/lastfailed", -1) - return result, lastfailed - - result, lastfailed = rlf(fail_import=0, fail_run=0) - assert not lastfailed - result.stdout.fnmatch_lines([ - '*3 passed*', - ]) - - result, lastfailed = rlf(fail_import=1, fail_run=0) - assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py'] - - - result, lastfailed = rlf(fail_import=0, fail_run=0, - args=('test_maybe2.py',)) - assert list(lastfailed) == ['test_maybe.py'] - - - # edge case of test selection - even if we remember failures - # from other tests we still need to run all tests if no test - # matches the failures - result, lastfailed = rlf(fail_import=0, fail_run=0, - args=('test_maybe2.py',)) - assert list(lastfailed) == ['test_maybe.py'] - 
result.stdout.fnmatch_lines([
-            '*2 passed*',
-        ])

From 79d22bf3347899a263c35fd2e251fd1e95062799 Mon Sep 17 00:00:00 2001
From: holger krekel
Date: Wed, 16 Sep 2015 20:44:41 +0200
Subject: [PATCH 12/13] some more doc refinements

---
 doc/en/cache.rst | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/doc/en/cache.rst b/doc/en/cache.rst
index b50265725..300aea5a0 100644
--- a/doc/en/cache.rst
+++ b/doc/en/cache.rst
@@ -69,8 +69,7 @@ If you run this for the first time you will see two failures::
 
     test_50.py:6: Failed
     2 failed, 48 passed in 0.04 seconds
 
-If you then run it with ``--lf`` you will run only the two failing test
-from the last run::
+If you then run it with ``--lf``::
 
     $ py.test --lf
     =========================== test session starts ============================
@@ -106,11 +105,12 @@ from the last run::
     test_50.py:6: Failed
     ================= 2 failed, 48 deselected in 0.02 seconds ==================
 
-The last line indicates that 48 tests have not been run.
+You have run only the two failing tests from the last run, while 48 tests have
+not been run ("deselected").
 
-If you run with the ``--ff`` option, all tests will be run but the first
-failures will be executed first (as can be seen from the series of ``FF`` and
-dots)::
+Now, if you run with the ``--ff`` option, all tests will be run but the
+previous failures will be executed first (as can be seen from the series
+of ``FF`` and dots)::
 
     $ py.test --ff
     =========================== test session starts ============================

From 3841e99720e2b5ee4299857f404505a3920f112a Mon Sep 17 00:00:00 2001
From: holger krekel
Date: Wed, 16 Sep 2015 21:06:44 +0200
Subject: [PATCH 13/13] avoid oldstyle funcarg usage

---
 doc/en/cache.rst | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/doc/en/cache.rst b/doc/en/cache.rst
index 300aea5a0..1682e7c43 100644
--- a/doc/en/cache.rst
+++ b/doc/en/cache.rst
@@ -73,9 +73,9 @@ If you then run it with ``--lf``::
 
     $ py.test --lf
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0
+    platform linux2 -- Python 2.7.6, pytest-2.7.3.dev428+ng79d22bf.d20150916, py-1.4.30, pluggy-0.3.0
     run-last-failure: rerun last 2 failures
-    rootdir: /tmp/doc-exec-9, inifile:
+    rootdir: /tmp/doc-exec-94, inifile:
     collected 50 items
 
     test_50.py FF
@@ -103,7 +103,7 @@ If you then run it with ``--lf``::
     E Failed: bad luck
 
     test_50.py:6: Failed
-    ================= 2 failed, 48 deselected in 0.02 seconds ==================
+    ================= 2 failed, 48 deselected in 0.01 seconds ==================
 
 You have run only the two failing tests from the last run, while 48 tests have
 not been run ("deselected").
@@ -114,9 +114,9 @@ of ``FF`` and dots::
 
     $ py.test --ff
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0
+    platform linux2 -- Python 2.7.6, pytest-2.7.3.dev428+ng79d22bf.d20150916, py-1.4.30, pluggy-0.3.0
     run-last-failure: rerun last 2 failures first
-    rootdir: /tmp/doc-exec-9, inifile:
+    rootdir: /tmp/doc-exec-94, inifile:
     collected 50 items
 
     test_50.py FF................................................
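To experiment with ``--lf`` and ``--ff`` in a scratch directory, a module as
small as the following is sufficient (file and test names here are
illustrative only, not part of the patch)::

    # content of test_flaky.py -- a minimal example for trying --lf and --ff
    def test_passes():
        assert 1

    def test_fails():
        assert 0  # recorded under "cache/lastfailed" on the first run

A plain ``py.test`` run records the failure; ``py.test --lf`` then reruns only
``test_fails`` and deselects ``test_passes``, while ``py.test --ff`` runs both
tests with ``test_fails`` first.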
@@ -144,7 +144,7 @@ of ``FF`` and dots::
     E Failed: bad luck
 
     test_50.py:6: Failed
-    =================== 2 failed, 48 passed in 0.04 seconds ====================
+    =================== 2 failed, 48 passed in 0.03 seconds ====================
 
 .. _`config.cache`:
 
@@ -153,15 +153,17 @@ The new config.cache object
 
 .. regendoc:wipe
 
-Plugins or conftest.py support code can get a cached value
-using the pytest ``config`` object. Here is a basic example
-plugin which implements a `funcarg `_
-which re-uses previously created state across py.test invocations::
+Plugins or conftest.py support code can get a cached value using the
+pytest ``config`` object. Here is a basic example plugin which
+implements a :ref:`fixture` that re-uses previously created state
+across py.test invocations::
 
     # content of test_caching.py
+    import pytest
    import time
 
-    def pytest_funcarg__mydata(request):
+    @pytest.fixture
+    def mydata(request):
         val = request.config.cache.get("example/value", None)
         if val is None:
             time.sleep(9*0.6) # expensive computation :)
@@ -186,7 +188,7 @@ of the sleep::
     >       assert mydata == 23
     E       assert 42 == 23
 
-    test_caching.py:12: AssertionError
+    test_caching.py:14: AssertionError
     1 failed in 5.41 seconds
 
 If you run it a second time the value will be retrieved from
@@ -203,7 +205,7 @@ the cache and this will be quick::
     >       assert mydata == 23
     E       assert 42 == 23
 
-    test_caching.py:12: AssertionError
+    test_caching.py:14: AssertionError
     1 failed in 0.01 seconds
 
 See the `cache-api`_ for more details.
@@ -217,8 +219,8 @@ You can always peek at the content of the cache using the
 
     $ py.test --cache-show
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0
-    rootdir: /tmp/doc-exec-9, inifile:
+    platform linux2 -- Python 2.7.6, pytest-2.7.3.dev428+ng79d22bf.d20150916, py-1.4.30, pluggy-0.3.0
+    rootdir: /tmp/doc-exec-94, inifile:
     collected 1 items
 
     test_caching.py F
 
     ================================= FAILURES =================================
     ______________________________ test_function _______________________________
 
     mydata = 42
 
         def test_function(mydata):
     >       assert mydata == 23
     E       assert 42 == 23
 
-    test_caching.py:12: AssertionError
+    test_caching.py:14: AssertionError
     ========================= 1 failed in 5.41 seconds =========================
 
 Clearing Cache content
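Clearing also discards values that plugins store through ``config.cache``. A
``conftest.py`` along the following lines shows the get/set round trip that a
``--cache-clear`` run would reset; the ``myplugin/expensive`` key and the slow
``compute()`` helper are purely illustrative, not part of the patch::

    # content of conftest.py -- a sketch; key and helper names are hypothetical
    import time
    import pytest

    def compute():
        time.sleep(2)      # stands in for an expensive computation
        return [1, 2, 3]   # values are stored as JSON, so keep them serializable

    @pytest.fixture
    def expensive_data(request):
        cache = request.config.cache
        val = cache.get("myplugin/expensive", None)  # None signals a cache miss
        if val is None:
            val = compute()
            cache.set("myplugin/expensive", val)     # persists across invocations
        return val

Because the value survives in the ``.cache`` directory between runs, only the
first invocation pays for ``compute()``; after ``py.test --cache-clear`` the
value has to be recomputed.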