From 55ebf261ce9eee7d7dd67a332deb151747e8648e Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 3 Jul 2018 13:37:03 -0400 Subject: [PATCH 01/17] Switch to new API --- src/_pytest/mark/structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/_pytest/mark/structures.py b/src/_pytest/mark/structures.py index 3fb15bbc2..3c161c4a6 100644 --- a/src/_pytest/mark/structures.py +++ b/src/_pytest/mark/structures.py @@ -294,7 +294,7 @@ def _marked(func, mark): class MarkInfo(object): """ Marking object created by :class:`MarkDecorator` instances. """ - _marks = attr.ib(convert=list) + _marks = attr.ib(converter=list) @_marks.validator def validate_marks(self, attribute, value): From b815f67e656d77b089babb7f68340014aab01807 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 3 Jul 2018 13:40:04 -0400 Subject: [PATCH 02/17] add changelog --- changelog/3653.trivial.rst | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 changelog/3653.trivial.rst diff --git a/changelog/3653.trivial.rst b/changelog/3653.trivial.rst new file mode 100644 index 000000000..e69de29bb From b84a6463899c63e8b6c761d3c2e3682b5f48e1c7 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 3 Jul 2018 21:08:27 -0300 Subject: [PATCH 03/17] Add note to the changelog --- changelog/3653.trivial.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog/3653.trivial.rst b/changelog/3653.trivial.rst index e69de29bb..51fe2238e 100644 --- a/changelog/3653.trivial.rst +++ b/changelog/3653.trivial.rst @@ -0,0 +1 @@ +Fix usage of ``attr.ib`` deprecated ``convert`` parameter. From 43c0346d681800d41510ad3c0cfc5f8fd1bf04e4 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Wed, 4 Jul 2018 00:51:21 +0000 Subject: [PATCH 04/17] Preparing release version 3.6.3 --- CHANGELOG.rst | 49 ++++ changelog/3061.bugfix.rst | 1 - changelog/3593.bugfix.rst | 5 - changelog/3598.trivial.rst | 1 - changelog/3605.bugfix.rst | 1 - changelog/3609.trivial.rst | 1 - changelog/3611.doc.rst | 1 - changelog/3630.bugfix.rst | 1 - changelog/3631.bugfix.rst | 1 - changelog/3653.trivial.rst | 1 - doc/en/announce/index.rst | 1 + doc/en/announce/release-3.6.3.rst | 28 ++ doc/en/assert.rst | 20 +- doc/en/builtin.rst | 30 +- doc/en/cache.rst | 58 ++-- doc/en/capture.rst | 8 +- doc/en/doctest.rst | 4 +- doc/en/example/markers.rst | 92 +++--- doc/en/example/nonpython.rst | 10 +- doc/en/example/parametrize.rst | 34 +-- doc/en/example/pythoncollection.rst | 6 +- doc/en/example/reportingdemo.rst | 434 ++++++++++++++-------------- doc/en/example/simple.rst | 104 +++---- doc/en/fixture.rst | 80 ++--- doc/en/getting-started.rst | 20 +- doc/en/index.rst | 8 +- doc/en/parametrize.rst | 20 +- doc/en/skipping.rst | 6 +- doc/en/tmpdir.rst | 10 +- doc/en/unittest.rst | 16 +- doc/en/usage.rst | 2 +- doc/en/warnings.rst | 18 +- 32 files changed, 562 insertions(+), 509 deletions(-) delete mode 100644 changelog/3061.bugfix.rst delete mode 100644 changelog/3593.bugfix.rst delete mode 100644 changelog/3598.trivial.rst delete mode 100644 changelog/3605.bugfix.rst delete mode 100644 changelog/3609.trivial.rst delete mode 100644 changelog/3611.doc.rst delete mode 100644 changelog/3630.bugfix.rst delete mode 100644 changelog/3631.bugfix.rst delete mode 100644 changelog/3653.trivial.rst create mode 100644 doc/en/announce/release-3.6.3.rst diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 21a090414..b6797b0ca 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,55 @@ .. 
towncrier release notes start +Pytest 3.6.3 (2018-07-04) +========================= + +Bug Fixes +--------- + +- Fix ``ImportWarning`` triggered by explicit relative imports in + assertion-rewritten package modules. (`#3061 +  `_) + +- If the user passes a numpy array created like ``numpy.array(5)`` as the + expected value, an array with one element and no shape (a 0-d array) is + created; when used with ``approx``, computing the ``repr`` raised + 'TypeError: iteration over a 0-d array'. pytest now iterates properly over + numpy arrays even with 0 dimensions. (`#3593 + `_) + +- No longer raise ``ValueError`` when using the ``get_marker`` API. (`#3605 + `_) + +- Log messages with unicode characters would not appear in the output log file. + (`#3630 `_) + +- No longer raise ``AttributeError`` when legacy marks can't be stored. (`#3631 + `_) + + +Improved Documentation +---------------------- + +- The description above the example for ``@pytest.mark.skipif`` now better + matches the code. (`#3611 + `_) + + +Trivial/Internal Changes +------------------------ + +- Internal refactoring: removed the unused ``CallSpec2._globalid_args`` + attribute and the ``metafunc`` parameter from ``CallSpec2.copy()``. (`#3598 + `_) + +- Silence ``reduce`` usage warning in Python 2. (`#3609 + `_) + +- Fix usage of ``attr.ib``'s deprecated ``convert`` parameter. (`#3653 + `_) + + Pytest 3.6.2 (2018-06-20) =========================
diff --git a/changelog/3061.bugfix.rst b/changelog/3061.bugfix.rst deleted file mode 100644 index 1bdd1064e..000000000 --- a/changelog/3061.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix ``ImportWarning`` triggered by explicit relative imports in assertion-rewritten package modules.
diff --git a/changelog/3593.bugfix.rst b/changelog/3593.bugfix.rst deleted file mode 100644 index 37aa30577..000000000 --- a/changelog/3593.bugfix.rst +++ /dev/null @@ -1,5 +0,0 @@ -If the user pass as a expected value a numpy array created like -numpy.array(5); it will creates an array with one element without shape, -when used with approx it will raise an error for the `repr` -'TypeError: iteration over a 0-d array'. With this PR pytest will iterate -properly in the numpy array even with 0 dimension.
diff --git a/changelog/3598.trivial.rst b/changelog/3598.trivial.rst deleted file mode 100644 index fd80f58cd..000000000 --- a/changelog/3598.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Internal refactoring: removed unused ``CallSpec2tox ._globalid_args`` attribute and ``metafunc`` parameter from ``CallSpec2.copy()``.
diff --git a/changelog/3605.bugfix.rst b/changelog/3605.bugfix.rst deleted file mode 100644 index 58a294ecb..000000000 --- a/changelog/3605.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -no longer ValueError when using the ``get_marker`` api.
diff --git a/changelog/3609.trivial.rst b/changelog/3609.trivial.rst deleted file mode 100644 index 96e720d92..000000000 --- a/changelog/3609.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Silence usage of ``reduce`` warning in python 2
diff --git a/changelog/3611.doc.rst b/changelog/3611.doc.rst deleted file mode 100644 index fe19cc025..000000000 --- a/changelog/3611.doc.rst +++ /dev/null @@ -1 +0,0 @@ -The description above the example for ``@pytest.mark.skipif`` now better matches the code.
diff --git a/changelog/3630.bugfix.rst b/changelog/3630.bugfix.rst deleted file mode 100644 index 505bcb8fd..000000000 --- a/changelog/3630.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Log messages with unicode characters would not appear in the output log file.
diff --git a/changelog/3631.bugfix.rst b/changelog/3631.bugfix.rst deleted file mode 100644 index 261c41ccd..000000000 --- a/changelog/3631.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -No longer raise AttributeError when legacy marks can't be stored. diff --git a/changelog/3653.trivial.rst b/changelog/3653.trivial.rst deleted file mode 100644 index 51fe2238e..000000000 --- a/changelog/3653.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Fix usage of ``attr.ib`` deprecated ``convert`` parameter. diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index 107fcd2ad..8283bf86d 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -6,6 +6,7 @@ Release announcements :maxdepth: 2 + release-3.6.3 release-3.6.2 release-3.6.1 release-3.6.0 diff --git a/doc/en/announce/release-3.6.3.rst b/doc/en/announce/release-3.6.3.rst new file mode 100644 index 000000000..1aff2bc38 --- /dev/null +++ b/doc/en/announce/release-3.6.3.rst @@ -0,0 +1,28 @@ +pytest-3.6.3 +======================================= + +pytest 3.6.3 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* AdamEr8 +* Anthony Sottile +* Bruno Oliveira +* Jean-Paul Calderone +* Jon Dufresne +* Marcelo Duarte Trevisani +* Ondřej Súkup +* Ronny Pfannschmidt +* T.E.A de Souza +* Victor +* victor + + +Happy testing, +The pytest Development Team diff --git a/doc/en/assert.rst b/doc/en/assert.rst index e0e9b9305..bcc0a28c9 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -29,17 +29,17 @@ you will see the return value of the function call:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_assert1.py F [100%] - + ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + def test_function(): > assert f() == 4 E assert 3 == 4 E + where 3 = f() - + test_assert1.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -172,12 +172,12 @@ if you run this module:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_assert2.py F [100%] - + ================================= FAILURES ================================= ___________________________ test_set_comparison ____________________________ - + def test_set_comparison(): set1 = set("1308") set2 = set("8035") @@ -188,7 +188,7 @@ if you run this module:: E Extra items in the right set: E '5' E Use -v to get the full diff - + test_assert2.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -241,14 +241,14 @@ the conftest file:: F [100%] ================================= FAILURES ================================= _______________________________ test_compare _______________________________ - + def test_compare(): f1 = Foo(1) f2 = Foo(2) > assert f1 == f2 E assert Comparing Foo instances: E vals: 1 != 2 - + test_foocompare.py:11: AssertionError 1 failed in 0.12 seconds diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index c2d23469b..35f1315d3 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -17,13 +17,13 @@ For information about fixtures, see :ref:`fixtures`. 
To see a complete list of a $ pytest -q --fixtures cache Return a cache object that can persist state between testing sessions. - + cache.get(key, default) cache.set(key, value) - + Keys must be a ``/`` separated value, where the first part is usually the name of your plugin or application to avoid clashes with other cache users. - + Values can be any object handled by the json stdlib module. capsys Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make @@ -49,9 +49,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. pytestconfig Session-scoped fixture that returns the :class:`_pytest.config.Config` object. - + Example:: - + def test_foo(pytestconfig): if pytestconfig.getoption("verbose"): ... @@ -61,9 +61,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a configured reporters, like JUnit XML. The fixture is callable with ``(name, value)``, with value being automatically xml-encoded. - + Example:: - + def test_function(record_property): record_property("example_key", 1) record_xml_property @@ -74,9 +74,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a automatically xml-encoded caplog Access and control log capturing. - + Captured logs are available through the following methods:: - + * caplog.text -> string containing formatted log output * caplog.records -> list of logging.LogRecord instances * caplog.record_tuples -> list of (logger_name, level, message) tuples @@ -84,23 +84,23 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a monkeypatch The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: - + monkeypatch.setattr(obj, name, value, raising=True) monkeypatch.delattr(obj, name, raising=True) monkeypatch.setitem(mapping, name, value) monkeypatch.delitem(obj, name, raising=True) monkeypatch.setenv(name, value, prepend=False) - monkeypatch.delenv(name, value, raising=True) + monkeypatch.delenv(name, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path) - + All modifications will be undone after the requesting test function or fixture has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. recwarn Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. - + See http://docs.python.org/library/warnings.html for information on warning categories. tmpdir_factory @@ -111,9 +111,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. - + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html - + no tests ran in 0.12 seconds You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:: diff --git a/doc/en/cache.rst b/doc/en/cache.rst index 37bcf7070..437109db0 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -49,26 +49,26 @@ If you run this for the first time you will see two failures:: .................F.......F........................ 
[100%] ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed 2 failed, 48 passed in 0.12 seconds @@ -80,31 +80,31 @@ If you then run it with ``--lf``:: rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items / 48 deselected run-last-failure: rerun previous 2 failures - + test_50.py FF [100%] - + ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed ================= 2 failed, 48 deselected in 0.12 seconds ================== @@ -121,31 +121,31 @@ of ``FF`` and dots):: rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items run-last-failure: rerun previous 2 failures first - + test_50.py FF................................................ [100%] - + ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed =================== 2 failed, 48 passed in 0.12 seconds ==================== @@ -198,13 +198,13 @@ of the sleep:: F [100%] ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:14: AssertionError 1 failed in 0.12 seconds @@ -215,13 +215,13 @@ the cache and this will be quick:: F [100%] ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:14: AssertionError 1 failed in 0.12 seconds @@ -246,7 +246,7 @@ You can always peek at the content of the cache using the ['test_caching.py::test_function'] example/value contains: 42 - + ======================= no tests ran in 0.12 seconds ======================= Clearing Cache content diff --git a/doc/en/capture.rst b/doc/en/capture.rst index ab86fb55f..549845cc9 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -68,16 +68,16 @@ of the failing function and hide the other one:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, 
inifile: collected 2 items - + test_module.py .F [100%] - + ================================= FAILURES ================================= ________________________________ test_func2 ________________________________ - + def test_func2(): > assert False E assert False - + test_module.py:9: AssertionError -------------------------- Captured stdout setup --------------------------- setting up diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index 9488ee826..ac470d105 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -65,9 +65,9 @@ then you can just invoke ``pytest`` without command line options:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 1 item - + mymodule.py . [100%] - + ========================= 1 passed in 0.12 seconds ========================= It is possible to use fixtures using the ``getfixture`` helper:: diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index bf352bc81..176fdccdb 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -35,9 +35,9 @@ You can then restrict a test run to only run tests marked with ``webtest``:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 3 deselected - + test_server.py::test_send_http PASSED [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== Or the inverse, running all tests except the webtest ones:: @@ -48,11 +48,11 @@ Or the inverse, running all tests except the webtest ones:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 1 deselected - + test_server.py::test_something_quick PASSED [ 33%] test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - + ================== 3 passed, 1 deselected in 0.12 seconds ================== Selecting tests based on their node ID @@ -68,9 +68,9 @@ tests based on their module, class, method, or function name:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 1 item - + test_server.py::TestClass::test_method PASSED [100%] - + ========================= 1 passed in 0.12 seconds ========================= You can also select on the class:: @@ -81,9 +81,9 @@ You can also select on the class:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 1 item - + test_server.py::TestClass::test_method PASSED [100%] - + ========================= 1 passed in 0.12 seconds ========================= Or select multiple nodes:: @@ -94,10 +94,10 @@ Or select multiple nodes:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - + test_server.py::TestClass::test_method PASSED [ 50%] test_server.py::test_send_http PASSED [100%] - + ========================= 2 passed in 0.12 seconds ========================= .. _node-id: @@ -132,9 +132,9 @@ select tests based on their names:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 3 deselected - + test_server.py::test_send_http PASSED [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== And you can also run all tests except the ones that match the keyword:: @@ -145,11 +145,11 @@ And you can also run all tests except the ones that match the keyword:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items / 1 deselected - + test_server.py::test_something_quick PASSED [ 33%] test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - + ================== 3 passed, 1 deselected in 0.12 seconds ================== Or to select "http" and "quick" tests:: @@ -160,10 +160,10 @@ Or to select "http" and "quick" tests:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 2 deselected - + test_server.py::test_send_http PASSED [ 50%] test_server.py::test_something_quick PASSED [100%] - + ================== 2 passed, 2 deselected in 0.12 seconds ================== .. note:: @@ -199,21 +199,21 @@ You can ask which markers exist for your test suite - the list includes our just $ pytest --markers @pytest.mark.webtest: mark a test as a webtest. - + @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. - + @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html - + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. - - @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - + + @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - + For an example on how to add and work with markers from a plugin, see :ref:`adding a custom marker from a plugin`. 
@@ -352,9 +352,9 @@ the test needs:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_someenv.py s [100%] - + ======================== 1 skipped in 0.12 seconds ========================= and here is one that specifies exactly the environment needed:: @@ -364,30 +364,30 @@ and here is one that specifies exactly the environment needed:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_someenv.py . [100%] - + ========================= 1 passed in 0.12 seconds ========================= The ``--markers`` option always gives you a list of available markers:: $ pytest --markers @pytest.mark.env(name): mark test to run only on named environment - + @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. - + @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html - + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. - - @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - + + @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - + .. _`passing callables to custom markers`: @@ -523,11 +523,11 @@ then you will see two tests skipped and two executed tests as expected:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_plat.py s.s. 
[100%] ========================= short test summary info ========================== SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux - + =================== 2 passed, 2 skipped in 0.12 seconds ==================== Note that if you specify a platform via the marker-command line option like this:: @@ -537,9 +537,9 @@ Note that if you specify a platform via the marker-command line option like this platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 3 deselected - + test_plat.py . [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -588,9 +588,9 @@ We can now use the ``-m option`` to select one set:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 2 deselected - + test_module.py FF [100%] - + ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple @@ -609,9 +609,9 @@ or to select both "event" and "interface" tests:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 1 deselected - + test_module.py FFF [100%] - + ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 4f5adf63f..ca7b2c8df 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -30,9 +30,9 @@ now execute the test specification:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items - + test_simple.yml F. [100%] - + ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed @@ -63,10 +63,10 @@ consulted when reporting in ``verbose`` mode:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collecting ... 
collected 2 items - + test_simple.yml::hello FAILED [ 50%] test_simple.yml::ok PASSED [100%] - + ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed @@ -87,5 +87,5 @@ interesting to just look at the collection tree:: - + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 882700fec..49ffa7288 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -55,13 +55,13 @@ let's run the full monty:: ....F [100%] ================================= FAILURES ================================= _____________________________ test_compute[4] ______________________________ - + param1 = 4 - + def test_compute(param1): > assert param1 < 4 E assert 4 < 4 - + test_compute.py:3: AssertionError 1 failed, 4 passed in 0.12 seconds @@ -151,7 +151,7 @@ objects, they are still using the default pytest representation:: - + ======================= no tests ran in 0.12 seconds ======================= In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs @@ -198,9 +198,9 @@ this is a fully self-contained example which you can run with:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_scenarios.py .... [100%] - + ========================= 4 passed in 0.12 seconds ========================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: @@ -218,7 +218,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - + ======================= no tests ran in 0.12 seconds ======================= Note that we told ``metafunc.parametrize()`` that your scenario values @@ -279,7 +279,7 @@ Let's first see how it looks like at collection time:: - + ======================= no tests ran in 0.12 seconds ======================= And then when we run the test:: @@ -288,15 +288,15 @@ And then when we run the test:: .F [100%] ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ - + db = - + def test_db_initialized(db): # a dummy test if db.__class__.__name__ == "DB2": > pytest.fail("deliberately failing for demo purposes") E Failed: deliberately failing for demo purposes - + test_backends.py:6: Failed 1 failed, 1 passed in 0.12 seconds @@ -339,7 +339,7 @@ The result of this test will be successful:: collected 1 item - + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -384,13 +384,13 @@ argument sets to use for each test function. Let's run it:: F.. 
[100%] ================================= FAILURES ================================= ________________________ TestClass.test_equals[1-2] ________________________ - + self = , a = 1, b = 2 - + def test_equals(self, a, b): > assert a == b E assert 1 == 2 - + test_parametrize.py:18: AssertionError 1 failed, 2 passed in 0.12 seconds @@ -462,11 +462,11 @@ If you run this with reporting for skips enabled:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s [100%] ========================= short test summary info ========================== SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' - + =================== 1 passed, 1 skipped in 0.12 seconds ==================== You'll see that we don't have an ``opt2`` module and thus the second test run diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 8e9d3ae62..58b4364b5 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -133,7 +133,7 @@ then the test collection looks like this:: - + ======================= no tests ran in 0.12 seconds ======================= .. note:: @@ -180,7 +180,7 @@ You can always peek at the collection tree without running tests like this:: - + ======================= no tests ran in 0.12 seconds ======================= .. _customizing-test-collection: @@ -243,5 +243,5 @@ file will be left out:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 4691b128b..5df790479 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -14,83 +14,81 @@ get on the terminal - we are working on that):: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/assertion, inifile: collected 42 items - + failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] - + ================================= FAILURES ================================= ____________________________ test_generative[0] ____________________________ - + param1 = 3, param2 = 6 - + def test_generative(param1, param2): > assert param1 * 2 < param2 E assert (3 * 2) < 6 - + failure_demo.py:19: AssertionError _________________________ TestFailing.test_simple __________________________ - + self = - + def test_simple(self): - def f(): return 42 - + def g(): return 43 - + > assert f() == g() E assert 42 == 43 E + where 42 = .f at 0xdeadbeef>() E + and 43 = .g at 0xdeadbeef>() - - failure_demo.py:37: AssertionError + + failure_demo.py:35: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ - + self = - + def test_simple_multiline(self): > otherfunc_multi(42, 6 * 9) - - failure_demo.py:40: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:38: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + a = 42, b = 54 - + def otherfunc_multi(a, b): > assert a == b E assert 42 == 54 - + failure_demo.py:15: AssertionError ___________________________ TestFailing.test_not ___________________________ - + self = - + def test_not(self): - def f(): return 42 - + > assert not f() E assert not 42 E + where 42 = .f at 0xdeadbeef>() - - failure_demo.py:47: AssertionError + + 
failure_demo.py:44: AssertionError _________________ TestSpecialisedExplanations.test_eq_text _________________ - + self = - + def test_eq_text(self): > assert "spam" == "eggs" E AssertionError: assert 'spam' == 'eggs' E - spam E + eggs - - failure_demo.py:53: AssertionError + + failure_demo.py:49: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ - + self = - + def test_eq_similar_text(self): > assert "foo 1 bar" == "foo 2 bar" E AssertionError: assert 'foo 1 bar' == 'foo 2 bar' @@ -98,12 +96,12 @@ get on the terminal - we are working on that):: E ? ^ E + foo 2 bar E ? ^ - - failure_demo.py:56: AssertionError + + failure_demo.py:52: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ - + self = - + def test_eq_multiline_text(self): > assert "foo\nspam\nbar" == "foo\neggs\nbar" E AssertionError: assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -111,12 +109,12 @@ get on the terminal - we are working on that):: E - spam E + eggs E bar - - failure_demo.py:59: AssertionError + + failure_demo.py:55: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ - + self = - + def test_eq_long_text(self): a = "1" * 100 + "a" + "2" * 100 b = "1" * 100 + "b" + "2" * 100 @@ -128,12 +126,12 @@ get on the terminal - we are working on that):: E ? ^ E + 1111111111b222222222 E ? ^ - - failure_demo.py:64: AssertionError + + failure_demo.py:60: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ - + self = - + def test_eq_long_text_multiline(self): a = "1\n" * 100 + "a" + "2\n" * 100 b = "1\n" * 100 + "b" + "2\n" * 100 @@ -146,25 +144,25 @@ get on the terminal - we are working on that):: E 1 E 1 E 1... - E + E E ...Full output truncated (7 lines hidden), use '-vv' to show - - failure_demo.py:69: AssertionError + + failure_demo.py:65: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ - + self = - + def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] E assert [0, 1, 2] == [0, 1, 3] E At index 2 diff: 2 != 3 E Use -v to get the full diff - - failure_demo.py:72: AssertionError + + failure_demo.py:68: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ - + self = - + def test_eq_list_long(self): a = [0] * 100 + [1] + [3] * 100 b = [0] * 100 + [2] + [3] * 100 @@ -172,12 +170,12 @@ get on the terminal - we are working on that):: E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...] E At index 100 diff: 1 != 2 E Use -v to get the full diff - - failure_demo.py:77: AssertionError + + failure_demo.py:73: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ - + self = - + def test_eq_dict(self): > assert {"a": 0, "b": 1, "c": 0} == {"a": 0, "b": 2, "d": 0} E AssertionError: assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} @@ -188,14 +186,14 @@ get on the terminal - we are working on that):: E {'c': 0} E Right contains more items: E {'d': 0}... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - - failure_demo.py:80: AssertionError + + failure_demo.py:76: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ - + self = - + def test_eq_set(self): > assert {0, 10, 11, 12} == {0, 20, 21} E AssertionError: assert {0, 10, 11, 12} == {0, 20, 21} @@ -206,34 +204,34 @@ get on the terminal - we are working on that):: E Extra items in the right set: E 20 E 21... 
- E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - - failure_demo.py:83: AssertionError + + failure_demo.py:79: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ - + self = - + def test_eq_longer_list(self): > assert [1, 2] == [1, 2, 3] E assert [1, 2] == [1, 2, 3] E Right contains more items, first extra item: 3 E Use -v to get the full diff - - failure_demo.py:86: AssertionError + + failure_demo.py:82: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ - + self = - + def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] E assert 1 in [0, 2, 3, 4, 5] - - failure_demo.py:89: AssertionError + + failure_demo.py:85: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ - + self = - + def test_not_in_text_multiline(self): text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail" > assert "foo" not in text @@ -245,14 +243,14 @@ get on the terminal - we are working on that):: E includes foo E ? +++ E and a... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - - failure_demo.py:93: AssertionError + + failure_demo.py:89: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ - + self = - + def test_not_in_text_single(self): text = "single foo line" > assert "foo" not in text @@ -260,172 +258,167 @@ get on the terminal - we are working on that):: E 'foo' is contained here: E single foo line E ? +++ - - failure_demo.py:97: AssertionError + + failure_demo.py:93: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ - + self = - + def test_not_in_text_single_long(self): text = "head " * 50 + "foo " + "tail " * 20 > assert "foo" not in text E AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' E 'foo' is contained here: - E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail + E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? +++ - - failure_demo.py:101: AssertionError + + failure_demo.py:97: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ - + self = - + def test_not_in_text_single_long_term(self): text = "head " * 50 + "f" * 70 + "tail " * 20 > assert "f" * 70 not in text E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' E 'ffffffffffffffffff...fffffffffffffffffff' is contained here: - E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail + E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - - failure_demo.py:105: AssertionError + + failure_demo.py:101: AssertionError ______________________________ test_attribute ______________________________ - + def test_attribute(): - class Foo(object): b = 1 - + i = Foo() > assert i.b == 2 E assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b - - failure_demo.py:114: AssertionError + + failure_demo.py:109: AssertionError _________________________ test_attribute_instance __________________________ - + def test_attribute_instance(): - class Foo(object): b = 1 - + > assert Foo().b == 2 E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() - - failure_demo.py:122: AssertionError + + failure_demo.py:116: AssertionError __________________________ test_attribute_failure __________________________ - + def test_attribute_failure(): - class Foo(object): - def _get_b(self): raise Exception("Failed to get attrib") - + b = property(_get_b) - + i = Foo() > assert i.b == 2 - - failure_demo.py:135: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:127: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + self = .Foo object at 0xdeadbeef> - + def _get_b(self): > raise Exception("Failed to get attrib") E Exception: Failed to get attrib - - failure_demo.py:130: Exception + + failure_demo.py:122: Exception _________________________ test_attribute_multiple __________________________ - + def test_attribute_multiple(): - class Foo(object): b = 1 - + class Bar(object): b = 2 - + > assert Foo().b == Bar().b E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() E + and 2 = .Bar object at 0xdeadbeef>.b E + where .Bar object at 0xdeadbeef> = .Bar'>() - - failure_demo.py:146: AssertionError + + failure_demo.py:137: AssertionError __________________________ TestRaises.test_raises __________________________ - + self = - + def test_raises(self): s = "qwe" # NOQA > raises(TypeError, "int(s)") - - failure_demo.py:157: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:147: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - - <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:634>:1: ValueError + + <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:635>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ - + self = - + def test_raises_doesnt(self): > raises(IOError, "int('3')") E Failed: DID NOT RAISE - - failure_demo.py:160: Failed + + failure_demo.py:150: Failed __________________________ TestRaises.test_raise ___________________________ - + self = - + def test_raise(self): > raise ValueError("demo error") E ValueError: demo error - - failure_demo.py:163: ValueError + + failure_demo.py:153: ValueError ________________________ TestRaises.test_tupleerror ________________________ - + self = - + def test_tupleerror(self): > a, b = [1] # NOQA E ValueError: not enough values to unpack (expected 2, got 1) - - failure_demo.py:166: ValueError + + failure_demo.py:156: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ - + self = - + def test_reinterpret_fails_with_print_for_the_fun_of_it(self): items = [1, 2, 3] print("items 
is %r" % items) > a, b = items.pop() E TypeError: 'int' object is not iterable - - failure_demo.py:171: TypeError + + failure_demo.py:161: TypeError --------------------------- Captured stdout call --------------------------- items is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ - + self = - + def test_some_error(self): > if namenotexi: # NOQA E NameError: name 'namenotexi' is not defined - - failure_demo.py:174: NameError + + failure_demo.py:164: NameError ____________________ test_dynamic_compile_shows_nicely _____________________ - + def test_dynamic_compile_shows_nicely(): import imp import sys - + src = "def foo():\n assert 1 == 0\n" name = "abc-123" module = imp.new_module(name) @@ -433,66 +426,65 @@ get on the terminal - we are working on that):: py.builtin.exec_(code, module.__dict__) sys.modules[name] = module > module.foo() - - failure_demo.py:192: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:182: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def foo(): > assert 1 == 0 E AssertionError - - <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:189>:2: AssertionError + + <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:179>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ - + self = - + def test_complex_error(self): - def f(): return 44 - + def g(): return 43 - + > somefunc(f(), g()) - - failure_demo.py:205: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + + failure_demo.py:193: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:11: in somefunc otherfunc(x, y) - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + a = 44, b = 43 - + def otherfunc(a, b): > assert a == b E assert 44 == 43 - + failure_demo.py:7: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ - + self = - + def test_z1_unpack_error(self): items = [] > a, b = items E ValueError: not enough values to unpack (expected 2, got 0) - - failure_demo.py:209: ValueError + + failure_demo.py:197: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ - + self = - + def test_z2_type_error(self): items = 3 > a, b = items E TypeError: 'int' object is not iterable - - failure_demo.py:213: TypeError + + failure_demo.py:201: TypeError ______________________ TestMoreErrors.test_startswith ______________________ - + self = - + def test_startswith(self): s = "123" g = "456" @@ -500,96 +492,93 @@ get on the terminal - we are working on that):: E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith - - failure_demo.py:218: AssertionError + + failure_demo.py:206: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ - + self = - + def test_startswith_nested(self): - def f(): return "123" - + def g(): return "456" - + > assert f().startswith(g()) E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith E + where '123' = .f at 0xdeadbeef>() E + and '456' = .g at 0xdeadbeef>() - - failure_demo.py:228: AssertionError + + failure_demo.py:215: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - + self = - + def test_global_func(self): > assert 
isinstance(globf(42), float) E assert False E + where False = isinstance(43, float) E + where 43 = globf(42) - - failure_demo.py:231: AssertionError + + failure_demo.py:218: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - + self = - + def test_instance(self): self.x = 6 * 7 > assert self.x != 42 E assert 42 != 42 E + where 42 = .x - - failure_demo.py:235: AssertionError + + failure_demo.py:222: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - + self = - + def test_compare(self): > assert globf(10) < 5 E assert 11 < 5 E + where 11 = globf(10) - - failure_demo.py:238: AssertionError + + failure_demo.py:225: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - + self = - + def test_try_finally(self): x = 1 try: > assert x == 0 E assert 1 == 0 - - failure_demo.py:243: AssertionError + + failure_demo.py:230: AssertionError ___________________ TestCustomAssertMsg.test_single_line ___________________ - + self = - + def test_single_line(self): - class A(object): a = 1 - + b = 2 > assert A.a == b, "A.a appears not to be b" E AssertionError: A.a appears not to be b E assert 1 == 2 E + where 1 = .A'>.a - - failure_demo.py:256: AssertionError + + failure_demo.py:241: AssertionError ____________________ TestCustomAssertMsg.test_multiline ____________________ - + self = - + def test_multiline(self): - class A(object): a = 1 - + b = 2 > assert ( A.a == b @@ -599,20 +588,19 @@ get on the terminal - we are working on that):: E one of those E assert 1 == 2 E + where 1 = .A'>.a - - failure_demo.py:264: AssertionError + + failure_demo.py:248: AssertionError ___________________ TestCustomAssertMsg.test_custom_repr ___________________ - + self = - + def test_custom_repr(self): - class JSON(object): a = 1 - + def __repr__(self): return "This is JSON\n{\n 'foo': 'bar'\n}" - + a = JSON() b = 2 > assert a.a == b, a @@ -622,12 +610,12 @@ get on the terminal - we are working on that):: E } E assert 1 == 2 E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a - - failure_demo.py:278: AssertionError + + failure_demo.py:261: AssertionError ============================= warnings summary ============================= Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0. Please use Metafunc.parametrize instead. 
- + -- Docs: http://doc.pytest.org/en/latest/warnings.html ================== 42 failed, 1 warnings in 0.12 seconds =================== diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 180637ae9..c86939fb1 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -49,9 +49,9 @@ Let's run this without supplying our new option:: F [100%] ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + cmdopt = 'type1' - + def test_answer(cmdopt): if cmdopt == "type1": print("first") @@ -59,7 +59,7 @@ Let's run this without supplying our new option:: print("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- first @@ -71,9 +71,9 @@ And now with supplying a command line option:: F [100%] ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + cmdopt = 'type2' - + def test_answer(cmdopt): if cmdopt == "type1": print("first") @@ -81,7 +81,7 @@ And now with supplying a command line option:: print("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- second @@ -124,7 +124,7 @@ directory with the above conftest.py:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= .. _`excontrolskip`: @@ -182,11 +182,11 @@ and when running it will see a skipped "slow" test:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s [100%] ========================= short test summary info ========================== SKIP [1] test_module.py:8: need --runslow option to run - + =================== 1 passed, 1 skipped in 0.12 seconds ==================== Or run it including the ``slow`` marked test:: @@ -196,9 +196,9 @@ Or run it including the ``slow`` marked test:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .. [100%] - + ========================= 2 passed in 0.12 seconds ========================= Writing well integrated assertion helpers @@ -236,11 +236,11 @@ Let's run our little function:: F [100%] ================================= FAILURES ================================= ______________________________ test_something ______________________________ - + def test_something(): > checkconfig(42) E Failed: not configured: 42 - + test_checkconfig.py:11: Failed 1 failed in 0.12 seconds @@ -335,7 +335,7 @@ which will add the string to the test header accordingly:: project deps: mylib-1.1 rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -363,7 +363,7 @@ which will add info only when run with "--v":: did you? rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= and nothing when run plainly:: @@ -373,7 +373,7 @@ and nothing when run plainly:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= profiling test duration @@ -410,13 +410,13 @@ Now we can profile which test functions execute the slowest:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_some_are_slow.py ... [100%] - + ========================= slowest 3 test durations ========================= 0.30s call test_some_are_slow.py::test_funcslow2 0.20s call test_some_are_slow.py::test_funcslow1 - 0.10s call test_some_are_slow.py::test_funcfast + 0.13s call test_some_are_slow.py::test_funcfast ========================= 3 passed in 0.12 seconds ========================= incremental testing - test steps @@ -482,18 +482,18 @@ If we run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_step.py .Fx. [100%] - + ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - + self = - + def test_modification(self): > assert 0 E assert 0 - + test_step.py:11: AssertionError ========================= short test summary info ========================== XFAIL test_step.py::TestUserHandling::()::test_deletion @@ -563,12 +563,12 @@ We can run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items - + test_step.py .Fx. [ 57%] a/test_db.py F [ 71%] a/test_db2.py F [ 85%] b/test_error.py E [100%] - + ================================== ERRORS ================================== _______________________ ERROR at setup of test_root ________________________ file $REGENDOC_TMPDIR/b/test_error.py, line 1 @@ -576,37 +576,37 @@ We can run this:: E fixture 'db' not found > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmpdir, tmpdir_factory > use 'pytest --fixtures [testpath]' for help on them. 
- + $REGENDOC_TMPDIR/b/test_error.py:1 ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - + self = - + def test_modification(self): > assert 0 E assert 0 - + test_step.py:11: AssertionError _________________________________ test_a1 __________________________________ - + db = - + def test_a1(db): > assert 0, db # to show value E AssertionError: E assert 0 - + a/test_db.py:2: AssertionError _________________________________ test_a2 __________________________________ - + db = - + def test_a2(db): > assert 0, db # to show value E AssertionError: E assert 0 - + a/test_db2.py:2: AssertionError ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== @@ -674,25 +674,25 @@ and run them:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF [100%] - + ================================= FAILURES ================================= ________________________________ test_fail1 ________________________________ - + tmpdir = local('PYTEST_TMPDIR/test_fail10') - + def test_fail1(tmpdir): > assert 0 E assert 0 - + test_module.py:2: AssertionError ________________________________ test_fail2 ________________________________ - + def test_fail2(): > assert 0 E assert 0 - + test_module.py:6: AssertionError ========================= 2 failed in 0.12 seconds ========================= @@ -773,36 +773,36 @@ and run it:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_module.py Esetting up a test failed! test_module.py::test_setup_fails Fexecuting test failed test_module.py::test_call_fails F - + ================================== ERRORS ================================== ____________________ ERROR at setup of test_setup_fails ____________________ - + @pytest.fixture def other(): > assert 0 E assert 0 - + test_module.py:7: AssertionError ================================= FAILURES ================================= _____________________________ test_call_fails ______________________________ - + something = None - + def test_call_fails(something): > assert 0 E assert 0 - + test_module.py:15: AssertionError ________________________________ test_fail2 ________________________________ - + def test_fail2(): > assert 0 E assert 0 - + test_module.py:19: AssertionError ==================== 2 failed, 1 error in 0.12 seconds ===================== diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index e07d00eaa..c29a24bc3 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -73,20 +73,20 @@ marked ``smtp`` fixture function. 
 Running the test looks like this::
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR, inifile:
     collected 1 item
-
+
     test_smtpsimple.py F                                                 [100%]
-
+
     ================================= FAILURES =================================
     ________________________________ test_ehlo _________________________________
-
+
     smtp = <smtplib.SMTP object at 0xdeadbeef>
-
+
     def test_ehlo(smtp):
         response, msg = smtp.ehlo()
         assert response == 250
 >       assert 0  # for demo purposes
 E       assert 0
-
+
     test_smtpsimple.py:11: AssertionError
     ========================= 1 failed in 0.12 seconds =========================
@@ -209,32 +209,32 @@ inspect what is going on and can now run the tests::
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR, inifile:
     collected 2 items
-
+
     test_module.py FF                                                    [100%]
-
+
     ================================= FAILURES =================================
     ________________________________ test_ehlo _________________________________
-
+
     smtp = <smtplib.SMTP object at 0xdeadbeef>
-
+
     def test_ehlo(smtp):
         response, msg = smtp.ehlo()
         assert response == 250
         assert b"smtp.gmail.com" in msg
 >       assert 0  # for demo purposes
 E       assert 0
-
+
     test_module.py:6: AssertionError
     ________________________________ test_noop _________________________________
-
+
     smtp = <smtplib.SMTP object at 0xdeadbeef>
-
+
     def test_noop(smtp):
         response, msg = smtp.noop()
         assert response == 250
 >       assert 0  # for demo purposes
 E       assert 0
-
+
     test_module.py:11: AssertionError
     ========================= 2 failed in 0.12 seconds =========================
@@ -337,7 +337,7 @@ Let's execute it::
     $ pytest -s -q --tb=no
     FFteardown smtp
-
+
     2 failed in 0.12 seconds
 We see that the ``smtp`` instance is finalized after the two
@@ -446,7 +446,7 @@ again, nothing much has changed::
     $ pytest -s -q --tb=no
     FFfinalizing (smtp.gmail.com)
-
+
     2 failed in 0.12 seconds
 Let's quickly create another test module that actually sets the
@@ -567,51 +567,51 @@ So let's just do another run::
     FFFF                                                                 [100%]
     ================================= FAILURES =================================
     ________________________ test_ehlo[smtp.gmail.com] _________________________
-
+
     smtp = <smtplib.SMTP object at 0xdeadbeef>
-
+
     def test_ehlo(smtp):
         response, msg = smtp.ehlo()
         assert response == 250
         assert b"smtp.gmail.com" in msg
 >       assert 0  # for demo purposes
 E       assert 0
-
+
     test_module.py:6: AssertionError
     ________________________ test_noop[smtp.gmail.com] _________________________
-
+
     smtp = <smtplib.SMTP object at 0xdeadbeef>
-
+
     def test_noop(smtp):
         response, msg = smtp.noop()
         assert response == 250
 >       assert 0  # for demo purposes
 E       assert 0
-
+
     test_module.py:11: AssertionError
     ________________________ test_ehlo[mail.python.org] ________________________
-
+
     smtp = <smtplib.SMTP object at 0xdeadbeef>
-
+
     def test_ehlo(smtp):
         response, msg = smtp.ehlo()
         assert response == 250
 >       assert b"smtp.gmail.com" in msg
 E       AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8'
-
+
     test_module.py:5: AssertionError
     -------------------------- Captured stdout setup ---------------------------
     finalizing
     ________________________ test_noop[mail.python.org] ________________________
-
+
     smtp = <smtplib.SMTP object at 0xdeadbeef>
-
+
     def test_noop(smtp):
         response, msg = smtp.noop()
         assert response == 250
 >       assert 0  # for demo purposes
 E       assert 0
-
+
     test_module.py:11: AssertionError
     ------------------------- Captured stdout teardown -------------------------
     finalizing
@@ -683,7 +683,7 @@ Running the above tests results in the following test IDs being used::
-
+
     ======================= no tests ran in 0.12 seconds =======================
 ..
_`fixture-parametrize-marks`: @@ -713,11 +713,11 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 3 items - + test_fixture_marks.py::test_data[0] PASSED [ 33%] test_fixture_marks.py::test_data[1] PASSED [ 66%] test_fixture_marks.py::test_data[2] SKIPPED [100%] - + =================== 2 passed, 1 skipped in 0.12 seconds ==================== .. _`interdependent fixtures`: @@ -756,10 +756,10 @@ Here we declare an ``app`` fixture which receives the previously defined cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - + test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED [ 50%] test_appsetup.py::test_smtp_exists[mail.python.org] PASSED [100%] - + ========================= 2 passed in 0.12 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two @@ -825,26 +825,26 @@ Let's run the tests in verbose mode and with looking at the print-output:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items - + test_module.py::test_0[1] SETUP otherarg 1 RUN test0 with otherarg 1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_0[2] SETUP otherarg 2 RUN test0 with otherarg 2 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod1] SETUP modarg mod1 RUN test1 with modarg mod1 PASSED test_module.py::test_2[mod1-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_2[mod1-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod1 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod2] TEARDOWN modarg mod1 SETUP modarg mod2 RUN test1 with modarg mod2 @@ -852,13 +852,13 @@ Let's run the tests in verbose mode and with looking at the print-output:: test_module.py::test_2[mod2-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod2 PASSED TEARDOWN otherarg 1 - + test_module.py::test_2[mod2-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod2 PASSED TEARDOWN otherarg 2 TEARDOWN modarg mod2 - - + + ========================= 8 passed in 0.12 seconds ========================= You can see that the parametrized module-scoped ``modarg`` resource caused an diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index f2dbec5e9..aae0bf971 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -50,17 +50,17 @@ That’s it. You can now execute the test function:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_sample.py F [100%] - + ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + def test_answer(): > assert func(3) == 5 E assert 4 == 5 E + where 4 = func(3) - + test_sample.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -117,15 +117,15 @@ Once you develop multiple tests, you may want to group them into a class. 
pytest
     .F                                                                   [100%]
     ================================= FAILURES =================================
     ____________________________ TestClass.test_two ____________________________
-
+
     self = <test_class.TestClass object at 0xdeadbeef>
-
+
     def test_two(self):
         x = "hello"
 >       assert hasattr(x, 'check')
 E       AssertionError: assert False
 E        +  where False = hasattr('hello', 'check')
-
+
     test_class.py:8: AssertionError
     1 failed, 1 passed in 0.12 seconds
@@ -147,14 +147,14 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look
     F                                                                    [100%]
     ================================= FAILURES =================================
     _____________________________ test_needsfiles ______________________________
-
+
     tmpdir = local('PYTEST_TMPDIR/test_needsfiles0')
-
+
     def test_needsfiles(tmpdir):
         print (tmpdir)
 >       assert 0
 E       assert 0
-
+
     test_tmpdir.py:3: AssertionError
     --------------------------- Captured stdout call ---------------------------
     PYTEST_TMPDIR/test_needsfiles0
diff --git a/doc/en/index.rst b/doc/en/index.rst
index 6a382e571..0539a1c55 100644
--- a/doc/en/index.rst
+++ b/doc/en/index.rst
@@ -29,17 +29,17 @@ To execute it::
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR, inifile:
     collected 1 item
-
+
     test_sample.py F                                                     [100%]
-
+
     ================================= FAILURES =================================
     _______________________________ test_answer ________________________________
-
+
     def test_answer():
 >       assert inc(3) == 5
 E       assert 4 == 5
 E        +  where 4 = inc(3)
-
+
     test_sample.py:6: AssertionError
     ========================= 1 failed in 0.12 seconds =========================
diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst
index 693cf1913..f87f47d08 100644
--- a/doc/en/parametrize.rst
+++ b/doc/en/parametrize.rst
@@ -57,14 +57,14 @@ them in turn::
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR, inifile:
     collected 3 items
-
+
     test_expectation.py ..F                                              [100%]
-
+
     ================================= FAILURES =================================
     ____________________________ test_eval[6*9-42] _____________________________
-
+
     test_input = '6*9', expected = 42
-
+
     @pytest.mark.parametrize("test_input,expected", [
         ("3+5", 8),
         ("2+4", 6),
@@ -74,7 +74,7 @@ them in turn::
 >       assert eval(test_input) == expected
 E       AssertionError: assert 54 == 42
 E        +  where 54 = eval('6*9')
-
+
     test_expectation.py:8: AssertionError
     ==================== 1 failed, 2 passed in 0.12 seconds ====================
@@ -106,9 +106,9 @@ Let's run this::
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR, inifile:
     collected 3 items
-
+
     test_expectation.py ..x                                              [100%]
-
+
     =================== 2 passed, 1 xfailed in 0.12 seconds ====================
 The one parameter set which caused a failure previously now
@@ -174,15 +174,15 @@ Let's also run with a stringinput that will lead to a failing test::
     F                                                                    [100%]
     ================================= FAILURES =================================
     ___________________________ test_valid_string[!] ___________________________
-
+
     stringinput = '!'
-
+
     def test_valid_string(stringinput):
 >       assert stringinput.isalpha()
 E       AssertionError: assert False
 E        +  where False = <built-in method isalpha of str object at 0xdeadbeef>()
 E        +    where <built-in method isalpha of str object at 0xdeadbeef> = '!'.isalpha
-
+
     test_strings.py:3: AssertionError
     1 failed in 0.12 seconds
diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst
index cda67554d..830dd55f5 100644
--- a/doc/en/skipping.rst
+++ b/doc/en/skipping.rst
@@ -334,12 +334,12 @@ Running it with the report-on-xfail option gives this output::
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR/example, inifile:
     collected 7 items
-
+
     xfail_demo.py xxxxxxx                                                [100%]
     ========================= short test summary info ==========================
     XFAIL xfail_demo.py::test_hello
     XFAIL xfail_demo.py::test_hello2
-      reason: [NOTRUN]
+      reason: [NOTRUN]
     XFAIL xfail_demo.py::test_hello3
       condition: hasattr(os, 'sep')
     XFAIL xfail_demo.py::test_hello4
@@ -349,7 +349,7 @@ Running it with the report-on-xfail option gives this output::
     XFAIL xfail_demo.py::test_hello6
       reason: reason
     XFAIL xfail_demo.py::test_hello7
-
+
     ======================== 7 xfailed in 0.12 seconds =========================
 .. _`skip/xfail with parametrize`:
diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst
index 421b4c898..0171e3168 100644
--- a/doc/en/tmpdir.rst
+++ b/doc/en/tmpdir.rst
@@ -32,14 +32,14 @@ Running this would result in a passed test except for the last
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR, inifile:
     collected 1 item
-
+
     test_tmpdir.py F                                                     [100%]
-
+
     ================================= FAILURES =================================
     _____________________________ test_create_file _____________________________
-
+
     tmpdir = local('PYTEST_TMPDIR/test_create_file0')
-
+
     def test_create_file(tmpdir):
         p = tmpdir.mkdir("sub").join("hello.txt")
         p.write("content")
@@ -47,7 +47,7 @@ Running this would result in a passed test except for the last
         assert len(tmpdir.listdir()) == 1
 >       assert 0
 E       assert 0
-
+
     test_tmpdir.py:7: AssertionError
     ========================= 1 failed in 0.12 seconds =========================
diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst
index 53192b346..ec9f466b9 100644
--- a/doc/en/unittest.rst
+++ b/doc/en/unittest.rst
@@ -130,30 +130,30 @@ the ``self.db`` values in the traceback::
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR, inifile:
     collected 2 items
-
+
     test_unittest_db.py FF                                               [100%]
-
+
     ================================= FAILURES =================================
     ___________________________ MyTest.test_method1 ____________________________
-
+
     self = <test_unittest_db.MyTest testMethod=test_method1>
-
+
     def test_method1(self):
         assert hasattr(self, "db")
 >       assert 0, self.db  # fail for demo purposes
 E       AssertionError: <conftest.DummyDB object at 0xdeadbeef>
 E       assert 0
-
+
     test_unittest_db.py:9: AssertionError
     ___________________________ MyTest.test_method2 ____________________________
-
+
     self = <test_unittest_db.MyTest testMethod=test_method2>
-
+
     def test_method2(self):
 >       assert 0, self.db  # fail for demo purposes
 E       AssertionError: <conftest.DummyDB object at 0xdeadbeef>
 E       assert 0
-
+
     test_unittest_db.py:12: AssertionError
     ========================= 2 failed in 0.12 seconds =========================
diff --git a/doc/en/usage.rst b/doc/en/usage.rst
index 25be54395..07077468a 100644
--- a/doc/en/usage.rst
+++ b/doc/en/usage.rst
@@ -502,7 +502,7 @@ hook was invoked::
     $ python myinvoke.py
     .                                                                    [100%]*** test run reporting finishing
-
+
 ..
note:: diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index df93a02b5..62f96ccf7 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -25,14 +25,14 @@ Running pytest now produces this output:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_show_warnings.py . [100%] - + ============================= warnings summary ============================= test_show_warnings.py::test_one $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2 warnings.warn(UserWarning("api v1, should use functions from v2")) - + -- Docs: http://doc.pytest.org/en/latest/warnings.html =================== 1 passed, 1 warnings in 0.12 seconds =================== @@ -45,17 +45,17 @@ them into errors:: F [100%] ================================= FAILURES ================================= _________________________________ test_one _________________________________ - + def test_one(): > assert api_v1() == 1 - - test_show_warnings.py:8: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + test_show_warnings.py:8: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def api_v1(): > warnings.warn(UserWarning("api v1, should use functions from v2")) E UserWarning: api v1, should use functions from v2 - + test_show_warnings.py:4: UserWarning 1 failed in 0.12 seconds From 2921ca6e6449fbd69e9a73df304bc373dd41f0da Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 3 Jul 2018 21:58:18 -0300 Subject: [PATCH 05/17] Run pre-commit on all doc files --- doc/en/assert.rst | 20 +- doc/en/builtin.rst | 28 +-- doc/en/cache.rst | 58 ++--- doc/en/capture.rst | 8 +- doc/en/doctest.rst | 4 +- doc/en/example/markers.rst | 92 ++++---- doc/en/example/nonpython.rst | 10 +- doc/en/example/parametrize.rst | 34 +-- doc/en/example/pythoncollection.rst | 6 +- doc/en/example/reportingdemo.rst | 344 ++++++++++++++-------------- doc/en/example/simple.rst | 102 ++++----- doc/en/fixture.rst | 80 +++---- doc/en/getting-started.rst | 20 +- doc/en/index.rst | 8 +- doc/en/parametrize.rst | 20 +- doc/en/skipping.rst | 6 +- doc/en/tmpdir.rst | 10 +- doc/en/unittest.rst | 16 +- doc/en/usage.rst | 2 +- doc/en/warnings.rst | 18 +- 20 files changed, 443 insertions(+), 443 deletions(-) diff --git a/doc/en/assert.rst b/doc/en/assert.rst index bcc0a28c9..e0e9b9305 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -29,17 +29,17 @@ you will see the return value of the function call:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_assert1.py F [100%] - + ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + def test_function(): > assert f() == 4 E assert 3 == 4 E + where 3 = f() - + test_assert1.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -172,12 +172,12 @@ if you run this module:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_assert2.py F [100%] - + ================================= FAILURES ================================= ___________________________ test_set_comparison ____________________________ - + def test_set_comparison(): set1 = set("1308") set2 = set("8035") @@ -188,7 +188,7 @@ if you run this module:: E Extra items in the right set: E 
'5' E Use -v to get the full diff - + test_assert2.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -241,14 +241,14 @@ the conftest file:: F [100%] ================================= FAILURES ================================= _______________________________ test_compare _______________________________ - + def test_compare(): f1 = Foo(1) f2 = Foo(2) > assert f1 == f2 E assert Comparing Foo instances: E vals: 1 != 2 - + test_foocompare.py:11: AssertionError 1 failed in 0.12 seconds diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index 35f1315d3..e52151a1b 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -17,13 +17,13 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a $ pytest -q --fixtures cache Return a cache object that can persist state between testing sessions. - + cache.get(key, default) cache.set(key, value) - + Keys must be a ``/`` separated value, where the first part is usually the name of your plugin or application to avoid clashes with other cache users. - + Values can be any object handled by the json stdlib module. capsys Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make @@ -49,9 +49,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. pytestconfig Session-scoped fixture that returns the :class:`_pytest.config.Config` object. - + Example:: - + def test_foo(pytestconfig): if pytestconfig.getoption("verbose"): ... @@ -61,9 +61,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a configured reporters, like JUnit XML. The fixture is callable with ``(name, value)``, with value being automatically xml-encoded. - + Example:: - + def test_function(record_property): record_property("example_key", 1) record_xml_property @@ -74,9 +74,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a automatically xml-encoded caplog Access and control log capturing. - + Captured logs are available through the following methods:: - + * caplog.text -> string containing formatted log output * caplog.records -> list of logging.LogRecord instances * caplog.record_tuples -> list of (logger_name, level, message) tuples @@ -84,7 +84,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a monkeypatch The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: - + monkeypatch.setattr(obj, name, value, raising=True) monkeypatch.delattr(obj, name, raising=True) monkeypatch.setitem(mapping, name, value) @@ -93,14 +93,14 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a monkeypatch.delenv(name, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path) - + All modifications will be undone after the requesting test function or fixture has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. recwarn Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. - + See http://docs.python.org/library/warnings.html for information on warning categories. tmpdir_factory @@ -111,9 +111,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a created as a sub directory of the base temporary directory. 
The returned object is a `py.path.local`_ path object. - + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html - + no tests ran in 0.12 seconds You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:: diff --git a/doc/en/cache.rst b/doc/en/cache.rst index 437109db0..37bcf7070 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -49,26 +49,26 @@ If you run this for the first time you will see two failures:: .................F.......F........................ [100%] ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed 2 failed, 48 passed in 0.12 seconds @@ -80,31 +80,31 @@ If you then run it with ``--lf``:: rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items / 48 deselected run-last-failure: rerun previous 2 failures - + test_50.py FF [100%] - + ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed ================= 2 failed, 48 deselected in 0.12 seconds ================== @@ -121,31 +121,31 @@ of ``FF`` and dots):: rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items run-last-failure: rerun previous 2 failures first - + test_50.py FF................................................ 
[100%] - + ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed =================== 2 failed, 48 passed in 0.12 seconds ==================== @@ -198,13 +198,13 @@ of the sleep:: F [100%] ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:14: AssertionError 1 failed in 0.12 seconds @@ -215,13 +215,13 @@ the cache and this will be quick:: F [100%] ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:14: AssertionError 1 failed in 0.12 seconds @@ -246,7 +246,7 @@ You can always peek at the content of the cache using the ['test_caching.py::test_function'] example/value contains: 42 - + ======================= no tests ran in 0.12 seconds ======================= Clearing Cache content diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 549845cc9..ab86fb55f 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -68,16 +68,16 @@ of the failing function and hide the other one:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .F [100%] - + ================================= FAILURES ================================= ________________________________ test_func2 ________________________________ - + def test_func2(): > assert False E assert False - + test_module.py:9: AssertionError -------------------------- Captured stdout setup --------------------------- setting up diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index ac470d105..9488ee826 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -65,9 +65,9 @@ then you can just invoke ``pytest`` without command line options:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 1 item - + mymodule.py . [100%] - + ========================= 1 passed in 0.12 seconds ========================= It is possible to use fixtures using the ``getfixture`` helper:: diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index 176fdccdb..bf352bc81 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -35,9 +35,9 @@ You can then restrict a test run to only run tests marked with ``webtest``:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 3 deselected - + test_server.py::test_send_http PASSED [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== Or the inverse, running all tests except the webtest ones:: @@ -48,11 +48,11 @@ Or the inverse, running all tests except the webtest ones:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items / 1 deselected - + test_server.py::test_something_quick PASSED [ 33%] test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - + ================== 3 passed, 1 deselected in 0.12 seconds ================== Selecting tests based on their node ID @@ -68,9 +68,9 @@ tests based on their module, class, method, or function name:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 1 item - + test_server.py::TestClass::test_method PASSED [100%] - + ========================= 1 passed in 0.12 seconds ========================= You can also select on the class:: @@ -81,9 +81,9 @@ You can also select on the class:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 1 item - + test_server.py::TestClass::test_method PASSED [100%] - + ========================= 1 passed in 0.12 seconds ========================= Or select multiple nodes:: @@ -94,10 +94,10 @@ Or select multiple nodes:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - + test_server.py::TestClass::test_method PASSED [ 50%] test_server.py::test_send_http PASSED [100%] - + ========================= 2 passed in 0.12 seconds ========================= .. _node-id: @@ -132,9 +132,9 @@ select tests based on their names:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 3 deselected - + test_server.py::test_send_http PASSED [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== And you can also run all tests except the ones that match the keyword:: @@ -145,11 +145,11 @@ And you can also run all tests except the ones that match the keyword:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 1 deselected - + test_server.py::test_something_quick PASSED [ 33%] test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - + ================== 3 passed, 1 deselected in 0.12 seconds ================== Or to select "http" and "quick" tests:: @@ -160,10 +160,10 @@ Or to select "http" and "quick" tests:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 2 deselected - + test_server.py::test_send_http PASSED [ 50%] test_server.py::test_something_quick PASSED [100%] - + ================== 2 passed, 2 deselected in 0.12 seconds ================== .. note:: @@ -199,21 +199,21 @@ You can ask which markers exist for your test suite - the list includes our just $ pytest --markers @pytest.mark.webtest: mark a test as a webtest. - + @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. - + @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. 
If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html - + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. - - @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - + + @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - + For an example on how to add and work with markers from a plugin, see :ref:`adding a custom marker from a plugin`. @@ -352,9 +352,9 @@ the test needs:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_someenv.py s [100%] - + ======================== 1 skipped in 0.12 seconds ========================= and here is one that specifies exactly the environment needed:: @@ -364,30 +364,30 @@ and here is one that specifies exactly the environment needed:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_someenv.py . [100%] - + ========================= 1 passed in 0.12 seconds ========================= The ``--markers`` option always gives you a list of available markers:: $ pytest --markers @pytest.mark.env(name): mark test to run only on named environment - + @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. - + @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html - + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. 
Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. - - @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - + + @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - + .. _`passing callables to custom markers`: @@ -523,11 +523,11 @@ then you will see two tests skipped and two executed tests as expected:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_plat.py s.s. [100%] ========================= short test summary info ========================== SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux - + =================== 2 passed, 2 skipped in 0.12 seconds ==================== Note that if you specify a platform via the marker-command line option like this:: @@ -537,9 +537,9 @@ Note that if you specify a platform via the marker-command line option like this platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 3 deselected - + test_plat.py . [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -588,9 +588,9 @@ We can now use the ``-m option`` to select one set:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 2 deselected - + test_module.py FF [100%] - + ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple @@ -609,9 +609,9 @@ or to select both "event" and "interface" tests:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 1 deselected - + test_module.py FFF [100%] - + ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index ca7b2c8df..4f5adf63f 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -30,9 +30,9 @@ now execute the test specification:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items - + test_simple.yml F. [100%] - + ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed @@ -63,10 +63,10 @@ consulted when reporting in ``verbose`` mode:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collecting ... 
collected 2 items - + test_simple.yml::hello FAILED [ 50%] test_simple.yml::ok PASSED [100%] - + ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed @@ -87,5 +87,5 @@ interesting to just look at the collection tree:: - + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 49ffa7288..882700fec 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -55,13 +55,13 @@ let's run the full monty:: ....F [100%] ================================= FAILURES ================================= _____________________________ test_compute[4] ______________________________ - + param1 = 4 - + def test_compute(param1): > assert param1 < 4 E assert 4 < 4 - + test_compute.py:3: AssertionError 1 failed, 4 passed in 0.12 seconds @@ -151,7 +151,7 @@ objects, they are still using the default pytest representation:: - + ======================= no tests ran in 0.12 seconds ======================= In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs @@ -198,9 +198,9 @@ this is a fully self-contained example which you can run with:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_scenarios.py .... [100%] - + ========================= 4 passed in 0.12 seconds ========================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: @@ -218,7 +218,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - + ======================= no tests ran in 0.12 seconds ======================= Note that we told ``metafunc.parametrize()`` that your scenario values @@ -279,7 +279,7 @@ Let's first see how it looks like at collection time:: - + ======================= no tests ran in 0.12 seconds ======================= And then when we run the test:: @@ -288,15 +288,15 @@ And then when we run the test:: .F [100%] ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ - + db = - + def test_db_initialized(db): # a dummy test if db.__class__.__name__ == "DB2": > pytest.fail("deliberately failing for demo purposes") E Failed: deliberately failing for demo purposes - + test_backends.py:6: Failed 1 failed, 1 passed in 0.12 seconds @@ -339,7 +339,7 @@ The result of this test will be successful:: collected 1 item - + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -384,13 +384,13 @@ argument sets to use for each test function. Let's run it:: F.. 
[100%] ================================= FAILURES ================================= ________________________ TestClass.test_equals[1-2] ________________________ - + self = , a = 1, b = 2 - + def test_equals(self, a, b): > assert a == b E assert 1 == 2 - + test_parametrize.py:18: AssertionError 1 failed, 2 passed in 0.12 seconds @@ -462,11 +462,11 @@ If you run this with reporting for skips enabled:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s [100%] ========================= short test summary info ========================== SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' - + =================== 1 passed, 1 skipped in 0.12 seconds ==================== You'll see that we don't have an ``opt2`` module and thus the second test run diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 58b4364b5..8e9d3ae62 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -133,7 +133,7 @@ then the test collection looks like this:: - + ======================= no tests ran in 0.12 seconds ======================= .. note:: @@ -180,7 +180,7 @@ You can always peek at the collection tree without running tests like this:: - + ======================= no tests ran in 0.12 seconds ======================= .. _customizing-test-collection: @@ -243,5 +243,5 @@ file will be left out:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 5df790479..a7cc81694 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -14,81 +14,81 @@ get on the terminal - we are working on that):: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/assertion, inifile: collected 42 items - + failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] - + ================================= FAILURES ================================= ____________________________ test_generative[0] ____________________________ - + param1 = 3, param2 = 6 - + def test_generative(param1, param2): > assert param1 * 2 < param2 E assert (3 * 2) < 6 - + failure_demo.py:19: AssertionError _________________________ TestFailing.test_simple __________________________ - + self = - + def test_simple(self): def f(): return 42 - + def g(): return 43 - + > assert f() == g() E assert 42 == 43 E + where 42 = .f at 0xdeadbeef>() E + and 43 = .g at 0xdeadbeef>() - + failure_demo.py:35: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ - + self = - + def test_simple_multiline(self): > otherfunc_multi(42, 6 * 9) - - failure_demo.py:38: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:38: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + a = 42, b = 54 - + def otherfunc_multi(a, b): > assert a == b E assert 42 == 54 - + failure_demo.py:15: AssertionError ___________________________ TestFailing.test_not ___________________________ - + self = - + def test_not(self): def f(): return 42 - + > assert not f() E assert not 42 E + where 42 = .f at 0xdeadbeef>() - + failure_demo.py:44: AssertionError _________________ TestSpecialisedExplanations.test_eq_text 
_________________ - + self = - + def test_eq_text(self): > assert "spam" == "eggs" E AssertionError: assert 'spam' == 'eggs' E - spam E + eggs - + failure_demo.py:49: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ - + self = - + def test_eq_similar_text(self): > assert "foo 1 bar" == "foo 2 bar" E AssertionError: assert 'foo 1 bar' == 'foo 2 bar' @@ -96,12 +96,12 @@ get on the terminal - we are working on that):: E ? ^ E + foo 2 bar E ? ^ - + failure_demo.py:52: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ - + self = - + def test_eq_multiline_text(self): > assert "foo\nspam\nbar" == "foo\neggs\nbar" E AssertionError: assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -109,12 +109,12 @@ get on the terminal - we are working on that):: E - spam E + eggs E bar - + failure_demo.py:55: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ - + self = - + def test_eq_long_text(self): a = "1" * 100 + "a" + "2" * 100 b = "1" * 100 + "b" + "2" * 100 @@ -126,12 +126,12 @@ get on the terminal - we are working on that):: E ? ^ E + 1111111111b222222222 E ? ^ - + failure_demo.py:60: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ - + self = - + def test_eq_long_text_multiline(self): a = "1\n" * 100 + "a" + "2\n" * 100 b = "1\n" * 100 + "b" + "2\n" * 100 @@ -144,25 +144,25 @@ get on the terminal - we are working on that):: E 1 E 1 E 1... - E + E E ...Full output truncated (7 lines hidden), use '-vv' to show - + failure_demo.py:65: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ - + self = - + def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] E assert [0, 1, 2] == [0, 1, 3] E At index 2 diff: 2 != 3 E Use -v to get the full diff - + failure_demo.py:68: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ - + self = - + def test_eq_list_long(self): a = [0] * 100 + [1] + [3] * 100 b = [0] * 100 + [2] + [3] * 100 @@ -170,12 +170,12 @@ get on the terminal - we are working on that):: E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...] E At index 100 diff: 1 != 2 E Use -v to get the full diff - + failure_demo.py:73: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ - + self = - + def test_eq_dict(self): > assert {"a": 0, "b": 1, "c": 0} == {"a": 0, "b": 2, "d": 0} E AssertionError: assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} @@ -186,14 +186,14 @@ get on the terminal - we are working on that):: E {'c': 0} E Right contains more items: E {'d': 0}... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:76: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ - + self = - + def test_eq_set(self): > assert {0, 10, 11, 12} == {0, 20, 21} E AssertionError: assert {0, 10, 11, 12} == {0, 20, 21} @@ -204,34 +204,34 @@ get on the terminal - we are working on that):: E Extra items in the right set: E 20 E 21... 
- E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:79: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ - + self = - + def test_eq_longer_list(self): > assert [1, 2] == [1, 2, 3] E assert [1, 2] == [1, 2, 3] E Right contains more items, first extra item: 3 E Use -v to get the full diff - + failure_demo.py:82: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ - + self = - + def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] E assert 1 in [0, 2, 3, 4, 5] - + failure_demo.py:85: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ - + self = - + def test_not_in_text_multiline(self): text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail" > assert "foo" not in text @@ -243,14 +243,14 @@ get on the terminal - we are working on that):: E includes foo E ? +++ E and a... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:89: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ - + self = - + def test_not_in_text_single(self): text = "single foo line" > assert "foo" not in text @@ -258,167 +258,167 @@ get on the terminal - we are working on that):: E 'foo' is contained here: E single foo line E ? +++ - + failure_demo.py:93: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ - + self = - + def test_not_in_text_single_long(self): text = "head " * 50 + "foo " + "tail " * 20 > assert "foo" not in text E AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' E 'foo' is contained here: - E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail + E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? +++ - + failure_demo.py:97: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ - + self = - + def test_not_in_text_single_long_term(self): text = "head " * 50 + "f" * 70 + "tail " * 20 > assert "f" * 70 not in text E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' E 'ffffffffffffffffff...fffffffffffffffffff' is contained here: - E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail + E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - + failure_demo.py:101: AssertionError ______________________________ test_attribute ______________________________ - + def test_attribute(): class Foo(object): b = 1 - + i = Foo() > assert i.b == 2 E assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b - + failure_demo.py:109: AssertionError _________________________ test_attribute_instance __________________________ - + def test_attribute_instance(): class Foo(object): b = 1 - + > assert Foo().b == 2 E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() - + failure_demo.py:116: AssertionError __________________________ test_attribute_failure __________________________ - + def test_attribute_failure(): class Foo(object): def _get_b(self): raise Exception("Failed to get attrib") - + b = property(_get_b) - + i = Foo() > assert i.b == 2 - - failure_demo.py:127: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:127: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + self = .Foo object at 0xdeadbeef> - + def _get_b(self): > raise Exception("Failed to get attrib") E Exception: Failed to get attrib - + failure_demo.py:122: Exception _________________________ test_attribute_multiple __________________________ - + def test_attribute_multiple(): class Foo(object): b = 1 - + class Bar(object): b = 2 - + > assert Foo().b == Bar().b E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() E + and 2 = .Bar object at 0xdeadbeef>.b E + where .Bar object at 0xdeadbeef> = .Bar'>() - + failure_demo.py:137: AssertionError __________________________ TestRaises.test_raises __________________________ - + self = - + def test_raises(self): s = "qwe" # NOQA > raises(TypeError, "int(s)") - - failure_demo.py:147: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:147: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - + <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:635>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ - + self = - + def test_raises_doesnt(self): > raises(IOError, "int('3')") E Failed: DID NOT RAISE - + failure_demo.py:150: Failed __________________________ TestRaises.test_raise ___________________________ - + self = - + def test_raise(self): > raise ValueError("demo error") E ValueError: demo error - + failure_demo.py:153: ValueError ________________________ TestRaises.test_tupleerror ________________________ - + self = - + def test_tupleerror(self): > a, b = [1] # NOQA E ValueError: not enough values to unpack (expected 2, got 1) - + failure_demo.py:156: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ - + self = - + def test_reinterpret_fails_with_print_for_the_fun_of_it(self): items = [1, 2, 3] print("items is %r" % items) > a, b = items.pop() E TypeError: 'int' object is not iterable - + failure_demo.py:161: TypeError --------------------------- Captured stdout call --------------------------- items is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ - + self = - + def test_some_error(self): > if namenotexi: # NOQA E NameError: name 'namenotexi' is not defined - + 
failure_demo.py:164: NameError ____________________ test_dynamic_compile_shows_nicely _____________________ - + def test_dynamic_compile_shows_nicely(): import imp import sys - + src = "def foo():\n assert 1 == 0\n" name = "abc-123" module = imp.new_module(name) @@ -426,65 +426,65 @@ get on the terminal - we are working on that):: py.builtin.exec_(code, module.__dict__) sys.modules[name] = module > module.foo() - - failure_demo.py:182: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:182: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def foo(): > assert 1 == 0 E AssertionError - + <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:179>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ - + self = - + def test_complex_error(self): def f(): return 44 - + def g(): return 43 - + > somefunc(f(), g()) - - failure_demo.py:193: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + + failure_demo.py:193: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:11: in somefunc otherfunc(x, y) - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + a = 44, b = 43 - + def otherfunc(a, b): > assert a == b E assert 44 == 43 - + failure_demo.py:7: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ - + self = - + def test_z1_unpack_error(self): items = [] > a, b = items E ValueError: not enough values to unpack (expected 2, got 0) - + failure_demo.py:197: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ - + self = - + def test_z2_type_error(self): items = 3 > a, b = items E TypeError: 'int' object is not iterable - + failure_demo.py:201: TypeError ______________________ TestMoreErrors.test_startswith ______________________ - + self = - + def test_startswith(self): s = "123" g = "456" @@ -492,93 +492,93 @@ get on the terminal - we are working on that):: E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith - + failure_demo.py:206: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ - + self = - + def test_startswith_nested(self): def f(): return "123" - + def g(): return "456" - + > assert f().startswith(g()) E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith E + where '123' = .f at 0xdeadbeef>() E + and '456' = .g at 0xdeadbeef>() - + failure_demo.py:215: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - + self = - + def test_global_func(self): > assert isinstance(globf(42), float) E assert False E + where False = isinstance(43, float) E + where 43 = globf(42) - + failure_demo.py:218: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - + self = - + def test_instance(self): self.x = 6 * 7 > assert self.x != 42 E assert 42 != 42 E + where 42 = .x - + failure_demo.py:222: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - + self = - + def test_compare(self): > assert globf(10) < 5 E assert 11 < 5 E + where 11 = globf(10) - + failure_demo.py:225: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - + self = - + def test_try_finally(self): x = 1 try: > 
assert x == 0 E assert 1 == 0 - + failure_demo.py:230: AssertionError ___________________ TestCustomAssertMsg.test_single_line ___________________ - + self = - + def test_single_line(self): class A(object): a = 1 - + b = 2 > assert A.a == b, "A.a appears not to be b" E AssertionError: A.a appears not to be b E assert 1 == 2 E + where 1 = .A'>.a - + failure_demo.py:241: AssertionError ____________________ TestCustomAssertMsg.test_multiline ____________________ - + self = - + def test_multiline(self): class A(object): a = 1 - + b = 2 > assert ( A.a == b @@ -588,19 +588,19 @@ get on the terminal - we are working on that):: E one of those E assert 1 == 2 E + where 1 = .A'>.a - + failure_demo.py:248: AssertionError ___________________ TestCustomAssertMsg.test_custom_repr ___________________ - + self = - + def test_custom_repr(self): class JSON(object): a = 1 - + def __repr__(self): return "This is JSON\n{\n 'foo': 'bar'\n}" - + a = JSON() b = 2 > assert a.a == b, a @@ -610,12 +610,12 @@ get on the terminal - we are working on that):: E } E assert 1 == 2 E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a - + failure_demo.py:261: AssertionError ============================= warnings summary ============================= Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0. Please use Metafunc.parametrize instead. - + -- Docs: http://doc.pytest.org/en/latest/warnings.html ================== 42 failed, 1 warnings in 0.12 seconds =================== diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index c86939fb1..ed2c9d67a 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -49,9 +49,9 @@ Let's run this without supplying our new option:: F [100%] ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + cmdopt = 'type1' - + def test_answer(cmdopt): if cmdopt == "type1": print("first") @@ -59,7 +59,7 @@ Let's run this without supplying our new option:: print("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- first @@ -71,9 +71,9 @@ And now with supplying a command line option:: F [100%] ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + cmdopt = 'type2' - + def test_answer(cmdopt): if cmdopt == "type1": print("first") @@ -81,7 +81,7 @@ And now with supplying a command line option:: print("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- second @@ -124,7 +124,7 @@ directory with the above conftest.py:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= .. 
_`excontrolskip`: @@ -182,11 +182,11 @@ and when running it will see a skipped "slow" test:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s [100%] ========================= short test summary info ========================== SKIP [1] test_module.py:8: need --runslow option to run - + =================== 1 passed, 1 skipped in 0.12 seconds ==================== Or run it including the ``slow`` marked test:: @@ -196,9 +196,9 @@ Or run it including the ``slow`` marked test:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .. [100%] - + ========================= 2 passed in 0.12 seconds ========================= Writing well integrated assertion helpers @@ -236,11 +236,11 @@ Let's run our little function:: F [100%] ================================= FAILURES ================================= ______________________________ test_something ______________________________ - + def test_something(): > checkconfig(42) E Failed: not configured: 42 - + test_checkconfig.py:11: Failed 1 failed in 0.12 seconds @@ -335,7 +335,7 @@ which will add the string to the test header accordingly:: project deps: mylib-1.1 rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -363,7 +363,7 @@ which will add info only when run with "--v":: did you? rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= and nothing when run plainly:: @@ -373,7 +373,7 @@ and nothing when run plainly:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= profiling test duration @@ -410,9 +410,9 @@ Now we can profile which test functions execute the slowest:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_some_are_slow.py ... [100%] - + ========================= slowest 3 test durations ========================= 0.30s call test_some_are_slow.py::test_funcslow2 0.20s call test_some_are_slow.py::test_funcslow1 @@ -482,18 +482,18 @@ If we run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_step.py .Fx. [100%] - + ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - + self = - + def test_modification(self): > assert 0 E assert 0 - + test_step.py:11: AssertionError ========================= short test summary info ========================== XFAIL test_step.py::TestUserHandling::()::test_deletion @@ -563,12 +563,12 @@ We can run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items - + test_step.py .Fx. 
[ 57%] a/test_db.py F [ 71%] a/test_db2.py F [ 85%] b/test_error.py E [100%] - + ================================== ERRORS ================================== _______________________ ERROR at setup of test_root ________________________ file $REGENDOC_TMPDIR/b/test_error.py, line 1 @@ -576,37 +576,37 @@ We can run this:: E fixture 'db' not found > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmpdir, tmpdir_factory > use 'pytest --fixtures [testpath]' for help on them. - + $REGENDOC_TMPDIR/b/test_error.py:1 ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - + self = - + def test_modification(self): > assert 0 E assert 0 - + test_step.py:11: AssertionError _________________________________ test_a1 __________________________________ - + db = - + def test_a1(db): > assert 0, db # to show value E AssertionError: E assert 0 - + a/test_db.py:2: AssertionError _________________________________ test_a2 __________________________________ - + db = - + def test_a2(db): > assert 0, db # to show value E AssertionError: E assert 0 - + a/test_db2.py:2: AssertionError ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== @@ -674,25 +674,25 @@ and run them:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF [100%] - + ================================= FAILURES ================================= ________________________________ test_fail1 ________________________________ - + tmpdir = local('PYTEST_TMPDIR/test_fail10') - + def test_fail1(tmpdir): > assert 0 E assert 0 - + test_module.py:2: AssertionError ________________________________ test_fail2 ________________________________ - + def test_fail2(): > assert 0 E assert 0 - + test_module.py:6: AssertionError ========================= 2 failed in 0.12 seconds ========================= @@ -773,36 +773,36 @@ and run it:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_module.py Esetting up a test failed! test_module.py::test_setup_fails Fexecuting test failed test_module.py::test_call_fails F - + ================================== ERRORS ================================== ____________________ ERROR at setup of test_setup_fails ____________________ - + @pytest.fixture def other(): > assert 0 E assert 0 - + test_module.py:7: AssertionError ================================= FAILURES ================================= _____________________________ test_call_fails ______________________________ - + something = None - + def test_call_fails(something): > assert 0 E assert 0 - + test_module.py:15: AssertionError ________________________________ test_fail2 ________________________________ - + def test_fail2(): > assert 0 E assert 0 - + test_module.py:19: AssertionError ==================== 2 failed, 1 error in 0.12 seconds ===================== diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index c29a24bc3..e07d00eaa 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -73,20 +73,20 @@ marked ``smtp`` fixture function. 
Running the test looks like this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_smtpsimple.py F [100%] - + ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_smtpsimple.py:11: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -209,32 +209,32 @@ inspect what is going on and can now run the tests:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF [100%] - + ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes E assert 0 - + test_module.py:6: AssertionError ________________________________ test_noop _________________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ========================= 2 failed in 0.12 seconds ========================= @@ -337,7 +337,7 @@ Let's execute it:: $ pytest -s -q --tb=no FFteardown smtp - + 2 failed in 0.12 seconds We see that the ``smtp`` instance is finalized after the two @@ -446,7 +446,7 @@ again, nothing much has changed:: $ pytest -s -q --tb=no FFfinalizing (smtp.gmail.com) - + 2 failed in 0.12 seconds Let's quickly create another test module that actually sets the @@ -567,51 +567,51 @@ So let's just do another run:: FFFF [100%] ================================= FAILURES ================================= ________________________ test_ehlo[smtp.gmail.com] _________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes E assert 0 - + test_module.py:6: AssertionError ________________________ test_noop[smtp.gmail.com] _________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ________________________ test_ehlo[mail.python.org] ________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 > assert b"smtp.gmail.com" in msg E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' - + test_module.py:5: AssertionError -------------------------- Captured stdout setup --------------------------- finalizing ________________________ test_noop[mail.python.org] ________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ------------------------- Captured stdout teardown ------------------------- finalizing @@ -683,7 +683,7 @@ Running the above tests results in the following test IDs being used:: - + ======================= no tests ran in 0.12 seconds ======================= .. 
_`fixture-parametrize-marks`: @@ -713,11 +713,11 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 3 items - + test_fixture_marks.py::test_data[0] PASSED [ 33%] test_fixture_marks.py::test_data[1] PASSED [ 66%] test_fixture_marks.py::test_data[2] SKIPPED [100%] - + =================== 2 passed, 1 skipped in 0.12 seconds ==================== .. _`interdependent fixtures`: @@ -756,10 +756,10 @@ Here we declare an ``app`` fixture which receives the previously defined cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - + test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED [ 50%] test_appsetup.py::test_smtp_exists[mail.python.org] PASSED [100%] - + ========================= 2 passed in 0.12 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two @@ -825,26 +825,26 @@ Let's run the tests in verbose mode and with looking at the print-output:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items - + test_module.py::test_0[1] SETUP otherarg 1 RUN test0 with otherarg 1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_0[2] SETUP otherarg 2 RUN test0 with otherarg 2 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod1] SETUP modarg mod1 RUN test1 with modarg mod1 PASSED test_module.py::test_2[mod1-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_2[mod1-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod1 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod2] TEARDOWN modarg mod1 SETUP modarg mod2 RUN test1 with modarg mod2 @@ -852,13 +852,13 @@ Let's run the tests in verbose mode and with looking at the print-output:: test_module.py::test_2[mod2-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod2 PASSED TEARDOWN otherarg 1 - + test_module.py::test_2[mod2-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod2 PASSED TEARDOWN otherarg 2 TEARDOWN modarg mod2 - - + + ========================= 8 passed in 0.12 seconds ========================= You can see that the parametrized module-scoped ``modarg`` resource caused an diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index aae0bf971..f2dbec5e9 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -50,17 +50,17 @@ That’s it. You can now execute the test function:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_sample.py F [100%] - + ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + def test_answer(): > assert func(3) == 5 E assert 4 == 5 E + where 4 = func(3) - + test_sample.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -117,15 +117,15 @@ Once you develop multiple tests, you may want to group them into a class. 
pytest .F [100%] ================================= FAILURES ================================= ____________________________ TestClass.test_two ____________________________ - + self = - + def test_two(self): x = "hello" > assert hasattr(x, 'check') E AssertionError: assert False E + where False = hasattr('hello', 'check') - + test_class.py:8: AssertionError 1 failed, 1 passed in 0.12 seconds @@ -147,14 +147,14 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look F [100%] ================================= FAILURES ================================= _____________________________ test_needsfiles ______________________________ - + tmpdir = local('PYTEST_TMPDIR/test_needsfiles0') - + def test_needsfiles(tmpdir): print (tmpdir) > assert 0 E assert 0 - + test_tmpdir.py:3: AssertionError --------------------------- Captured stdout call --------------------------- PYTEST_TMPDIR/test_needsfiles0 diff --git a/doc/en/index.rst b/doc/en/index.rst index 0539a1c55..6a382e571 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -29,17 +29,17 @@ To execute it:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_sample.py F [100%] - + ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + def test_answer(): > assert inc(3) == 5 E assert 4 == 5 E + where 4 = inc(3) - + test_sample.py:6: AssertionError ========================= 1 failed in 0.12 seconds ========================= diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index f87f47d08..693cf1913 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -57,14 +57,14 @@ them in turn:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_expectation.py ..F [100%] - + ================================= FAILURES ================================= ____________________________ test_eval[6*9-42] _____________________________ - + test_input = '6*9', expected = 42 - + @pytest.mark.parametrize("test_input,expected", [ ("3+5", 8), ("2+4", 6), @@ -74,7 +74,7 @@ them in turn:: > assert eval(test_input) == expected E AssertionError: assert 54 == 42 E + where 54 = eval('6*9') - + test_expectation.py:8: AssertionError ==================== 1 failed, 2 passed in 0.12 seconds ==================== @@ -106,9 +106,9 @@ Let's run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_expectation.py ..x [100%] - + =================== 2 passed, 1 xfailed in 0.12 seconds ==================== The one parameter set which caused a failure previously now @@ -174,15 +174,15 @@ Let's also run with a stringinput that will lead to a failing test:: F [100%] ================================= FAILURES ================================= ___________________________ test_valid_string[!] ___________________________ - + stringinput = '!' 
- + def test_valid_string(stringinput): > assert stringinput.isalpha() E AssertionError: assert False E + where False = () E + where = '!'.isalpha - + test_strings.py:3: AssertionError 1 failed in 0.12 seconds diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index 830dd55f5..cda67554d 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -334,12 +334,12 @@ Running it with the report-on-xfail option gives this output:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/example, inifile: collected 7 items - + xfail_demo.py xxxxxxx [100%] ========================= short test summary info ========================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 - reason: [NOTRUN] + reason: [NOTRUN] XFAIL xfail_demo.py::test_hello3 condition: hasattr(os, 'sep') XFAIL xfail_demo.py::test_hello4 @@ -349,7 +349,7 @@ Running it with the report-on-xfail option gives this output:: XFAIL xfail_demo.py::test_hello6 reason: reason XFAIL xfail_demo.py::test_hello7 - + ======================== 7 xfailed in 0.12 seconds ========================= .. _`skip/xfail with parametrize`: diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 0171e3168..421b4c898 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -32,14 +32,14 @@ Running this would result in a passed test except for the last platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_tmpdir.py F [100%] - + ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ - + tmpdir = local('PYTEST_TMPDIR/test_create_file0') - + def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") p.write("content") @@ -47,7 +47,7 @@ Running this would result in a passed test except for the last assert len(tmpdir.listdir()) == 1 > assert 0 E assert 0 - + test_tmpdir.py:7: AssertionError ========================= 1 failed in 0.12 seconds ========================= diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index ec9f466b9..53192b346 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -130,30 +130,30 @@ the ``self.db`` values in the traceback:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_unittest_db.py FF [100%] - + ================================= FAILURES ================================= ___________________________ MyTest.test_method1 ____________________________ - + self = - + def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes E AssertionError: .DummyDB object at 0xdeadbeef> E assert 0 - + test_unittest_db.py:9: AssertionError ___________________________ MyTest.test_method2 ____________________________ - + self = - + def test_method2(self): > assert 0, self.db # fail for demo purposes E AssertionError: .DummyDB object at 0xdeadbeef> E assert 0 - + test_unittest_db.py:12: AssertionError ========================= 2 failed in 0.12 seconds ========================= diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 07077468a..25be54395 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -502,7 +502,7 @@ hook was invoked:: $ python myinvoke.py . [100%]*** test run reporting finishing - + .. 
note:: diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index 62f96ccf7..df93a02b5 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -25,14 +25,14 @@ Running pytest now produces this output:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_show_warnings.py . [100%] - + ============================= warnings summary ============================= test_show_warnings.py::test_one $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2 warnings.warn(UserWarning("api v1, should use functions from v2")) - + -- Docs: http://doc.pytest.org/en/latest/warnings.html =================== 1 passed, 1 warnings in 0.12 seconds =================== @@ -45,17 +45,17 @@ them into errors:: F [100%] ================================= FAILURES ================================= _________________________________ test_one _________________________________ - + def test_one(): > assert api_v1() == 1 - - test_show_warnings.py:8: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + test_show_warnings.py:8: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def api_v1(): > warnings.warn(UserWarning("api v1, should use functions from v2")) E UserWarning: api v1, should use functions from v2 - + test_show_warnings.py:4: UserWarning 1 failed in 0.12 seconds From fa3161011a6039bef955ee29ec46f5748bdec303 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 3 Jul 2018 22:07:51 -0300 Subject: [PATCH 06/17] Improve CHANGELOG for 3.6.3 --- CHANGELOG.rst | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b6797b0ca..9c9d254d0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -18,21 +18,18 @@ Bug Fixes assertion-rewritten package modules. (`#3061 `_) -- If the user pass as a expected value a numpy array created like - numpy.array(5); it will creates an array with one element without shape, when - used with approx it will raise an error for the `repr` 'TypeError: iteration - over a 0-d array'. With this PR pytest will iterate properly in the numpy - array even with 0 dimension. (`#3593 - `_) +- Fix error in ``pytest.approx`` when dealing with 0-dimension numpy + arrays. (`#3593 `_) -- no longer ValueError when using the ``get_marker`` api. (`#3605 +- No longer raise ``ValueError`` when using the ``get_marker`` API. (`#3605 `_) -- Log messages with unicode characters would not appear in the output log file. +- Fix problem where log messages with non-ascii characters would not + appear in the output log file. (`#3630 `_) -- No longer raise AttributeError when legacy marks can't be stored. (`#3631 - `_) +- No longer raise ``AttributeError`` when legacy marks can't be stored in + functions. (`#3631 `_) Improved Documentation @@ -50,7 +47,7 @@ Trivial/Internal Changes attribute and ``metafunc`` parameter from ``CallSpec2.copy()``. (`#3598 `_) -- Silence usage of ``reduce`` warning in python 2 (`#3609 +- Silence usage of ``reduce`` warning in Python 2 (`#3609 `_) - Fix usage of ``attr.ib`` deprecated ``convert`` parameter. 
(`#3653 From 73d787df3a13af635d18464a6560984e78c0b4da Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 3 Jul 2018 22:15:11 -0300 Subject: [PATCH 07/17] HOWTORELEASE: create branch first and run pre-commit after generate-release task This makes more sense because we need to install from tasks/requirements.txt --- HOWTORELEASE.rst | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/HOWTORELEASE.rst b/HOWTORELEASE.rst index 97bddf720..b5e852d3b 100644 --- a/HOWTORELEASE.rst +++ b/HOWTORELEASE.rst @@ -10,10 +10,6 @@ taking a lot of time to make a new one. pytest releases must be prepared on **Linux** because the docs and examples expect to be executed in that platform. -#. Install development dependencies in a virtual environment with:: - - pip3 install -U -r tasks/requirements.txt - #. Create a branch ``release-X.Y.Z`` with the version for the release. * **patch releases**: from the latest ``master``; @@ -22,9 +18,19 @@ taking a lot of time to make a new one. Ensure your are in a clean work tree. -#. Generate docs, changelog, announcements and a **local** tag:: +#. Install development dependencies in a virtual environment with:: + + $ pip3 install -U -r tasks/requirements.txt + +#. Generate docs, changelog, announcements, and a **local** tag:: + + $ invoke generate.pre-release + +#. Execute pre-commit on all files to ensure the docs are conformant and commit your results:: + + $ pre-commit run --all-files + $ git commit -am "Fix files with pre-commit" - invoke generate.pre-release #. Open a PR for this branch targeting ``master``. From 50f030d2330280abc31824075660a9d5d0aaa808 Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Wed, 4 Jul 2018 15:16:34 -0700 Subject: [PATCH 08/17] Correct code blocks in docs --- .pre-commit-config.yaml | 5 +++++ CHANGELOG.rst | 18 +++++++++--------- doc/en/announce/release-2.9.0.rst | 2 +- doc/en/example/markers.rst | 4 ++-- doc/en/goodpractices.rst | 2 +- doc/en/mark.rst | 2 -- 6 files changed, 18 insertions(+), 15 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9e9549ed9..cae90a428 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,3 +34,8 @@ repos: language: python additional_dependencies: [pygments, restructuredtext_lint] python_version: python3.6 + - id: rst-backticks + name: rst ``code`` is two backticks + entry: ' `[^`]+[^_]`([^_]|$)' + language: pygrep + types: [rst] diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9c9d254d0..0c3bb2476 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -72,7 +72,7 @@ Bug Fixes raises an exception. (`#3569 `_) -- Fix encoding error with `print` statements in doctests (`#3583 +- Fix encoding error with ``print`` statements in doctests (`#3583 `_) @@ -345,7 +345,7 @@ Features ``pytest_runtest_logfinish`` hooks when live logs are enabled. (`#3189 `_) -- Passing `--log-cli-level` in the command-line now automatically activates +- Passing ``--log-cli-level`` in the command-line now automatically activates live logging. (`#3190 `_) - Add command line option ``--deselect`` to allow deselection of individual @@ -697,8 +697,8 @@ Trivial/Internal Changes - Code cleanup. (`#3015 `_, `#3021 `_) -- Clean up code by replacing imports and references of `_ast` to `ast`. (`#3018 - `_) +- Clean up code by replacing imports and references of ``_ast`` to ``ast``. 
+ (`#3018 `_) Pytest 3.3.1 (2017-12-05) @@ -1026,7 +1026,7 @@ Pytest 3.2.2 (2017-09-06) Bug Fixes --------- -- Calling the deprecated `request.getfuncargvalue()` now shows the source of +- Calling the deprecated ``request.getfuncargvalue()`` now shows the source of the call. (`#2681 `_) - Allow tests declared as ``@staticmethod`` to use fixtures. (`#2699 @@ -1048,10 +1048,10 @@ Improved Documentation ``pytest.mark.MARKER_NAME.__call__`` (`#2604 `_) -- In one of the simple examples, use `pytest_collection_modifyitems()` to skip +- In one of the simple examples, use ``pytest_collection_modifyitems()`` to skip tests based on a command-line option, allowing its sharing while preventing a - user error when acessing `pytest.config` before the argument parsing. (`#2653 - `_) + user error when acessing ``pytest.config`` before the argument parsing. + (`#2653 `_) Trivial/Internal Changes @@ -1129,7 +1129,7 @@ Features from parent classes or modules. (`#2516 `_) -- Collection ignores local virtualenvs by default; `--collect-in-virtualenv` +- Collection ignores local virtualenvs by default; ``--collect-in-virtualenv`` overrides this behavior. (`#2518 `_) diff --git a/doc/en/announce/release-2.9.0.rst b/doc/en/announce/release-2.9.0.rst index 8d829996d..c079fdf6b 100644 --- a/doc/en/announce/release-2.9.0.rst +++ b/doc/en/announce/release-2.9.0.rst @@ -124,7 +124,7 @@ The py.test Development Team Thanks `@biern`_ for the PR. * Fix `traceback style docs`_ to describe all of the available options - (auto/long/short/line/native/no), with `auto` being the default since v2.6. + (auto/long/short/line/native/no), with ``auto`` being the default since v2.6. Thanks `@hackebrot`_ for the PR. * Fix (`#1422`_): junit record_xml_property doesn't allow multiple records diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index bf352bc81..1b4aa9279 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -299,10 +299,10 @@ Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with .. note:: If the data you are parametrizing happen to be single callables, you need to be careful - when marking these items. `pytest.mark.xfail(my_func)` won't work because it's also the + when marking these items. ``pytest.mark.xfail(my_func)`` won't work because it's also the signature of a function being decorated. To resolve this ambiguity, you need to pass a reason argument: - `pytest.mark.xfail(func_bar, reason="Issue#7")`. + ``pytest.mark.xfail(func_bar, reason="Issue#7")``. .. _`adding a custom marker from a plugin`: diff --git a/doc/en/goodpractices.rst b/doc/en/goodpractices.rst index 2bbd9d0ae..d9c685299 100644 --- a/doc/en/goodpractices.rst +++ b/doc/en/goodpractices.rst @@ -187,7 +187,7 @@ You can then install your package in "editable" mode:: pip install -e . which lets you change your source code (both tests and application) and rerun tests at will. -This is similar to running `python setup.py develop` or `conda develop` in that it installs +This is similar to running ``python setup.py develop`` or ``conda develop`` in that it installs your package using a symlink to your development code. Once you are done with your work and want to make sure that your actual diff --git a/doc/en/mark.rst b/doc/en/mark.rst index aa1210bb6..c99768ce0 100644 --- a/doc/en/mark.rst +++ b/doc/en/mark.rst @@ -52,8 +52,6 @@ should add ``--strict`` to ``addopts``: serial -.. 
`marker-iteration` - Marker revamp and iteration --------------------------- From d7b722e2ae6dbf4b10bbba14c3a2dc5c4f2738b4 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 6 Jul 2018 20:55:42 -0300 Subject: [PATCH 09/17] Add reference docs for pytest.mark.usefixtures --- doc/en/reference.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/doc/en/reference.rst b/doc/en/reference.rst index fe9e87042..cdae37f95 100644 --- a/doc/en/reference.rst +++ b/doc/en/reference.rst @@ -161,6 +161,20 @@ Skip a test function if a condition is ``True``. :keyword str reason: Reason why the test function is being skipped. +.. _`pytest.mark.usefixtures ref`: + +pytest.mark.usefixtures +~~~~~~~~~~~~~~~~~~~~~~~ + +**Tutorial**: :ref:`usefixtures`. + +Mark a test function as using the given fixture names. + +.. py:function:: pytest.mark.usefixtures(*names) + + :param args: the names of the fixture to use as strings + + .. _`pytest.mark.xfail ref`: pytest.mark.xfail From 18b2fc11adf4b79d860ba91b73678a22c0c53083 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 6 Jul 2018 20:57:30 -0300 Subject: [PATCH 10/17] Dummy change --- README.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/README.rst b/README.rst index 564ffff6c..c0b2d658f 100644 --- a/README.rst +++ b/README.rst @@ -3,6 +3,7 @@ :align: center :alt: pytest + ------ .. image:: https://img.shields.io/pypi/v/pytest.svg From f359b50fe5e8c232c34d7400b00d4763ce2f43a4 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 6 Jul 2018 21:03:27 -0300 Subject: [PATCH 11/17] Adjust copyright in README --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index c0b2d658f..97b21898e 100644 --- a/README.rst +++ b/README.rst @@ -110,7 +110,7 @@ Consult the `Changelog `__ page License ------- -Copyright Holger Krekel and others, 2004-2017. +Copyright Holger Krekel and others, 2004-2018. Distributed under the terms of the `MIT`_ license, pytest is free and open source software. From d26a596072c4b83340904512d5d34a107daf13eb Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 7 Jul 2018 10:01:10 -0300 Subject: [PATCH 12/17] Add a warning about usefixtures mark not working in fixtures Fix #1014 --- doc/en/fixture.rst | 18 +++++++++++++++++- doc/en/reference.rst | 7 ++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index e07d00eaa..301bcee4c 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -943,7 +943,7 @@ a generic feature of the mark mechanism: Note that the assigned variable *must* be called ``pytestmark``, assigning e.g. ``foomark`` will not activate the fixtures. -Lastly you can put fixtures required by all tests in your project +It is also possible to put fixtures required by all tests in your project into an ini-file: .. code-block:: ini @@ -953,6 +953,22 @@ into an ini-file: usefixtures = cleandir +.. warning:: + + Note this mark has no effect in **fixture functions**. For example, + this **will not work as expected**: + + .. code-block:: python + + @pytest.mark.usefixtures("my_other_fixture") + @pytest.fixture + def my_fixture_that_sadly_wont_use_my_other_fixture(): + ... + + Currently this will not generate any error or warning, but this is intended + to be handled by `#3664 `_. + + .. _`autouse`: .. 
_`autouse fixtures`: diff --git a/doc/en/reference.rst b/doc/en/reference.rst index cdae37f95..b65e15822 100644 --- a/doc/en/reference.rst +++ b/doc/en/reference.rst @@ -170,9 +170,14 @@ pytest.mark.usefixtures Mark a test function as using the given fixture names. +.. warning:: + + This mark can be used with *test functions* only, having no effect when applied + to a **fixture** function. + .. py:function:: pytest.mark.usefixtures(*names) - :param args: the names of the fixture to use as strings + :param args: the names of the fixtures to use, as strings .. _`pytest.mark.xfail ref`: pytest.mark.xfail From 49e82a4be8efe8a38b9208724e3681db413f22be Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 7 Jul 2018 12:12:07 -0300 Subject: [PATCH 13/17] Skip deploy stage entirely unless we have a tag Borrowed from https://github.com/tox-dev/tox/pull/877 --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f2921e118..28393a0b7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,8 @@ language: python stages: - linting - test -- deploy +- name: deploy + if: repo = pytest-dev/pytest AND tag IS present python: - '3.6' install: From 42bbb4fa8a43b913ce267454f417605f9744a95b Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sat, 7 Jul 2018 17:18:44 -0700 Subject: [PATCH 14/17] Use -mpytest when invoking pytest in pytester --- changelog/742.bugfix.rst | 1 + src/_pytest/pytester.py | 9 +-------- testing/test_pytester.py | 5 +++++ 3 files changed, 7 insertions(+), 8 deletions(-) create mode 100644 changelog/742.bugfix.rst diff --git a/changelog/742.bugfix.rst b/changelog/742.bugfix.rst new file mode 100644 index 000000000..51dfce972 --- /dev/null +++ b/changelog/742.bugfix.rst @@ -0,0 +1 @@ +Invoke pytest using ``-mpytest`` so ``sys.path`` does not get polluted by packages installed in ``site-packages``. diff --git a/src/_pytest/pytester.py b/src/_pytest/pytester.py index ce1c8ea1c..7c9c09b1c 100644 --- a/src/_pytest/pytester.py +++ b/src/_pytest/pytester.py @@ -23,11 +23,6 @@ from _pytest.main import Session, EXIT_OK from _pytest.assertion.rewrite import AssertionRewritingHook -PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace( - "$py.class", ".py" -) - - IGNORE_PAM = [ # filenames added when obtaining details about the current user u"/var/lib/sss/mc/passwd" ] @@ -1029,9 +1024,7 @@ class Testdir(object): print("couldn't print to %s because of encoding" % (fp,)) def _getpytestargs(self): - # we cannot use `(sys.executable, script)` because on Windows the - # script is e.g. `pytest.exe` - return (sys.executable, PYTEST_FULLPATH) # noqa + return (sys.executable, "-mpytest") def runpython(self, script): """Run a python script using sys.executable as interpreter.
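A minimal sketch of the rationale behind ``-mpytest`` (illustration only; the paths and calls below are hypothetical, not part of the patch): the interpreter seeds ``sys.path[0]`` differently for the two invocation styles, and only the ``-m`` form avoids pushing ``site-packages`` to the front of the path.

    import subprocess
    import sys

    # Running pytest as a plain script puts the script's directory first on
    # sys.path -- that is site-packages when pytest is installed there:
    subprocess.call([sys.executable, "/venv/lib/python3.6/site-packages/pytest.py"])

    # Running it as a module puts the current working directory first instead,
    # so packages in site-packages do not take precedence over the files under test:
    subprocess.call([sys.executable, "-mpytest"])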
diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 195f2c7f1..9776a8659 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -394,3 +394,8 @@ class TestSysPathsSnapshot(object): assert getattr(sys, path_type) == original_data assert getattr(sys, other_path_type) is original_other assert getattr(sys, other_path_type) == original_other_data + + +def test_testdir_subprocess(testdir): + testfile = testdir.makepyfile("def test_one(): pass") + assert testdir.runpytest_subprocess(testfile).ret == 0 From 4ae93a7a078fc92d910b632a46a86cb779b3e2b7 Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 8 Jul 2018 08:35:53 -0700 Subject: [PATCH 15/17] Remove obsolete __future__ imports --- changelog/2319.trivial.rst | 1 + src/_pytest/_code/source.py | 13 +++---------- testing/python/raises.py | 3 +-- testing/test_assertrewrite.py | 6 +++--- testing/test_capture.py | 1 - 5 files changed, 8 insertions(+), 16 deletions(-) create mode 100644 changelog/2319.trivial.rst diff --git a/changelog/2319.trivial.rst b/changelog/2319.trivial.rst new file mode 100644 index 000000000..a69ec1345 --- /dev/null +++ b/changelog/2319.trivial.rst @@ -0,0 +1 @@ +Remove obsolete ``__future__`` imports. diff --git a/src/_pytest/_code/source.py b/src/_pytest/_code/source.py index 711408f61..3b037b7d4 100644 --- a/src/_pytest/_code/source.py +++ b/src/_pytest/_code/source.py @@ -1,4 +1,4 @@ -from __future__ import absolute_import, division, generators, print_function +from __future__ import absolute_import, division, print_function import ast from ast import PyCF_ONLY_AST as _AST_FLAG @@ -152,12 +152,7 @@ class Source(object): return "\n".join(self.lines) def compile( - self, - filename=None, - mode="exec", - flag=generators.compiler_flag, - dont_inherit=0, - _genframe=None, + self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None ): """ return compiled code object. 
if filename is None invent an artificial filename which displays @@ -201,9 +196,7 @@ class Source(object): # -def compile_( - source, filename=None, mode="exec", flags=generators.compiler_flag, dont_inherit=0 -): +def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0): """ compile the given source to a raw code object, and maintain an internal cache which allows later retrieval of the source code for the code object diff --git a/testing/python/raises.py b/testing/python/raises.py index 99aeffdf2..732e1e82c 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -33,8 +33,7 @@ class TestRaises(object): def test_raises_as_contextmanager(self, testdir): testdir.makepyfile( """ - from __future__ import with_statement - import py, pytest + import pytest import _pytest._code def test_simple(): diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index 20cc476d8..2640beb63 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -97,7 +97,7 @@ class TestAssertionRewrite(object): assert imp.lineno == 2 assert imp.col_offset == 0 assert isinstance(m.body[2], ast.Assign) - s = """from __future__ import with_statement\nother_stuff""" + s = """from __future__ import division\nother_stuff""" m = rewrite(s) assert isinstance(m.body[0], ast.ImportFrom) for imp in m.body[1:3]: @@ -105,7 +105,7 @@ class TestAssertionRewrite(object): assert imp.lineno == 2 assert imp.col_offset == 0 assert isinstance(m.body[3], ast.Expr) - s = """'doc string'\nfrom __future__ import with_statement""" + s = """'doc string'\nfrom __future__ import division""" m = rewrite(s) adjust_body_for_new_docstring_in_module_node(m) assert isinstance(m.body[0], ast.ImportFrom) @@ -113,7 +113,7 @@ class TestAssertionRewrite(object): assert isinstance(imp, ast.Import) assert imp.lineno == 2 assert imp.col_offset == 0 - s = """'doc string'\nfrom __future__ import with_statement\nother""" + s = """'doc string'\nfrom __future__ import division\nother""" m = rewrite(s) adjust_body_for_new_docstring_in_module_node(m) assert isinstance(m.body[0], ast.ImportFrom) diff --git a/testing/test_capture.py b/testing/test_capture.py index 54f0fbc44..5f5e1b98d 100644 --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -2,7 +2,6 @@ from __future__ import absolute_import, division, print_function # note: py.io capture tests where copied from # pylib 1.4.20.dev2 (rev 13d9af95547e) -from __future__ import with_statement import pickle import os import sys From af0059079cb35372fe9fceeeb6bb183795ce3b7e Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 8 Jul 2018 17:05:01 -0700 Subject: [PATCH 16/17] Remove unused fix-lint tox environment --- testing/code/test_source.py | 16 ++++++++++++ testing/code/test_source_multiline_block.py | 28 --------------------- tox.ini | 8 ------ 3 files changed, 16 insertions(+), 36 deletions(-) delete mode 100644 testing/code/test_source_multiline_block.py diff --git a/testing/code/test_source.py b/testing/code/test_source.py index 7982cfa35..995fabcf4 100644 --- a/testing/code/test_source.py +++ b/testing/code/test_source.py @@ -744,3 +744,19 @@ something '''""" result = getstatement(1, source) assert str(result) == "'''\n'''" + + +def test_getstartingblock_multiline(): + class A(object): + def __init__(self, *args): + frame = sys._getframe(1) + self.source = _pytest._code.Frame(frame).statement + + # fmt: off + x = A('x', + 'y' + , + 'z') + # fmt: on + values = [i for i in x.source.lines if i.strip()] + assert len(values) == 4 
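The move back into ``test_source.py`` works because the formatter now in use honors the ``# fmt: off`` / ``# fmt: on`` pragmas (black's convention), whereas autopep8 could only exclude whole files. A minimal sketch of the pattern, assuming black formats the file:

    def f(*args):
        return args

    result = f("x", "y", "z")  # layout normalized by the formatter

    # fmt: off
    result = f('x',
        'y'
        ,
        'z')  # guarded region kept exactly as written
    # fmt: on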
diff --git a/testing/code/test_source_multiline_block.py b/testing/code/test_source_multiline_block.py deleted file mode 100644 index 009bb87ae..000000000 --- a/testing/code/test_source_multiline_block.py +++ /dev/null @@ -1,28 +0,0 @@ -# flake8: noqa -import sys - -import _pytest._code - - -def test_getstartingblock_multiline(): - """ - This test was originally found in test_source.py, but it depends on the weird - formatting of the ``x = A`` construct seen here and our autopep8 tool can only exclude entire - files (it does not support excluding lines/blocks using the traditional #noqa comment yet, - see hhatto/autopep8#307). It was considered better to just move this single test to its own - file and exclude it from autopep8 than try to complicate things. - """ - - class A(object): - def __init__(self, *args): - frame = sys._getframe(1) - self.source = _pytest._code.Frame(frame).statement - - # fmt: off - x = A('x', - 'y' - , - 'z') - # fmt: on - values = [i for i in x.source.lines if i.strip()] - assert len(values) == 4 diff --git a/tox.ini b/tox.ini index adc4e9746..6e5d7ca68 100644 --- a/tox.ini +++ b/tox.ini @@ -151,14 +151,6 @@ commands = rm -rf /tmp/doc-exec* make regen -[testenv:fix-lint] -skipsdist = True -usedevelop = True -deps = - autopep8 -commands = - autopep8 --in-place -r --max-line-length=120 --exclude=test_source_multiline_block.py _pytest testing setup.py pytest.py - [testenv:jython] changedir = testing commands = From 61301d934e5e08620c5f7be82384bc07d3d51ea9 Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 8 Jul 2018 16:57:18 -0700 Subject: [PATCH 17/17] Remove some extraneous `# noqa` comments This was partially automated with https://github.com/asottile/yesqa _with a few caveats_: - it was run under python2 (chosen arbitrarily, when run under python3 other things were changed) - I used `git checkout -p` to revert the removal of `noqa` comments from `cmp()` lines. 
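A minimal sketch of the rewrite yesqa performs, on a hypothetical module (roughly, it deletes a `# noqa` comment and restores it only if flake8 then reports an error on that line -- which is why the Python 3 branch in `compat.py` below loses its comments while needed ones survive):

    # before yesqa
    from collections.abc import Mapping  # noqa
    import os, sys  # noqa

    # after yesqa: the first comment suppressed nothing and is removed; the
    # second still silences a real E401 (multiple imports on one line) and stays
    from collections.abc import Mapping
    import os, sys  # noqa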
--- src/_pytest/compat.py | 4 ++-- src/_pytest/config/__init__.py | 2 +- testing/test_assertrewrite.py | 2 +- testing/test_pytester.py | 5 +---- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/_pytest/compat.py b/src/_pytest/compat.py index 3ca27fe60..dab09c9d5 100644 --- a/src/_pytest/compat.py +++ b/src/_pytest/compat.py @@ -41,8 +41,8 @@ PY36 = sys.version_info[:2] >= (3, 6) MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError" if _PY3: - from collections.abc import MutableMapping as MappingMixin # noqa - from collections.abc import Mapping, Sequence # noqa + from collections.abc import MutableMapping as MappingMixin + from collections.abc import Mapping, Sequence else: # those raise DeprecationWarnings in Python >=3.7 from collections import MutableMapping as MappingMixin # noqa diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 7e7f3b6d2..421d124e9 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -71,7 +71,7 @@ def main(args=None, plugins=None): return 4 -class cmdline(object): # NOQA compatibility namespace +class cmdline(object): # compatibility namespace main = staticmethod(main) diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index 2640beb63..274b1ac53 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -941,7 +941,7 @@ class TestAssertionRewriteHookDetails(object): e = IOError() e.errno = 10 raise e - yield # noqa + yield monkeypatch.setattr(atomicwrites, "atomic_write", atomic_write_failed) assert not _write_pyc(state, [1], source_path.stat(), pycpath) diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 9776a8659..86dc35796 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -364,10 +364,7 @@ class TestSysPathsSnapshot(object): original = list(sys_path) original_other = list(getattr(sys, other_path_type)) snapshot = SysPathsSnapshot() - transformation = { - "source": (0, 1, 2, 3, 4, 5), - "target": (6, 2, 9, 7, 5, 8), - } # noqa: E201 + transformation = {"source": (0, 1, 2, 3, 4, 5), "target": (6, 2, 9, 7, 5, 8)} assert sys_path == [self.path(x) for x in transformation["source"]] sys_path[1] = self.path(6) sys_path[3] = self.path(7)